-rw-r--r--  tensorflow/contrib/bayesflow/python/ops/entropy.py | 42
-rw-r--r--  tensorflow/contrib/bayesflow/python/ops/monte_carlo.py | 38
-rw-r--r--  tensorflow/contrib/bayesflow/python/ops/special_math.py | 8
-rw-r--r--  tensorflow/contrib/bayesflow/python/ops/stochastic_gradient_estimators.py | 14
-rw-r--r--  tensorflow/contrib/bayesflow/python/ops/stochastic_graph.py | 8
-rw-r--r--  tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py | 6
-rw-r--r--  tensorflow/contrib/bayesflow/python/ops/variational_inference.py | 14
-rw-r--r--  tensorflow/contrib/copy_graph/python/util/copy_elements.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/bernoulli.py | 8
-rw-r--r--  tensorflow/contrib/distributions/python/ops/bijector.py | 76
-rw-r--r--  tensorflow/contrib/distributions/python/ops/categorical.py | 4
-rw-r--r--  tensorflow/contrib/distributions/python/ops/distribution.py | 66
-rw-r--r--  tensorflow/contrib/distributions/python/ops/distribution_util.py | 44
-rw-r--r--  tensorflow/contrib/distributions/python/ops/mvn.py | 8
-rw-r--r--  tensorflow/contrib/distributions/python/ops/operator_pd.py | 50
-rw-r--r--  tensorflow/contrib/distributions/python/ops/operator_pd_identity.py | 2
-rw-r--r--  tensorflow/contrib/distributions/python/ops/operator_pd_vdvt_update.py | 4
-rw-r--r--  tensorflow/contrib/distributions/python/ops/quantized_distribution.py | 10
-rw-r--r--  tensorflow/contrib/distributions/python/ops/shape.py | 80
-rw-r--r--  tensorflow/contrib/distributions/python/ops/wishart.py | 10
-rw-r--r--  tensorflow/contrib/framework/python/framework/checkpoint_utils.py | 4
-rw-r--r--  tensorflow/contrib/framework/python/framework/tensor_util.py | 16
-rw-r--r--  tensorflow/contrib/framework/python/ops/ops.py | 2
-rw-r--r--  tensorflow/contrib/graph_editor/reroute.py | 22
-rw-r--r--  tensorflow/contrib/graph_editor/select.py | 16
-rw-r--r--  tensorflow/contrib/graph_editor/subgraph.py | 8
-rw-r--r--  tensorflow/contrib/graph_editor/transform.py | 2
-rw-r--r--  tensorflow/contrib/graph_editor/util.py | 24
-rw-r--r--  tensorflow/contrib/labeled_tensor/python/ops/core.py | 2
-rw-r--r--  tensorflow/contrib/layers/python/layers/embedding_ops.py | 10
-rw-r--r--  tensorflow/contrib/layers/python/layers/encoders.py | 8
-rw-r--r--  tensorflow/contrib/layers/python/layers/feature_column.py | 22
-rw-r--r--  tensorflow/contrib/layers/python/layers/feature_column_ops.py | 10
-rw-r--r--  tensorflow/contrib/layers/python/layers/layers.py | 56
-rw-r--r--  tensorflow/contrib/layers/python/layers/layers_test.py | 2
-rw-r--r--  tensorflow/contrib/layers/python/layers/optimizers.py | 10
-rw-r--r--  tensorflow/contrib/layers/python/layers/regularizers.py | 14
-rw-r--r--  tensorflow/contrib/layers/python/layers/utils.py | 8
-rw-r--r--  tensorflow/contrib/layers/python/ops/bucketization_op.py | 4
-rw-r--r--  tensorflow/contrib/layers/python/ops/sparse_feature_cross_op.py | 2
-rw-r--r--  tensorflow/contrib/layers/python/ops/sparse_ops.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py | 4
-rw-r--r--  tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_queue_runner.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/dataframe/transform.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/dataframe/transforms/batch.py | 4
-rw-r--r--  tensorflow/contrib/learn/python/learn/dataframe/transforms/boolean_mask.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/dataframe/transforms/reader_source.py | 4
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/classifier.py | 6
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/dnn.py | 16
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py | 18
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py | 54
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/estimator.py | 50
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/head.py | 20
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/linear.py | 16
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/model_fn.py | 6
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/random_forest.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/svm.py | 6
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/tensor_signature.py | 10
-rw-r--r--  tensorflow/contrib/learn/python/learn/evaluable.py | 4
-rw-r--r--  tensorflow/contrib/learn/python/learn/graph_actions.py | 18
-rw-r--r--  tensorflow/contrib/learn/python/learn/graph_actions_test.py | 4
-rw-r--r--  tensorflow/contrib/learn/python/learn/learn_io/graph_io.py | 58
-rw-r--r--  tensorflow/contrib/learn/python/learn/metric_spec.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/monitors.py | 18
-rw-r--r--  tensorflow/contrib/learn/python/learn/ops/losses_ops.py | 2
-rw-r--r--  tensorflow/contrib/learn/python/learn/trainable.py | 4
-rw-r--r--  tensorflow/contrib/learn/python/learn/utils/export.py | 36
-rw-r--r--  tensorflow/contrib/linear_optimizer/python/sdca_optimizer.py | 2
-rw-r--r--  tensorflow/contrib/lookup/lookup_ops.py | 18
-rw-r--r--  tensorflow/contrib/losses/python/losses/loss_ops.py | 38
-rw-r--r--  tensorflow/contrib/metrics/__init__.py | 6
-rw-r--r--  tensorflow/contrib/metrics/python/metrics/classification.py | 8
-rw-r--r--  tensorflow/contrib/metrics/python/ops/confusion_matrix_ops.py | 2
-rw-r--r--  tensorflow/contrib/metrics/python/ops/histogram_ops.py | 14
-rw-r--r--  tensorflow/contrib/metrics/python/ops/metric_ops.py | 270
-rw-r--r--  tensorflow/contrib/metrics/python/ops/set_ops.py | 18
-rw-r--r--  tensorflow/contrib/opt/python/training/external_optimizer.py | 16
-rw-r--r--  tensorflow/contrib/rnn/python/ops/lstm_ops.py | 66
-rw-r--r--  tensorflow/contrib/rnn/python/ops/rnn.py | 2
-rw-r--r--  tensorflow/contrib/slim/python/slim/data/data_decoder.py | 4
-rw-r--r--  tensorflow/contrib/slim/python/slim/data/prefetch_queue.py | 2
-rw-r--r--  tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py | 10
-rw-r--r--  tensorflow/contrib/slim/python/slim/learning.py | 16
-rw-r--r--  tensorflow/contrib/slim/python/slim/nets/resnet_utils.py | 12
-rw-r--r--  tensorflow/contrib/slim/python/slim/nets/resnet_v1_test.py | 4
-rw-r--r--  tensorflow/contrib/slim/python/slim/nets/resnet_v2_test.py | 4
-rw-r--r--  tensorflow/contrib/solvers/python/ops/lanczos.py | 18
-rw-r--r--  tensorflow/contrib/solvers/python/ops/least_squares.py | 12
-rw-r--r--  tensorflow/contrib/solvers/python/ops/linear_equations.py | 12
-rw-r--r--  tensorflow/contrib/tensor_forest/data/data_ops.py | 4
-rw-r--r--  tensorflow/contrib/training/python/training/bucket_ops.py | 4
-rw-r--r--  tensorflow/contrib/training/python/training/device_setter.py | 4
-rw-r--r--  tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py | 66
-rw-r--r--  tensorflow/contrib/training/python/training/training.py | 10
-rw-r--r--  tensorflow/examples/learn/multiple_gpu.py | 4
-rw-r--r--  tensorflow/python/client/session.py | 16
-rw-r--r--  tensorflow/python/framework/dtypes.py | 2
-rw-r--r--  tensorflow/python/framework/function.py | 2
-rw-r--r--  tensorflow/python/framework/gen_docs_combined.py | 2
-rw-r--r--  tensorflow/python/framework/importer.py | 12
-rw-r--r--  tensorflow/python/framework/meta_graph.py | 4
-rw-r--r--  tensorflow/python/framework/op_def_library.py | 2
-rw-r--r--  tensorflow/python/framework/ops.py | 114
-rw-r--r--  tensorflow/python/framework/sparse_tensor.py | 2
-rw-r--r--  tensorflow/python/framework/subscribe.py | 16
-rw-r--r--  tensorflow/python/framework/tensor_shape.py | 6
-rw-r--r--  tensorflow/python/kernel_tests/cwise_ops_test.py | 4
-rw-r--r--  tensorflow/python/ops/array_ops.py | 152
-rw-r--r--  tensorflow/python/ops/candidate_sampling_ops.py | 18
-rw-r--r--  tensorflow/python/ops/check_ops.py | 60
-rw-r--r--  tensorflow/python/ops/clip_ops.py | 36
-rw-r--r--  tensorflow/python/ops/control_flow_ops.py | 22
-rw-r--r--  tensorflow/python/ops/ctc_ops.py | 27
-rw-r--r--  tensorflow/python/ops/data_flow_ops.py | 8
-rw-r--r--  tensorflow/python/ops/embedding_ops.py | 6
-rw-r--r--  tensorflow/python/ops/gradients_impl.py | 18
-rw-r--r--  tensorflow/python/ops/histogram_ops.py | 6
-rw-r--r--  tensorflow/python/ops/image_ops_impl.py | 2
-rw-r--r--  tensorflow/python/ops/init_ops.py | 8
-rw-r--r--  tensorflow/python/ops/linalg_ops.py | 30
-rw-r--r--  tensorflow/python/ops/logging_ops.py | 38
-rw-r--r--  tensorflow/python/ops/math_ops.py | 148
-rw-r--r--  tensorflow/python/ops/nn.py | 102
-rw-r--r--  tensorflow/python/ops/nn_ops.py | 99
-rw-r--r--  tensorflow/python/ops/parsing_ops.py | 40
-rw-r--r--  tensorflow/python/ops/partitioned_variables.py | 4
-rw-r--r--  tensorflow/python/ops/random_ops.py | 4
-rw-r--r--  tensorflow/python/ops/resource_variable_ops.py | 2
-rw-r--r--  tensorflow/python/ops/rnn.py | 94
-rw-r--r--  tensorflow/python/ops/script_ops.py | 6
-rw-r--r--  tensorflow/python/ops/sparse_ops.py | 50
-rw-r--r--  tensorflow/python/ops/special_math_ops.py | 12
-rw-r--r--  tensorflow/python/ops/string_ops.py | 4
-rw-r--r--  tensorflow/python/ops/summary_ops.py | 2
-rw-r--r--  tensorflow/python/ops/tensor_array_grad.py | 26
-rw-r--r--  tensorflow/python/ops/tensor_array_ops.py | 24
-rw-r--r--  tensorflow/python/ops/variable_scope.py | 6
-rw-r--r--  tensorflow/python/ops/variables.py | 48
-rw-r--r--  tensorflow/python/platform/benchmark.py | 4
-rw-r--r--  tensorflow/python/summary/summary.py | 24
-rw-r--r--  tensorflow/python/summary/summary_iterator.py | 2
-rw-r--r--  tensorflow/python/summary/writer/writer.py | 2
-rw-r--r--  tensorflow/python/training/adadelta.py | 6
-rw-r--r--  tensorflow/python/training/adagrad.py | 2
-rw-r--r--  tensorflow/python/training/adagrad_da.py | 4
-rw-r--r--  tensorflow/python/training/basic_session_run_hooks.py | 4
-rw-r--r--  tensorflow/python/training/ftrl.py | 2
-rw-r--r--  tensorflow/python/training/input.py | 10
-rw-r--r--  tensorflow/python/training/learning_rate_decay.py | 36
-rw-r--r--  tensorflow/python/training/momentum.py | 4
-rw-r--r--  tensorflow/python/training/moving_averages.py | 10
-rw-r--r--  tensorflow/python/training/optimizer.py | 12
-rw-r--r--  tensorflow/python/training/proximal_adagrad.py | 2
-rw-r--r--  tensorflow/python/training/saver.py | 8
-rw-r--r--  tensorflow/python/training/session_manager.py | 2
-rw-r--r--  tensorflow/python/training/session_run_hook.py | 2
-rw-r--r--  tensorflow/python/training/slot_creator.py | 8
-rw-r--r--  tensorflow/python/training/supervisor.py | 14
-rw-r--r--  tensorflow/python/training/training_util.py | 6
160 files changed, 1599 insertions(+), 1603 deletions(-)
diff --git a/tensorflow/contrib/bayesflow/python/ops/entropy.py b/tensorflow/contrib/bayesflow/python/ops/entropy.py
index fea4f6e718..80b35c59d2 100644
--- a/tensorflow/contrib/bayesflow/python/ops/entropy.py
+++ b/tensorflow/contrib/bayesflow/python/ops/entropy.py
@@ -136,15 +136,15 @@ def elbo_ratio(log_p,
<= Log[p(x)]
```
- User supplies either `Output` of samples `z`, or number of samples to draw `n`
+ User supplies either `Tensor` of samples `z`, or number of samples to draw `n`
Args:
- log_p: Callable mapping samples from `q` to `Output`s with
+ log_p: Callable mapping samples from `q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_p` works "just like" `q.log_prob`.
q: `tf.contrib.distributions.Distribution`.
- z: `Output` of samples from `q`, produced by `q.sample_n`.
- n: Integer `Output`. Number of samples to generate if `z` is not provided.
+ z: `Tensor` of samples from `q`, produced by `q.sample_n`.
+ n: Integer `Tensor`. Number of samples to generate if `z` is not provided.
seed: Python integer to seed the random number generator.
form: Either `ELBOForms.analytic_entropy` (use formula for entropy of `q`)
or `ELBOForms.sample` (sample estimate of entropy), or `ELBOForms.default`
@@ -153,7 +153,7 @@ def elbo_ratio(log_p,
name: A name to give this `Op`.
Returns:
- Scalar `Output` holding sample mean KL divergence. `shape` is the batch
+ Scalar `Tensor` holding sample mean KL divergence. `shape` is the batch
shape of `q`, and `dtype` is the same as `q`.
Raises:
@@ -189,12 +189,12 @@ def entropy_shannon(p,
= Entropy[p]
```
- User supplies either `Output` of samples `z`, or number of samples to draw `n`
+ User supplies either `Tensor` of samples `z`, or number of samples to draw `n`
Args:
p: `tf.contrib.distributions.Distribution`
- z: `Output` of samples from `p`, produced by `p.sample_n(n)` for some `n`.
- n: Integer `Output`. Number of samples to generate if `z` is not provided.
+ z: `Tensor` of samples from `p`, produced by `p.sample_n(n)` for some `n`.
+ n: Integer `Tensor`. Number of samples to generate if `z` is not provided.
seed: Python integer to seed the random number generator.
form: Either `ELBOForms.analytic_entropy` (use formula for entropy of `q`)
or `ELBOForms.sample` (sample estimate of entropy), or `ELBOForms.default`
@@ -203,7 +203,7 @@ def entropy_shannon(p,
name: A name to give this `Op`.
Returns:
- An `Output` with same `dtype` as `p`, and shape equal to `p.batch_shape`.
+ A `Tensor` with same `dtype` as `p`, and shape equal to `p.batch_shape`.
Raises:
ValueError: If `form` not handled by this function.
@@ -316,24 +316,24 @@ def renyi_ratio(log_p, q, alpha, z=None, n=None, seed=None, name='renyi_ratio'):
#### Call signature
- User supplies either `Output` of samples `z`, or number of samples to draw `n`
+ User supplies either `Tensor` of samples `z`, or number of samples to draw `n`
Args:
- log_p: Callable mapping samples from `q` to `Output`s with
+ log_p: Callable mapping samples from `q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_p` works "just like" `q.log_prob`.
q: `tf.contrib.distributions.Distribution`.
`float64` `dtype` recommended.
`log_p` and `q` should be supported on the same set.
- alpha: `Output` with shape `q.batch_shape` and values not equal to 1.
- z: `Output` of samples from `q`, produced by `q.sample_n`.
- n: Integer `Output`. The number of samples to use if `z` is not provided.
+ alpha: `Tensor` with shape `q.batch_shape` and values not equal to 1.
+ z: `Tensor` of samples from `q`, produced by `q.sample_n`.
+ n: Integer `Tensor`. The number of samples to use if `z` is not provided.
Note that this can be highly biased for small `n`, see docstring.
seed: Python integer to seed the random number generator.
name: A name to give this `Op`.
Returns:
- renyi_result: The scaled log of sample mean. `Output` with `shape` equal
+ renyi_result: The scaled log of sample mean. `Tensor` with `shape` equal
to batch shape of `q`, and `dtype` = `q.dtype`.
"""
with ops.name_scope(name, values=[alpha, n, z]):
@@ -362,7 +362,7 @@ def renyi_alpha(step,
alpha_min,
alpha_max=0.99999,
name='renyi_alpha'):
- r"""Exponentially decaying `Output` appropriate for Renyi ratios.
+ r"""Exponentially decaying `Tensor` appropriate for Renyi ratios.
When minimizing the Renyi divergence for `0 <= alpha < 1` (or maximizing the
Renyi equivalent of elbo) in high dimensions, it is not uncommon to experience
@@ -382,17 +382,17 @@ def renyi_alpha(step,
```
Args:
- step: Non-negative scalar `Output`. Typically the global step or an
+ step: Non-negative scalar `Tensor`. Typically the global step or an
offset version thereof.
- decay_time: Positive scalar `Output`.
- alpha_min: `float` or `double` `Output`.
+ decay_time: Positive scalar `Tensor`.
+ alpha_min: `float` or `double` `Tensor`.
The minimal, final value of `alpha`, achieved when `step >= decay_time`
- alpha_max: `Output` of same `dtype` as `alpha_min`.
+ alpha_max: `Tensor` of same `dtype` as `alpha_min`.
The maximal, beginning value of `alpha`, achieved when `step == 0`
name: A name to give this `Op`.
Returns:
- alpha: An `Output` of same `dtype` as `alpha_min`.
+ alpha: A `Tensor` of same `dtype` as `alpha_min`.
"""
with ops.name_scope(name, values=[step, decay_time, alpha_min, alpha_max]):
alpha_min = ops.convert_to_tensor(alpha_min, name='alpha_min')
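Editor's note: to make the renamed docstrings above concrete, here is a minimal usage sketch of `elbo_ratio` and `renyi_alpha` against the pre-1.0 contrib API this diff documents. The `Normal` parameters, sample count, and decay values are illustrative assumptions, not part of the change.

```
import tensorflow as tf
from tensorflow.contrib import distributions
from tensorflow.contrib.bayesflow.python.ops import entropy

# Approximating distribution q; log_p works "just like" q.log_prob.
q = distributions.Normal(mu=0., sigma=1.)
log_p = distributions.Normal(mu=0.5, sigma=1.).log_prob

# Sample-mean ELBO estimate; n samples are drawn since z is not given.
elbo = entropy.elbo_ratio(log_p, q, n=100, seed=42)

# Exponentially decaying alpha for Renyi ratios.
step = tf.constant(200.)  # e.g. a (cast) global step
alpha = entropy.renyi_alpha(step, decay_time=1000., alpha_min=0.5)
```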
diff --git a/tensorflow/contrib/bayesflow/python/ops/monte_carlo.py b/tensorflow/contrib/bayesflow/python/ops/monte_carlo.py
index dbfa3611ba..9cd2c5aee6 100644
--- a/tensorflow/contrib/bayesflow/python/ops/monte_carlo.py
+++ b/tensorflow/contrib/bayesflow/python/ops/monte_carlo.py
@@ -105,26 +105,26 @@ def expectation_importance_sampler(f,
If `f >= 0`, it is up to 2x more efficient to exponentiate the result of
`expectation_importance_sampler_logspace` applied to `Log[f]`.
- User supplies either `Output` of samples `z`, or number of samples to draw `n`
+ User supplies either `Tensor` of samples `z`, or number of samples to draw `n`
Args:
- f: Callable mapping samples from `sampling_dist_q` to `Output`s with shape
+ f: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape
broadcastable to `q.batch_shape`.
For example, `f` works "just like" `q.log_prob`.
- log_p: Callable mapping samples from `sampling_dist_q` to `Output`s with
+ log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_p` works "just like" `sampling_dist_q.log_prob`.
sampling_dist_q: The sampling distribution.
`tf.contrib.distributions.Distribution`.
`float64` `dtype` recommended.
`log_p` and `q` should be supported on the same set.
- z: `Output` of samples from `q`, produced by `q.sample_n`.
- n: Integer `Output`. Number of samples to generate if `z` is not provided.
+ z: `Tensor` of samples from `q`, produced by `q.sample_n`.
+ n: Integer `Tensor`. Number of samples to generate if `z` is not provided.
seed: Python integer to seed the random number generator.
name: A name to give this `Op`.
Returns:
- The importance sampling estimate. `Output` with `shape` equal
+ The importance sampling estimate. `Tensor` with `shape` equal
to batch shape of `q`, and `dtype` = `q.dtype`.
"""
q = sampling_dist_q
@@ -182,26 +182,26 @@ def expectation_importance_sampler_logspace(
log-space.
- User supplies either `Output` of samples `z`, or number of samples to draw `n`
+ User supplies either `Tensor` of samples `z`, or number of samples to draw `n`
Args:
- log_f: Callable mapping samples from `sampling_dist_q` to `Output`s with
+ log_f: Callable mapping samples from `sampling_dist_q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_f` works "just like" `sampling_dist_q.log_prob`.
- log_p: Callable mapping samples from `sampling_dist_q` to `Output`s with
+ log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with
shape broadcastable to `q.batch_shape`.
For example, `log_p` works "just like" `q.log_prob`.
sampling_dist_q: The sampling distribution.
`tf.contrib.distributions.Distribution`.
`float64` `dtype` recommended.
`log_p` and `q` should be supported on the same set.
- z: `Output` of samples from `q`, produced by `q.sample_n`.
- n: Integer `Output`. Number of samples to generate if `z` is not provided.
+ z: `Tensor` of samples from `q`, produced by `q.sample_n`.
+ n: Integer `Tensor`. Number of samples to generate if `z` is not provided.
seed: Python integer to seed the random number generator.
name: A name to give this `Op`.
Returns:
- Logarithm of the importance sampling estimate. `Output` with `shape` equal
+ Logarithm of the importance sampling estimate. `Tensor` with `shape` equal
to batch shape of `q`, and `dtype` = `q.dtype`.
"""
q = sampling_dist_q
@@ -215,10 +215,10 @@ def _logspace_mean(log_values):
"""Evaluate `Log[E[values]]` in a stable manner.
Args:
- log_values: `Output` holding `Log[values]`.
+ log_values: `Tensor` holding `Log[values]`.
Returns:
- `Output` of same `dtype` as `log_values`, reduced across dim 0.
+ `Tensor` of same `dtype` as `log_values`, reduced across dim 0.
`Log[Mean[values]]`.
"""
# center = Max[Log[values]], with stop-gradient
@@ -249,18 +249,18 @@ def expectation(f, p, z=None, n=None, seed=None, name='expectation'):
\approx E_p[f(Z)]
```
- User supplies either `Output` of samples `z`, or number of samples to draw `n`
+ User supplies either `Tensor` of samples `z`, or number of samples to draw `n`
Args:
- f: Callable mapping samples from `p` to `Output`s.
+ f: Callable mapping samples from `p` to `Tensors`.
p: `tf.contrib.distributions.Distribution`.
- z: `Output` of samples from `p`, produced by `p.sample_n`.
- n: Integer `Output`. Number of samples to generate if `z` is not provided.
+ z: `Tensor` of samples from `p`, produced by `p.sample_n`.
+ n: Integer `Tensor`. Number of samples to generate if `z` is not provided.
seed: Python integer to seed the random number generator.
name: A name to give this `Op`.
Returns:
- An `Output` with the same `dtype` as `p`.
+ A `Tensor` with the same `dtype` as `p`.
Example:
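Editor's note: the hunk cuts off at the docstring's own `Example:`; as a stand-in, a minimal sketch of `expectation` per the arguments documented above. The distribution parameters and sample count are illustrative assumptions.

```
import tensorflow as tf
from tensorflow.contrib import distributions
from tensorflow.contrib.bayesflow.python.ops import monte_carlo

p = distributions.Normal(mu=1., sigma=2.)
# Monte Carlo estimate of E_p[Z^2]; n samples are drawn since z is not given.
e_z2 = monte_carlo.expectation(lambda z: tf.square(z), p, n=1000, seed=0)
```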
diff --git a/tensorflow/contrib/bayesflow/python/ops/special_math.py b/tensorflow/contrib/bayesflow/python/ops/special_math.py
index dc3bf067ca..77e7c0e093 100644
--- a/tensorflow/contrib/bayesflow/python/ops/special_math.py
+++ b/tensorflow/contrib/bayesflow/python/ops/special_math.py
@@ -65,11 +65,11 @@ def ndtr(x, name="ndtr"):
```
Args:
- x: `Output` of type `float32`, `float64`.
+ x: `Tensor` of type `float32`, `float64`.
name: Python string. A name for the operation (default="ndtr").
Returns:
- ndtr: `Output` with `dtype=x.dtype`.
+ ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x` is not floating-type.
@@ -135,13 +135,13 @@ def log_ndtr(x, series_order=3, name="log_ndtr"):
Args:
- x: `Output` of type `float32`, `float64`.
+ x: `Tensor` of type `float32`, `float64`.
series_order: Positive Python `integer`. Maximum depth to
evaluate the asymptotic expansion. This is the `N` above.
name: Python string. A name for the operation (default="log_ndtr").
Returns:
- log_ndtr: `Output` with `dtype=x.dtype`.
+ log_ndtr: `Tensor` with `dtype=x.dtype`.
Raises:
TypeError: if `x.dtype` is not handled.
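Editor's note: a minimal sketch of the `ndtr`/`log_ndtr` signatures documented above; the input values are illustrative.

```
import tensorflow as tf
from tensorflow.contrib.bayesflow.python.ops import special_math

x = tf.constant([-2.0, 0.0, 2.0], dtype=tf.float64)  # float32/float64 only
cdf = special_math.ndtr(x)                           # standard normal CDF
log_cdf = special_math.log_ndtr(x, series_order=3)   # numerically stable log CDF
```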
diff --git a/tensorflow/contrib/bayesflow/python/ops/stochastic_gradient_estimators.py b/tensorflow/contrib/bayesflow/python/ops/stochastic_gradient_estimators.py
index 6691dfca91..2139419289 100644
--- a/tensorflow/contrib/bayesflow/python/ops/stochastic_gradient_estimators.py
+++ b/tensorflow/contrib/bayesflow/python/ops/stochastic_gradient_estimators.py
@@ -75,13 +75,13 @@ def score_function(stochastic_tensor, value, loss, baseline=None,
Args:
stochastic_tensor: `StochasticTensor` p(x).
- value: `Output` x. Samples from p(x).
- loss: `Output`.
- baseline: `Output` broadcastable to `loss`.
+ value: `Tensor` x. Samples from p(x).
+ loss: `Tensor`.
+ baseline: `Tensor` broadcastable to `loss`.
name: name to prepend ops with.
Returns:
- `Output` `p.log_prob(x) * (loss - b)`. Taking the gradient yields the score
+ `Tensor` `p.log_prob(x) * (loss - b)`. Taking the gradient yields the score
function estimator.
"""
with ops.name_scope(name, values=[value, loss, baseline]):
@@ -103,7 +103,7 @@ def get_score_function_with_advantage(advantage_fn=None,
Args:
advantage_fn: callable that takes the `StochasticTensor` and the
- downstream `loss` and returns an `Output` advantage
+ downstream `loss` and returns a `Tensor` advantage
(e.g. `loss - baseline`).
name: name to prepend ops with.
@@ -125,7 +125,7 @@ def get_score_function_with_constant_baseline(baseline, name="ScoreFunction"):
"""Score function estimator with constant baseline.
Args:
- baseline: `Output` to be subtracted from loss.
+ baseline: `Tensor` to be subtracted from loss.
name: name to prepend ops with.
Returns:
@@ -145,7 +145,7 @@ def get_score_function_with_baseline(baseline_fn=None, name="ScoreFunction"):
Args:
baseline_fn: callable that takes the `StochasticTensor` and the downstream
- `loss` and returns an `Output` baseline to be subtracted from the `loss`.
+ `loss` and returns a `Tensor` baseline to be subtracted from the `loss`.
If None, defaults to `get_mean_baseline`, which is an EMA of the loss.
name: name to prepend ops with.
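Editor's note: a minimal sketch of the estimator factory documented above. The baseline value is an illustrative assumption; the returned callable is intended for `StochasticTensor`'s `loss_fn` argument.

```
import tensorflow as tf
from tensorflow.contrib.bayesflow.python.ops import (
    stochastic_gradient_estimators as sge)

# Score-function estimator that subtracts a constant baseline from the loss.
loss_fn = sge.get_score_function_with_constant_baseline(
    baseline=tf.constant(1.0))
```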
diff --git a/tensorflow/contrib/bayesflow/python/ops/stochastic_graph.py b/tensorflow/contrib/bayesflow/python/ops/stochastic_graph.py
index 7f2a223605..81c04d8e9b 100644
--- a/tensorflow/contrib/bayesflow/python/ops/stochastic_graph.py
+++ b/tensorflow/contrib/bayesflow/python/ops/stochastic_graph.py
@@ -54,7 +54,7 @@ def _stochastic_dependencies_map(fixed_losses, stochastic_tensors=None):
"""Map stochastic tensors to the fixed losses that depend on them.
Args:
- fixed_losses: a list of `Output`s.
+ fixed_losses: a list of `Tensor`s.
stochastic_tensors: a list of `StochasticTensor`s to map to fixed losses.
If `None`, all `StochasticTensor`s in the graph will be used.
@@ -109,16 +109,16 @@ def surrogate_loss(sample_losses,
dimensionality of 1 or greater. All losses should have the same shape.
stochastic_tensors: a list of `StochasticTensor`s to add loss terms for.
If None, defaults to all `StochasticTensor`s in the graph upstream of
- the `Output`s in `sample_losses`.
+ the `Tensor`s in `sample_losses`.
name: the name with which to prepend created ops.
Returns:
- `Output` loss, which is the sum of `sample_losses` and the
+ `Tensor` loss, which is the sum of `sample_losses` and the
`loss_fn`s returned by the `StochasticTensor`s.
Raises:
TypeError: if `sample_losses` is not a list or tuple, or if its elements
- are not `Output`s.
+ are not `Tensor`s.
ValueError: if any loss in `sample_losses` does not have dimensionality 1
or greater.
"""
diff --git a/tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py b/tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py
index e99ef5374e..e52c81740d 100644
--- a/tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py
+++ b/tensorflow/contrib/bayesflow/python/ops/stochastic_tensor.py
@@ -91,10 +91,10 @@ class BaseStochasticTensor(object):
constant with respect to the input for purposes of the gradient.
Args:
- sample_loss: `Output`, sample loss downstream of this `StochasticTensor`.
+ sample_loss: `Tensor`, sample loss downstream of this `StochasticTensor`.
Returns:
- Either `None` or an `Output`.
+ Either `None` or a `Tensor`.
"""
raise NotImplementedError("surrogate_loss not implemented")
@@ -301,7 +301,7 @@ class StochasticTensor(BaseStochasticTensor):
value type set with the `value_type` context manager will be used.
loss_fn: callable that takes
`(st, st.value(), influenced_loss)`, where
- `st` is this `StochasticTensor`, and returns an `Output` loss. By
+ `st` is this `StochasticTensor`, and returns a `Tensor` loss. By
default, `loss_fn` is the `score_function`, or more precisely, the
integral of the score function, such that when the gradient is taken,
the score function results. See the `stochastic_gradient_estimators`
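Editor's note: a minimal sketch of `StochasticTensor` usage matching the docstring above; the constructor form (a `Distribution` instance, with `loss_fn` defaulting to the score function) is assumed for this vintage.

```
import tensorflow as tf
from tensorflow.contrib import distributions
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor as st

dt = st.StochasticTensor(distributions.Normal(mu=0., sigma=1.))
y = dt.value() + 1.0  # value() yields a Tensor usable downstream
```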
diff --git a/tensorflow/contrib/bayesflow/python/ops/variational_inference.py b/tensorflow/contrib/bayesflow/python/ops/variational_inference.py
index 6f2b54c9f8..bd8309b56b 100644
--- a/tensorflow/contrib/bayesflow/python/ops/variational_inference.py
+++ b/tensorflow/contrib/bayesflow/python/ops/variational_inference.py
@@ -135,7 +135,7 @@ def elbo(log_likelihood,
e.g. `q(Z) = q(z1)q(z2)q(z3)`.
Args:
- log_likelihood: `Output` log p(x|Z).
+ log_likelihood: `Tensor` log p(x|Z).
variational_with_prior: dict from `StochasticTensor` q(Z) to
`Distribution` p(Z). If `None`, defaults to all `StochasticTensor`
objects upstream of `log_likelihood` with priors registered with
@@ -148,7 +148,7 @@ def elbo(log_likelihood,
name: name to prefix ops with.
Returns:
- `Output` ELBO of the same type and shape as `log_likelihood`.
+ `Tensor` ELBO of the same type and shape as `log_likelihood`.
Raises:
TypeError: if variationals in `variational_with_prior` are not
@@ -181,7 +181,7 @@ def elbo_with_log_joint(log_joint,
Because only the joint is specified, analytic KL is not available.
Args:
- log_joint: `Output` log p(x, Z).
+ log_joint: `Tensor` log p(x, Z).
variational: list of `StochasticTensor` q(Z). If `None`, defaults to all
`StochasticTensor` objects upstream of `log_joint`.
keep_batch_dim: bool. Whether to keep the batch dimension when summing
@@ -192,7 +192,7 @@ def elbo_with_log_joint(log_joint,
name: name to prefix ops with.
Returns:
- `Output` ELBO of the same type and shape as `log_joint`.
+ `Tensor` ELBO of the same type and shape as `log_joint`.
Raises:
TypeError: if variationals in `variational` are not `StochasticTensor`s.
@@ -224,15 +224,15 @@ def _elbo(form, log_likelihood, log_joint, variational_with_prior,
Args:
form: ELBOForms constant. Controls how the ELBO is computed.
- log_likelihood: `Output` log p(x|Z).
- log_joint: `Output` log p(x, Z).
+ log_likelihood: `Tensor` log p(x|Z).
+ log_joint: `Tensor` log p(x, Z).
variational_with_prior: `dict<StochasticTensor, Distribution>`, varational
distributions to prior distributions.
keep_batch_dim: bool. Whether to keep the batch dimension when reducing
the entropy/KL.
Returns:
- ELBO `Output` with same shape and dtype as `log_likelihood`/`log_joint`.
+ ELBO `Tensor` with same shape and dtype as `log_likelihood`/`log_joint`.
"""
ELBOForms.check_form(form)
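Editor's note: a minimal sketch of `elbo` per the arguments documented above. The `register_prior` pairing is inferred from the docstring's "priors registered with ..." phrasing, and the model itself is an illustrative assumption.

```
import tensorflow as tf
from tensorflow.contrib import distributions
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor as st
from tensorflow.contrib.bayesflow.python.ops import variational_inference as vi

prior = distributions.Normal(mu=0., sigma=1.)
q_z = st.StochasticTensor(distributions.Normal(mu=tf.Variable(0.), sigma=1.))
vi.register_prior(q_z, prior)                 # pair q(Z) with its prior p(Z)
x = tf.constant([0.3, -0.1])
log_likelihood = tf.reduce_sum(
    distributions.Normal(mu=q_z.value(), sigma=1.).log_prob(x))
loss = -vi.elbo(log_likelihood)               # minimize the negative ELBO
```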
diff --git a/tensorflow/contrib/copy_graph/python/util/copy_elements.py b/tensorflow/contrib/copy_graph/python/util/copy_elements.py
index 3850d45749..19d67ad490 100644
--- a/tensorflow/contrib/copy_graph/python/util/copy_elements.py
+++ b/tensorflow/contrib/copy_graph/python/util/copy_elements.py
@@ -122,7 +122,7 @@ def copy_op_to_graph(org_instance, to_graph, variables,
The copied `Operation` from `to_graph`.
Raises:
- TypeError: If `org_instance` is not an `Operation` or `Output`.
+ TypeError: If `org_instance` is not an `Operation` or `Tensor`.
"""
#The name of the new instance
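Editor's note: a minimal sketch of `copy_op_to_graph` consistent with the Raises clause above (it accepts an `Operation` or a `Tensor`); the positional `variables` argument and graph setup are assumptions for illustration.

```
import tensorflow as tf
from tensorflow.contrib.copy_graph.python.util import copy_elements

g1, g2 = tf.Graph(), tf.Graph()
with g1.as_default():
  a = tf.constant(1.0, name="a")
# Copies `a` (and the ops producing it) into g2; no Variables are involved.
a_copy = copy_elements.copy_op_to_graph(a, g2, [])
```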
diff --git a/tensorflow/contrib/distributions/python/ops/bernoulli.py b/tensorflow/contrib/distributions/python/ops/bernoulli.py
index 79be73aae8..44962a5f1b 100644
--- a/tensorflow/contrib/distributions/python/ops/bernoulli.py
+++ b/tensorflow/contrib/distributions/python/ops/bernoulli.py
@@ -47,12 +47,12 @@ class Bernoulli(distribution.Distribution):
"""Construct Bernoulli distributions.
Args:
- logits: An N-D `Output` representing the log-odds
- of a positive event. Each entry in the `Output` parametrizes
+ logits: An N-D `Tensor` representing the log-odds
+ of a positive event. Each entry in the `Tensor` parametrizes
an independent Bernoulli distribution where the probability of an event
is sigmoid(logits). Only one of `logits` or `p` should be passed in.
- p: An N-D `Output` representing the probability of a positive
- event. Each entry in the `Output` parameterizes an independent
+ p: An N-D `Tensor` representing the probability of a positive
+ event. Each entry in the `Tensor` parameterizes an independent
Bernoulli distribution. Only one of `logits` or `p` should be passed
in.
dtype: dtype for samples.
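Editor's note: a minimal sketch of the `Bernoulli` constructor documented above; the logits values and sample count are illustrative, and the `p` property is assumed to expose `sigmoid(logits)`.

```
import tensorflow as tf
from tensorflow.contrib import distributions

# Three independent Bernoullis; pass exactly one of `logits` or `p`.
b = distributions.Bernoulli(logits=tf.constant([-1.0, 0.0, 1.0]))
samples = b.sample_n(5)  # shape (5, 3)
probs = b.p              # sigmoid(logits)
```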
diff --git a/tensorflow/contrib/distributions/python/ops/bijector.py b/tensorflow/contrib/distributions/python/ops/bijector.py
index ab8b320095..450874caef 100644
--- a/tensorflow/contrib/distributions/python/ops/bijector.py
+++ b/tensorflow/contrib/distributions/python/ops/bijector.py
@@ -90,9 +90,9 @@ class _Mapping(collections.namedtuple("_Mapping",
"""Custom __new__ so namedtuple items have defaults.
Args:
- x: `Output`. Forward.
- y: `Output`. Inverse.
- ildj: `Output`. Inverse log det Jacobian.
+ x: `Tensor`. Forward.
+ y: `Tensor`. Inverse.
+ ildj: `Tensor`. Inverse log det Jacobian.
condition_kwargs: Python dictionary. Extra args supplied to
forward/inverse/etc functions.
@@ -118,9 +118,9 @@ class _Mapping(collections.namedtuple("_Mapping",
"""Returns new _Mapping with args merged with self.
Args:
- x: `Output`. Forward.
- y: `Output`. Inverse.
- ildj: `Output`. Inverse log det Jacobian.
+ x: `Tensor`. Forward.
+ y: `Tensor`. Inverse.
+ ildj: `Tensor`. Inverse log det Jacobian.
condition_kwargs: Python dictionary. Extra args supplied to
forward/inverse/etc functions.
mapping: Instance of _Mapping to merge. Can only be specified if no other
@@ -166,7 +166,7 @@ class Bijector(object):
[diffeomorphism](https://en.wikipedia.org/wiki/Diffeomorphism), i.e., a
bijective, differentiable function. A `Bijector` is used by
`TransformedDistribution` but can be generally used for transforming a
- `Distribution` generated `Output`. A `Bijector` is characterized by three
+ `Distribution` generated `Tensor`. A `Bijector` is characterized by three
operations:
1. Forward Evaluation
@@ -258,11 +258,11 @@ class Bijector(object):
Example of why a `Bijector` needs to understand sample, batch, event
partitioning:
- - Consider the `Exp` `Bijector` applied to an `Output` which has sample,
- batch, and event (S, B, E) shape semantics. Suppose
- the `Output`'s partitioned-shape is `(S=[4], B=[2], E=[3, 3])`.
+ - Consider the `Exp` `Bijector` applied to a `Tensor` which has sample, batch,
+ and event (S, B, E) shape semantics. Suppose
+ the `Tensor`'s partitioned-shape is `(S=[4], B=[2], E=[3, 3])`.
- For `Exp`, the shape of the `Output` returned by `forward` and `inverse` is
+ For `Exp`, the shape of the `Tensor` returned by `forward` and `inverse` is
unchanged, i.e., `[4, 2, 3, 3]`. However the shape returned by
`inverse_log_det_jacobian` is `[4, 2]` because the Jacobian is a reduction
over the event dimensions.
@@ -445,7 +445,7 @@ class Bijector(object):
@property
def dtype(self):
- """dtype of `Output`s transformable by this distribution."""
+ """dtype of `Tensor`s transformable by this distribution."""
return self._dtype
@property
@@ -458,15 +458,15 @@ class Bijector(object):
return input_shape
def forward_event_shape(self, input_shape, name="forward_event_shape"):
- """Shape of a single sample from a single batch as an `int32` 1D `Output`.
+ """Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
Args:
- input_shape: `Output`, `int32` vector indicating event-portion shape
+ input_shape: `Tensor`, `int32` vector indicating event-portion shape
passed into `forward` function.
name: name to give to the op
Returns:
- forward_event_shape: `Output`, `int32` vector indicating event-portion
+ forward_event_shape: `Tensor`, `int32` vector indicating event-portion
shape after applying `forward`.
"""
with self._name_scope(name, [input_shape]):
@@ -498,15 +498,15 @@ class Bijector(object):
return output_shape
def inverse_event_shape(self, output_shape, name="inverse_event_shape"):
- """Shape of a single sample from a single batch as an `int32` 1D `Output`.
+ """Shape of a single sample from a single batch as an `int32` 1D `Tensor`.
Args:
- output_shape: `Output`, `int32` vector indicating event-portion shape
+ output_shape: `Tensor`, `int32` vector indicating event-portion shape
passed into `inverse` function.
name: name to give to the op
Returns:
- inverse_event_shape: `Output`, `int32` vector indicating event-portion
+ inverse_event_shape: `Tensor`, `int32` vector indicating event-portion
shape after applying `inverse`.
"""
with self._name_scope(name, [output_shape]):
@@ -541,12 +541,12 @@ class Bijector(object):
"""Returns the forward `Bijector` evaluation, i.e., X = g(Y).
Args:
- x: `Output`. The input to the "forward" evaluation.
+ x: `Tensor`. The input to the "forward" evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- `Output`.
+ `Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `x.dtype` is not
@@ -571,12 +571,12 @@ class Bijector(object):
"""Returns the inverse `Bijector` evaluation, i.e., X = g^{-1}(Y).
Args:
- y: `Output`. The input to the "inverse" evaluation.
+ y: `Tensor`. The input to the "inverse" evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- `Output`.
+ `Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
@@ -623,12 +623,12 @@ class Bijector(object):
Note that `forward_log_det_jacobian` is the negative of this function.
Args:
- y: `Output`. The input to the "inverse" Jacobian evaluation.
+ y: `Tensor`. The input to the "inverse" Jacobian evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- `Output`.
+ `Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
@@ -679,12 +679,12 @@ class Bijector(object):
See `inverse()`, `inverse_log_det_jacobian()` for more details.
Args:
- y: `Output`. The input to the "inverse" Jacobian evaluation.
+ y: `Tensor`. The input to the "inverse" Jacobian evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- `Output`.
+ `Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
@@ -733,12 +733,12 @@ class Bijector(object):
"""Returns both the forward_log_det_jacobian.
Args:
- x: `Output`. The input to the "forward" Jacobian evaluation.
+ x: `Tensor`. The input to the "forward" Jacobian evaluation.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- `Output`.
+ `Tensor`.
Raises:
TypeError: if `self.dtype` is specified and `y.dtype` is not
@@ -1189,7 +1189,7 @@ class Exp(Bijector):
"""Instantiates the `Exp` bijector.
Args:
- event_ndims: Scalar `int32` `Output` indicating the number of dimensions
+ event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
associated with a particular draw from the distribution.
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
@@ -1279,19 +1279,19 @@ class ScaleAndShift(Bijector):
name="scale_and_shift"):
"""Instantiates the `ScaleAndShift` bijector.
- This `Bijector` is initialized with `scale` and `shift` `Output`s, giving
+ This `Bijector` is initialized with `scale` and `shift` `Tensors`, giving
the forward operation:
```Y = g(X) = matmul(scale, X) + shift```
Args:
- shift: Numeric `Output`.
- scale: Numeric `Output` of same `dtype` as `shift`. If `event_ndims = 0`,
+ shift: Numeric `Tensor`.
+ scale: Numeric `Tensor` of same `dtype` as `shift`. If `event_ndims = 0`,
`scale` is treated like a `1x1` matrix or a batch thereof.
Otherwise, the last two dimensions of `scale` define a matrix.
`scale` must have non-negative diagonal entries. The upper triangular
part of `scale` is ignored, effectively making it lower triangular.
- event_ndims: Scalar `int32` `Output` indicating the number of dimensions
+ event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
associated with a particular draw from the distribution. Must be 0 or 1
validate_args: `Boolean` indicating whether arguments should be checked
for correctness.
@@ -1343,12 +1343,12 @@ class ScaleAndShift(Bijector):
work for, say, the left-hand argument of `batch_matmul`.
Args:
- scale: `Output`.
- event_ndims: `Output` (0D, `int32`).
+ scale: `Tensor`.
+ event_ndims: `Tensor` (0D, `int32`).
Returns:
- scale: `Output` with dims expanded according to [above] table.
- batch_ndims: `Output` (0D, `int32`). The ndims of the `batch` portion.
+ scale: `Tensor` with dims expanded according to [above] table.
+ batch_ndims: `Tensor` (0D, `int32`). The ndims of the `batch` portion.
"""
ndims = array_ops.rank(scale)
left = math_ops.select(
@@ -1747,7 +1747,7 @@ class CholeskyOuterProduct(Bijector):
"""Instantiates the `CholeskyOuterProduct` bijector.
Args:
- event_ndims: `constant` `int32` scalar `Output` indicating the number of
+ event_ndims: `constant` `int32` scalar `Tensor` indicating the number of
dimensions associated with a particular draw from the distribution. Must
be 0 or 2.
validate_args: `Boolean` indicating whether arguments should be checked
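Editor's note: a minimal sketch of the `Bijector` methods documented above, using the `Exp` bijector with `event_ndims=0`; the input values are illustrative.

```
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops import bijector

exp = bijector.Exp(event_ndims=0)
x = tf.constant([0.0, 1.0])
y = exp.forward(x)                      # e^x
x_back = exp.inverse(y)                 # log(y)
ildj = exp.inverse_log_det_jacobian(y)  # reduced over event dims (none here)
```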
diff --git a/tensorflow/contrib/distributions/python/ops/categorical.py b/tensorflow/contrib/distributions/python/ops/categorical.py
index abcf5c1dcc..908690c1ce 100644
--- a/tensorflow/contrib/distributions/python/ops/categorical.py
+++ b/tensorflow/contrib/distributions/python/ops/categorical.py
@@ -86,12 +86,12 @@ class Categorical(distribution.Distribution):
"""Initialize Categorical distributions using class log-probabilities.
Args:
- logits: An N-D `Output`, `N >= 1`, representing the log probabilities
+ logits: An N-D `Tensor`, `N >= 1`, representing the log probabilities
of a set of Categorical distributions. The first `N - 1` dimensions
index into a batch of independent distributions and the last dimension
represents a vector of logits for each class. Only one of `logits` or
`p` should be passed in.
- p: An N-D `Output`, `N >= 1`, representing the probabilities
+ p: An N-D `Tensor`, `N >= 1`, representing the probabilities
of a set of Categorical distributions. The first `N - 1` dimensions
index into a batch of independent distributions and the last dimension
represents a vector of probabilities for each class. Only one of
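Editor's note: a minimal sketch of the `Categorical` constructor documented above; the logits and sample count are illustrative assumptions.

```
import tensorflow as tf
from tensorflow.contrib import distributions

# Two independent 3-class Categoricals; the last dimension indexes classes.
c = distributions.Categorical(logits=tf.constant([[1.0, 0.0, -1.0],
                                                  [0.0, 0.0, 0.0]]))
draws = c.sample_n(4)  # shape (4, 2), integer class ids
```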
diff --git a/tensorflow/contrib/distributions/python/ops/distribution.py b/tensorflow/contrib/distributions/python/ops/distribution.py
index a9dddb6356..5a3583c22a 100644
--- a/tensorflow/contrib/distributions/python/ops/distribution.py
+++ b/tensorflow/contrib/distributions/python/ops/distribution.py
@@ -208,7 +208,7 @@ class Distribution(_BaseDistribution):
`sample_n`.
`sample_n_shape = (n,) + batch_shape + event_shape`, where `sample_n_shape` is
- the shape of the `Output` returned from `sample_n`, `n` is the number of
+ the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
@@ -229,7 +229,7 @@ class Distribution(_BaseDistribution):
# `event_shape` is `TensorShape([])`.
event_shape = u.get_event_shape()
- # `event_shape_t` is an `Output` which will evaluate to [].
+ # `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape
# Sampling returns a sample per distribution. `samples` has shape
@@ -321,7 +321,7 @@ class Distribution(_BaseDistribution):
name: A name for this distribution. Default: subclass name.
Raises:
- ValueError: if any member of graph_parents is `None` or not an `Output`.
+ ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
@@ -344,12 +344,12 @@ class Distribution(_BaseDistribution):
Subclasses should override static method `_param_shapes`.
Args:
- sample_shape: `Output` or python list/tuple. Desired shape of a call to
+ sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
- `dict` of parameter name to `Output` shapes.
+ `dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@@ -396,7 +396,7 @@ class Distribution(_BaseDistribution):
@property
def dtype(self):
- """The `DType` of `Output`s handled by this `Distribution`."""
+ """The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
@@ -460,7 +460,7 @@ class Distribution(_BaseDistribution):
raise NotImplementedError("batch_shape is not implemented")
def batch_shape(self, name="batch_shape"):
- """Shape of a single sample from a single event index as a 1-D `Output`.
+ """Shape of a single sample from a single event index as a 1-D `Tensor`.
The product of the dimensions of the `batch_shape` is the number of
independent distributions of this kind the instance represents.
@@ -469,7 +469,7 @@ class Distribution(_BaseDistribution):
name: name to give to the op
Returns:
- batch_shape: `Output`.
+ batch_shape: `Tensor`.
"""
with self._name_scope(name):
return self._batch_shape()
@@ -491,13 +491,13 @@ class Distribution(_BaseDistribution):
raise NotImplementedError("event_shape is not implemented")
def event_shape(self, name="event_shape"):
- """Shape of a single sample from a single batch as a 1-D int32 `Output`.
+ """Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
- event_shape: `Output`.
+ event_shape: `Tensor`.
"""
with self._name_scope(name):
return self._event_shape()
@@ -526,13 +526,13 @@ class Distribution(_BaseDistribution):
sample.
Args:
- sample_shape: 0D or 1D `int32` `Output`. Shape of the generated samples.
+ sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- samples: an `Output` with prepended dimensions `sample_shape`.
+ samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
@@ -552,14 +552,14 @@ class Distribution(_BaseDistribution):
"""Generate `n` samples.
Args:
- n: `Scalar` `Output` of type `int32` or `int64`, the number of
+ n: `Scalar` `Tensor` of type `int32` or `int64`, the number of
observations to sample.
seed: Python integer seed for RNG
name: name to give to the op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- samples: an `Output` with a prepended dimension (n,).
+ samples: a `Tensor` with a prepended dimension (n,).
Raises:
TypeError: if `n` is not an integer type.
@@ -599,12 +599,12 @@ class Distribution(_BaseDistribution):
"""Log probability density/mass function (depending on `is_continuous`).
Args:
- value: `float` or `double` `Output`.
+ value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- log_prob: an `Output` of shape `sample_shape(x) + self.batch_shape` with
+ log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
@@ -621,12 +621,12 @@ class Distribution(_BaseDistribution):
"""Probability density/mass function (depending on `is_continuous`).
Args:
- value: `float` or `double` `Output`.
+ value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- prob: an `Output` of shape `sample_shape(x) + self.batch_shape` with
+ prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
@@ -656,12 +656,12 @@ class Distribution(_BaseDistribution):
`x << -1`.
Args:
- value: `float` or `double` `Output`.
+ value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- logcdf: an `Output` of shape `sample_shape(x) + self.batch_shape` with
+ logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
@@ -687,12 +687,12 @@ class Distribution(_BaseDistribution):
```
Args:
- value: `float` or `double` `Output`.
+ value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- cdf: an `Output` of shape `sample_shape(x) + self.batch_shape` with
+ cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
@@ -724,12 +724,12 @@ class Distribution(_BaseDistribution):
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
- value: `float` or `double` `Output`.
+ value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- `Output` of shape `sample_shape(x) + self.batch_shape` with values of type
+ `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
with self._name_scope(name, values=[value]):
@@ -758,7 +758,7 @@ class Distribution(_BaseDistribution):
```
Args:
- value: `float` or `double` `Output`.
+ value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
@@ -820,12 +820,12 @@ class Distribution(_BaseDistribution):
"""Log probability density function.
Args:
- value: `float` or `double` `Output`.
+ value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- log_prob: an `Output` of shape `sample_shape(x) + self.batch_shape` with
+ log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
@@ -842,12 +842,12 @@ class Distribution(_BaseDistribution):
"""Probability density function.
Args:
- value: `float` or `double` `Output`.
+ value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- prob: an `Output` of shape `sample_shape(x) + self.batch_shape` with
+ prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
@@ -864,12 +864,12 @@ class Distribution(_BaseDistribution):
"""Log probability mass function.
Args:
- value: `float` or `double` `Output`.
+ value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- log_pmf: an `Output` of shape `sample_shape(x) + self.batch_shape` with
+ log_pmf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
@@ -886,12 +886,12 @@ class Distribution(_BaseDistribution):
"""Probability mass function.
Args:
- value: `float` or `double` `Output`.
+ value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
- pmf: an `Output` of shape `sample_shape(x) + self.batch_shape` with
+ pmf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
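Editor's note: a minimal sketch of the shape semantics documented above (`sample_n_shape = (n,) + batch_shape + event_shape`, and `batch_shape` as a method returning a 1-D `Tensor`); the `Normal` distribution and shapes are illustrative.

```
import tensorflow as tf
from tensorflow.contrib import distributions

u = distributions.Normal(mu=tf.zeros([2, 3]), sigma=tf.ones([2, 3]))
s = u.sample_n(5)                # (n,) + batch_shape + event_shape = (5, 2, 3)
lp = u.log_prob(s)               # sample_shape + batch_shape = (5, 2, 3)
batch_shape_t = u.batch_shape()  # 1-D int32 Tensor; evaluates to [2, 3]
```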
diff --git a/tensorflow/contrib/distributions/python/ops/distribution_util.py b/tensorflow/contrib/distributions/python/ops/distribution_util.py
index a036843955..fb46c3c52e 100644
--- a/tensorflow/contrib/distributions/python/ops/distribution_util.py
+++ b/tensorflow/contrib/distributions/python/ops/distribution_util.py
@@ -41,8 +41,8 @@ def assert_close(
"""Assert that that x and y are within machine epsilon of each other.
Args:
- x: Numeric `Output`
- y: Numeric `Output`
+ x: Numeric `Tensor`
+ y: Numeric `Tensor`
data: The tensors to print out if the condition is `False`. Defaults to
error message and first few entries of `x` and `y`.
summarize: Print this many entries of each tensor.
@@ -79,7 +79,7 @@ def assert_integer_form(
"""Assert that x has integer components (or floats equal to integers).
Args:
- x: Numeric `Output`
+ x: Numeric `Tensor`
data: The tensors to print out if the condition is `False`. Defaults to
error message and first few entries of `x` and `y`.
summarize: Print this many entries of each tensor.
@@ -110,8 +110,8 @@ def get_logits_and_prob(
"""Converts logits to probabilities and vice-versa, and returns both.
Args:
- logits: Numeric `Output` representing log-odds.
- p: Numeric `Output` representing probabilities.
+ logits: Numeric `Tensor` representing log-odds.
+ p: Numeric `Tensor` representing probabilities.
multidimensional: `Boolean`, default `False`.
If `True`, represents whether the last dimension of `logits` or `p`,
a [N1, N2, ... k] dimensional tensor, represent the
@@ -185,14 +185,14 @@ def log_combinations(n, counts, name="log_combinations"):
where `i` runs over all `k` classes.
Args:
- n: Numeric `Output` broadcastable with `counts`. This represents `n`
+ n: Numeric `Tensor` broadcastable with `counts`. This represents `n`
outcomes.
- counts: Numeric `Output` broadcastable with `n`. This represents counts
+ counts: Numeric `Tensor` broadcastable with `n`. This represents counts
in `k` classes, where `k` is the last dimension of the tensor.
name: A name for this operation (optional).
Returns:
- `Output` representing the multinomial coefficient between `n` and `counts`.
+ `Tensor` representing the multinomial coefficient between `n` and `counts`.
"""
# First a bit about the number of ways counts could have come in:
# E.g. if counts = [1, 2], then this is 3 choose 2.
@@ -247,16 +247,16 @@ def matrix_diag_transform(matrix, transform=None, name=None):
```
Args:
- matrix: Rank `R` `Output`, `R >= 2`, where the last two dimensions are
+ matrix: Rank `R` `Tensor`, `R >= 2`, where the last two dimensions are
equal.
- transform: Element-wise function mapping `Output`s to `Output`s. To
+ transform: Element-wise function mapping `Tensors` to `Tensors`. To
be applied to the diagonal of `matrix`. If `None`, `matrix` is returned
unchanged. Defaults to `None`.
name: A name to give created ops.
Defaults to "matrix_diag_transform".
Returns:
- An `Output` with same shape and `dtype` as `matrix`.
+ A `Tensor` with same shape and `dtype` as `matrix`.
"""
with ops.name_scope(name, "matrix_diag_transform", [matrix]):
matrix = ops.convert_to_tensor(matrix, name="matrix")
@@ -295,13 +295,13 @@ def rotate_transpose(x, shift, name="rotate_transpose"):
```
Args:
- x: `Output`.
- shift: `Output`. Number of dimensions to transpose left (shift<0) or
+ x: `Tensor`.
+ shift: `Tensor`. Number of dimensions to transpose left (shift<0) or
transpose right (shift>0).
name: `String`. The name to give this op.
Returns:
- rotated_x: Input `Output` with dimensions circularly rotated by shift.
+ rotated_x: Input `Tensor` with dimensions circularly rotated by shift.
Raises:
TypeError: if shift is not integer type.
@@ -348,18 +348,18 @@ def pick_vector(cond,
true_vector,
false_vector,
name="pick_vector"):
- """Picks possibly different length row `Output`s based on condition.
+ """Picks possibly different length row `Tensor`s based on condition.
- Value `Output`s should have exactly one dimension.
+ Value `Tensor`s should have exactly one dimension.
If `cond` is a python Boolean or `tf.constant` then either `true_vector` or
`false_vector` is immediately returned. I.e., no graph nodes are created and
no validation happens.
Args:
- cond: `Output`. Must have `dtype=tf.bool` and be scalar.
- true_vector: `Output` of one dimension. Returned when cond is `True`.
- false_vector: `Output` of one dimension. Returned when cond is `False`.
+ cond: `Tensor`. Must have `dtype=tf.bool` and be scalar.
+ true_vector: `Tensor` of one dimension. Returned when cond is `True`.
+ false_vector: `Tensor` of one dimension. Returned when cond is `False`.
name: `String`. The name to give this op.
Example:
@@ -372,7 +372,7 @@ def pick_vector(cond,
```
Returns:
- true_or_false_vector: `Output`.
+ true_or_false_vector: `Tensor`.
Raises:
TypeError: if `cond.dtype != tf.bool`
@@ -439,14 +439,14 @@ def fill_lower_triangular(x, validate_args=False, name="fill_lower_triangular"):
`distribution_util_test.py`, function `_fill_lower_triangular`.
Args:
- x: `Output` representing lower triangular elements.
+ x: `Tensor` representing lower triangular elements.
validate_args: `Boolean`, default `False`. Whether to ensure the shape of
`x` can be mapped to a lower triangular matrix (controls non-static checks
only).
name: `String`. The name to give this op.
Returns:
- tril: `Output` with lower triangular elements filled from `x`.
+ tril: `Tensor` with lower triangular elements filled from `x`.
Raises:
ValueError: if shape if `x` has static shape which cannot be mapped to a
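Editor's note: a minimal sketch of `matrix_diag_transform` per the docstring above; the use of `tf.matrix_band_part` and `tf.nn.softplus` to build a valid Cholesky factor is an illustrative assumption for this vintage of the API.

```
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops import distribution_util

m = tf.random_normal([4, 4])
# Lower-triangular factor with a positive diagonal via softplus on the diag.
chol = distribution_util.matrix_diag_transform(
    tf.matrix_band_part(m, -1, 0), transform=tf.nn.softplus)
```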
diff --git a/tensorflow/contrib/distributions/python/ops/mvn.py b/tensorflow/contrib/distributions/python/ops/mvn.py
index 3508fd02bb..e9d5647385 100644
--- a/tensorflow/contrib/distributions/python/ops/mvn.py
+++ b/tensorflow/contrib/distributions/python/ops/mvn.py
@@ -47,7 +47,7 @@ __all__ = [
]
_mvn_prob_note = """
-`x` is a batch vector with compatible shape if `x` is an `Output` whose
+`x` is a batch vector with compatible shape if `x` is a `Tensor` whose
shape can be broadcast up to either:
```
@@ -372,7 +372,7 @@ class MultivariateNormalDiag(_MultivariateNormalOperatorPD):
Args:
mu: Rank `N + 1` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
- diag_stdev: Rank `N + 1` `Output` with same `dtype` and shape as `mu`,
+ diag_stdev: Rank `N + 1` `Tensor` with same `dtype` and shape as `mu`,
representing the standard deviations. Must be positive.
validate_args: `Boolean`, default `False`. Whether to validate
input with asserts. If `validate_args` is `False`,
@@ -612,7 +612,7 @@ class MultivariateNormalCholesky(_MultivariateNormalOperatorPD):
Args:
mu: `(N+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
- chol: `(N+2)-D` `Output` with same `dtype` as `mu` and shape
+ chol: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. The upper triangular part is ignored (treated as
though it is zero), and the diagonal must be positive.
validate_args: `Boolean`, default `False`. Whether to validate input
@@ -696,7 +696,7 @@ class MultivariateNormalFull(_MultivariateNormalOperatorPD):
Args:
mu: `(N+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
- sigma: `(N+2)-D` `Output` with same `dtype` as `mu` and shape
+ sigma: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. Each batch member must be positive definite.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
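A construction sketch based on the argument docs above; exact export paths and method names in this contrib-era API may vary by version:
```
import tensorflow as tf

ds = tf.contrib.distributions
mu = tf.constant([[1., 2., 3.]])          # shape [N1=1, k=3]
diag_stdev = tf.constant([[1., .5, 2.]])  # same dtype/shape as mu, positive
mvn = ds.MultivariateNormalDiag(mu=mu, diag_stdev=diag_stdev)
p = mvn.prob(tf.constant([0., 0., 0.]))   # batch vector with compatible shape
```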
diff --git a/tensorflow/contrib/distributions/python/ops/operator_pd.py b/tensorflow/contrib/distributions/python/ops/operator_pd.py
index c679104c08..283adf8b79 100644
--- a/tensorflow/contrib/distributions/python/ops/operator_pd.py
+++ b/tensorflow/contrib/distributions/python/ops/operator_pd.py
@@ -122,11 +122,11 @@ class OperatorPDBase(object):
"""Add matrix represented by this operator to `mat`. Equiv to `A + mat`.
Args:
- mat: `Output` with same `dtype` and shape broadcastable to `self`.
+ mat: `Tensor` with same `dtype` and shape broadcastable to `self`.
name: A name to give this `Op`.
Returns:
- An `Output` with broadcast shape and same `dtype` as `self`.
+ A `Tensor` with broadcast shape and same `dtype` as `self`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs + [mat]):
@@ -165,11 +165,11 @@ class OperatorPDBase(object):
```
Args:
- x: `Output` with compatible batch vector shape and same `dtype` as self.
+ x: `Tensor` with compatible batch vector shape and same `dtype` as self.
name: A name scope to use for ops added by this method.
Returns:
- `Output` with shape `[M1,...,Mm] + [N1,...,Nn]` and same `dtype`
+ `Tensor` with shape `[M1,...,Mm] + [N1,...,Nn]` and same `dtype`
as `self`.
"""
with ops.name_scope(self.name):
@@ -340,7 +340,7 @@ class OperatorPDBase(object):
name: A name scope to use for ops added by this method.
Returns:
- `int32` `Output`
+ `int32` `Tensor`
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs):
@@ -361,7 +361,7 @@ class OperatorPDBase(object):
name: A name scope to use for ops added by this method.
Returns:
- `int32` `Output`
+ `int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with ops.name_scope(self.name):
@@ -378,7 +378,7 @@ class OperatorPDBase(object):
name: A name scope to use for ops added by this method.
Returns:
- `int32` `Output`
+ `int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with ops.name_scope(self.name):
@@ -395,7 +395,7 @@ class OperatorPDBase(object):
name: A name scope to use for ops added by this method.
Returns:
- `int32` `Output`
+ `int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with ops.name_scope(self.name):
@@ -413,7 +413,7 @@ class OperatorPDBase(object):
name: A name scope to use for ops added by this method.
Returns:
- `int32` `Output`
+ `int32` `Tensor`
"""
# Derived classes get this "for free" once .shape() is implemented.
with ops.name_scope(self.name):
@@ -431,7 +431,7 @@ class OperatorPDBase(object):
```
Args:
- x: `Output` with shape `self.batch_shape + [k, r]` and same `dtype` as
+ x: `Tensor` with shape `self.batch_shape + [k, r]` and same `dtype` as
this `Operator`.
transpose_x: If `True`, `x` is transposed before multiplication.
name: A name to give this `Op`.
@@ -465,7 +465,7 @@ class OperatorPDBase(object):
```
Args:
- x: `Output` with shape `self.batch_shape + [k, r]` and same `dtype` as
+ x: `Tensor` with shape `self.batch_shape + [k, r]` and same `dtype` as
this `Operator`.
transpose_x: If `True`, `x` is transposed before multiplication.
name: A name scope to use for ops added by this method.
@@ -519,12 +519,12 @@ class OperatorPDBase(object):
```
Args:
- rhs: `Output` with same `dtype` as this operator and compatible shape,
+ rhs: `Tensor` with same `dtype` as this operator and compatible shape,
`rhs.shape = self.shape[:-1] + [r]` for `r >= 1`.
name: A name scope to use for ops added by this method.
Returns:
- `Output` with same `dtype` and shape as `x`.
+ `Tensor` with same `dtype` and shape as `x`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[rhs] + self.inputs):
@@ -571,12 +571,12 @@ class OperatorPDBase(object):
```
Args:
- rhs: `Output` with same `dtype` as this operator and compatible shape,
+ rhs: `Tensor` with same `dtype` as this operator and compatible shape,
`rhs.shape = self.shape[:-1] + [r]` for `r >= 1`.
name: A name scope to use for ops added by this method.
Returns:
- `Output` with same `dtype` and shape as `x`.
+ `Tensor` with same `dtype` and shape as `x`.
"""
with ops.name_scope(self.name):
with ops.name_scope(name, values=[rhs] + self.inputs):
@@ -642,12 +642,12 @@ def flip_matrix_to_vector(mat, batch_shape, static_batch_shape):
See also: flip_vector_to_matrix.
Args:
- mat: `Output` with rank `>= 2`.
- batch_shape: `int32` `Output` giving leading "batch" shape of result.
+ mat: `Tensor` with rank `>= 2`.
+ batch_shape: `int32` `Tensor` giving leading "batch" shape of result.
static_batch_shape: `TensorShape` object giving batch shape of result.
Returns:
- `Output` with same elements as `mat` but with shape `batch_shape + [k]`.
+ `Tensor` with same elements as `mat` but with shape `batch_shape + [k]`.
"""
mat = ops.convert_to_tensor(mat, name="mat")
if (static_batch_shape.is_fully_defined()
@@ -717,12 +717,12 @@ def flip_vector_to_matrix(vec, batch_shape, static_batch_shape):
See also: flip_matrix_to_vector.
Args:
- vec: `Output` with shape `[M1,...,Mm] + [N1,...,Nn] + [k]`
- batch_shape: `int32` `Output`.
+ vec: `Tensor` with shape `[M1,...,Mm] + [N1,...,Nn] + [k]`
+ batch_shape: `int32` `Tensor`.
static_batch_shape: `TensorShape` with statically determined batch shape.
Returns:
- `Output` with same `dtype` as `vec` and new shape.
+ `Tensor` with same `dtype` as `vec` and new shape.
"""
vec = ops.convert_to_tensor(vec, name="vec")
if (
@@ -805,17 +805,17 @@ def extract_batch_shape(x, num_event_dims, name="extract_batch_shape"):
"""Extract the batch shape from `x`.
Assuming `x.shape = batch_shape + event_shape`, when `event_shape` has
- `num_event_dims` dimensions. This `Op` returns the batch shape `Output`.
+ `num_event_dims` dimensions. This `Op` returns the batch shape `Tensor`.
Args:
- x: `Output` with rank at least `num_event_dims`. If rank is not high enough
+ x: `Tensor` with rank at least `num_event_dims`. If rank is not high enough
this `Op` will fail.
- num_event_dims: `int32` scalar `Output`. The number of trailing dimensions
+ num_event_dims: `int32` scalar `Tensor`. The number of trailing dimensions
in `x` to be considered as part of `event_shape`.
name: A name to prepend to created `Ops`.
Returns:
- batch_shape: `1-D` `int32` `Output`
+ batch_shape: `1-D` `int32` `Tensor`
"""
with ops.name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
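The shape arithmetic behind `extract_batch_shape` is simply "drop the trailing event dimensions"; a NumPy sketch:
```
import numpy as np

x = np.zeros((2, 3, 4, 4))  # batch_shape [2, 3] + event_shape [4, 4]
num_event_dims = 2
batch_shape = np.asarray(x.shape[:-num_event_dims], dtype=np.int32)
print(batch_shape)          # [2 3]
```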
diff --git a/tensorflow/contrib/distributions/python/ops/operator_pd_identity.py b/tensorflow/contrib/distributions/python/ops/operator_pd_identity.py
index ee224d7125..a95c34de0d 100644
--- a/tensorflow/contrib/distributions/python/ops/operator_pd_identity.py
+++ b/tensorflow/contrib/distributions/python/ops/operator_pd_identity.py
@@ -48,7 +48,7 @@ class OperatorPDIdentity(operator_pd.OperatorPDBase):
"""Initialize an `OperatorPDIdentity`.
Args:
- shape: `int32` rank 1 `Output` of length at least 2, and with the last
+ shape: `int32` rank 1 `Tensor` of length at least 2, and with the last
two entries equal (since this is a square matrix).
dtype: Data type of the matrix that this operator represents.
verify_pd: `Boolean`, if `True`, asserts are added to the initialization
diff --git a/tensorflow/contrib/distributions/python/ops/operator_pd_vdvt_update.py b/tensorflow/contrib/distributions/python/ops/operator_pd_vdvt_update.py
index eff3133c44..819a6da47c 100644
--- a/tensorflow/contrib/distributions/python/ops/operator_pd_vdvt_update.py
+++ b/tensorflow/contrib/distributions/python/ops/operator_pd_vdvt_update.py
@@ -88,9 +88,9 @@ class OperatorPDSqrtVDVTUpdate(operator_pd.OperatorPDBase):
Args:
operator: Subclass of `OperatorPDBase`. Represents the (batch) positive
definite matrix `M` in `R^{k x k}`.
- v: `Output` defining batch matrix of same `dtype` and `batch_shape` as
+ v: `Tensor` defining batch matrix of same `dtype` and `batch_shape` as
`operator`, and last two dimensions of shape `(k, r)`.
- diag: Optional `Output` defining batch vector of same `dtype` and
+ diag: Optional `Tensor` defining batch vector of same `dtype` and
`batch_shape` as `operator`, and last dimension of size `r`. If `None`,
the update becomes `VV^T` rather than `VDV^T`.
verify_pd: `Boolean`. If `True`, add asserts that `diag > 0`, which,
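A dense NumPy sketch of the `VDV^T` update named above (the operator itself never materializes these matrices; this only shows the algebra):
```
import numpy as np

k, r = 4, 2
V = np.random.randn(k, r)          # batch matrix with trailing dims (k, r)
d = np.array([.5, 2.])             # diag > 0, last dimension of size r
vdvt = V.dot(np.diag(d)).dot(V.T)  # the V D V^T update
vvt = V.dot(V.T)                   # what diag=None degenerates to
print(vdvt.shape, vvt.shape)       # (4, 4) (4, 4)
```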
diff --git a/tensorflow/contrib/distributions/python/ops/quantized_distribution.py b/tensorflow/contrib/distributions/python/ops/quantized_distribution.py
index ec034b0718..bca3f99604 100644
--- a/tensorflow/contrib/distributions/python/ops/quantized_distribution.py
+++ b/tensorflow/contrib/distributions/python/ops/quantized_distribution.py
@@ -38,11 +38,11 @@ def _logsum_expbig_minus_expsmall(big, small):
To work correctly, we should have the pointwise relation: `small <= big`.
Args:
- big: Numeric `Output`
- small: Numeric `Output` with same `dtype` as `big` and broadcastable shape.
+ big: Numeric `Tensor`
+ small: Numeric `Tensor` with same `dtype` as `big` and broadcastable shape.
Returns:
- `Output` of same `dtype` of `big` and broadcast shape.
+ `Tensor` of same `dtype` as `big` and broadcast shape.
"""
with ops.name_scope("logsum_expbig_minus_expsmall", values=[small, big]):
return math_ops.log(1. - math_ops.exp(small - big)) + big
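The line above implements the identity `log(exp(big) - exp(small)) = big + log(1 - exp(small - big))`, which stays finite where the naive form overflows; a quick NumPy check:
```
import numpy as np

b, s = 10., 9.
direct = np.log(np.exp(b) - np.exp(s))
stable = np.log(1. - np.exp(s - b)) + b
print(np.allclose(direct, stable))                 # True
print(np.log(1. - np.exp(999. - 1000.)) + 1000.)   # ~999.54; np.exp(1000.) overflows
```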
@@ -193,11 +193,11 @@ class QuantizedDistribution(distributions.Distribution):
Args:
distribution: The base distribution class to transform. Typically an
instance of `Distribution`.
- lower_cutoff: `Output` with same `dtype` as this distribution and shape
+ lower_cutoff: `Tensor` with same `dtype` as this distribution and shape
able to be added to samples. Should be a whole number. Default `None`.
If provided, base distribution's pdf/pmf should be defined at
`lower_cutoff`.
- upper_cutoff: `Output` with same `dtype` as this distribution and shape
+ upper_cutoff: `Tensor` with same `dtype` as this distribution and shape
able to be added to samples. Should be a whole number. Default `None`.
If provided, base distribution's pdf/pmf should be defined at
`upper_cutoff - 1`.
diff --git a/tensorflow/contrib/distributions/python/ops/shape.py b/tensorflow/contrib/distributions/python/ops/shape.py
index d8f419837e..f5b2d94a8a 100644
--- a/tensorflow/contrib/distributions/python/ops/shape.py
+++ b/tensorflow/contrib/distributions/python/ops/shape.py
@@ -33,12 +33,12 @@ class _DistributionShape(object):
"""Manage and manipulate `Distribution` shape.
Terminology:
- Recall that an `Output` has:
- - `shape`: size of `Output` dimensions,
- - `ndims`: size of `shape`; number of `Output` dimensions,
+ Recall that a `Tensor` has:
+ - `shape`: size of `Tensor` dimensions,
+ - `ndims`: size of `shape`; number of `Tensor` dimensions,
- `dims`: indexes into `shape`; useful for transpose, reduce.
- `Output`s sampled from a `Distribution` can be partitioned by `sample_dims`,
+ `Tensor`s sampled from a `Distribution` can be partitioned by `sample_dims`,
`batch_dims`, and `event_dims`. To understand the semantics of these
dimensions, consider when two of the three are fixed and the remaining
is varied:
@@ -49,12 +49,12 @@ class _DistributionShape(object):
- `event_dims`: indexes event coordinates from one sample.
The `sample`, `batch`, and `event` dimensions constitute the entirety of a
- `Distribution` `Output`'s shape.
+ `Distribution` `Tensor`'s shape.
The dimensions are always in `sample`, `batch`, `event` order.
Purpose:
- This class partitions `Output` notions of `shape`, `ndims`, and `dims` into
+ This class partitions `Tensor` notions of `shape`, `ndims`, and `dims` into
`Distribution` notions of `sample,` `batch,` and `event` dimensions. That
is, it computes any of:
@@ -64,10 +64,10 @@ class _DistributionShape(object):
sample_ndims batch_ndims event_ndims
```
- for a given `Output`, e.g., the result of
+ for a given `Tensor`, e.g., the result of
`Distribution.sample(sample_shape=...)`.
- For a given `Output`, this class computes the above table using minimal
+ For a given `Tensor`, this class computes the above table using minimal
information: `batch_ndims` and `event_ndims`.
Examples of `Distribution` `shape` semantics:
@@ -96,7 +96,7 @@ class _DistributionShape(object):
reduction_indices=batch_dims)
```
- The `Laplace` distribution generates an `Output` of shape `[1000]`. When
+ The `Laplace` distribution generates a `Tensor` of shape `[1000]`. When
fed to a `Normal`, this is interpreted as 1000 different locations, i.e.,
1000 non-identical Normals. Therefore a single call to `pdf(x)` yields
1000 probabilities, one for every location. The average over this batch
@@ -153,8 +153,8 @@ class _DistributionShape(object):
performance degradation because data must be switched from GPU to CPU.
For example, when `validate_args=True` and `event_ndims` is a
- non-constant `Output`, it is checked to be a non-negative integer at graph
- execution. (Same for `batch_ndims`). Constant `Output`s and non-`Output`
+ non-constant `Tensor`, it is checked to be a non-negative integer at graph
+ execution. (Same for `batch_ndims`). Constant `Tensor`s and non-`Tensor`
arguments are always checked for correctness since this can be done for
"free," i.e., during graph construction.
"""
@@ -170,19 +170,19 @@ class _DistributionShape(object):
`Distribution`. They may only be known at graph execution.
If both `batch_ndims` and `event_ndims` are python scalars (rather than
- either being an `Output`), functions in this class automatically perform
+ either being a `Tensor`), functions in this class automatically perform
sanity checks during graph construction.
Args:
- batch_ndims: `Output`. Number of `dims` (`rank`) of the batch portion of
- indexes of an `Output`. A "batch" is a non-identical distribution, i.e,
+ batch_ndims: `Tensor`. Number of `dims` (`rank`) of the batch portion of
+ indexes of a `Tensor`. A "batch" is a non-identical distribution, i.e.,
Normal with different parameters.
- event_ndims: `Output`. Number of `dims` (`rank`) of the event portion of
- indexes of an `Output`. An "event" is what is sampled from a
+ event_ndims: `Tensor`. Number of `dims` (`rank`) of the event portion of
+ indexes of a `Tensor`. An "event" is what is sampled from a
distribution, i.e., a trivariate Normal has an event shape of [3] and a
4 dimensional Wishart has an event shape of [4, 4].
validate_args: `Boolean`, default `False`. When `True`, non-`tf.constant`
- `Output` arguments are checked for correctness. (`tf.constant`
+ `Tensor` arguments are checked for correctness. (`tf.constant`
arguments are always checked.)
name: `String`. The name prepended to Ops created by this class.
@@ -226,18 +226,18 @@ class _DistributionShape(object):
@property
def validate_args(self):
- """Returns True if graph-runtime `Output` checks are enabled."""
+ """Returns True if graph-runtime `Tensor` checks are enabled."""
return self._validate_args
def get_ndims(self, x, name="get_ndims"):
- """Get `Output` number of dimensions (rank).
+ """Get `Tensor` number of dimensions (rank).
Args:
- x: `Output`.
+ x: `Tensor`.
name: `String`. The name to give this op.
Returns:
- ndims: Scalar number of dimensions associated with an `Output`.
+ ndims: Scalar number of dimensions associated with a `Tensor`.
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
@@ -250,11 +250,11 @@ class _DistributionShape(object):
"""Returns number of dimensions corresponding to iid draws ("sample").
Args:
- x: `Output`.
+ x: `Tensor`.
name: `String`. The name to give this op.
Returns:
- sample_ndims: `Output` (0D, `int32`).
+ sample_ndims: `Tensor` (0D, `int32`).
Raises:
ValueError: if `sample_ndims` is calculated to be negative.
@@ -295,13 +295,13 @@ class _DistributionShape(object):
```
Args:
- x: `Output`.
+ x: `Tensor`.
name: `String`. The name to give this op.
Returns:
- sample_dims: `Output` (1D, `int32`).
- batch_dims: `Output` (1D, `int32`).
- event_dims: `Output` (1D, `int32`).
+ sample_dims: `Tensor` (1D, `int32`).
+ batch_dims: `Tensor` (1D, `int32`).
+ event_dims: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
def make_dims(start_sum, size, name):
@@ -323,16 +323,16 @@ class _DistributionShape(object):
self.event_ndims, name="event_dims"))
def get_shape(self, x, name="get_shape"):
- """Returns `Output`'s shape partitioned into `sample`, `batch`, `event`.
+ """Returns `Tensor`'s shape partitioned into `sample`, `batch`, `event`.
Args:
- x: `Output`.
+ x: `Tensor`.
name: `String`. The name to give this op.
Returns:
- sample_shape: `Output` (1D, `int32`).
- batch_shape: `Output` (1D, `int32`).
- event_shape: `Output` (1D, `int32`).
+ sample_shape: `Tensor` (1D, `int32`).
+ batch_shape: `Tensor` (1D, `int32`).
+ event_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
@@ -359,7 +359,7 @@ class _DistributionShape(object):
def make_batch_of_event_sample_matrices(
self, x, name="make_batch_of_event_sample_matrices"):
- """Reshapes/transposes `Distribution` `Output` from S+B+E to B_+E_+S_.
+ """Reshapes/transposes `Distribution` `Tensor` from S+B+E to B_+E_+S_.
Where:
- `B_ = B if B else [1]`,
@@ -367,12 +367,12 @@ class _DistributionShape(object):
- `S_ = [tf.reduce_prod(S)]`.
Args:
- x: `Output`.
+ x: `Tensor`.
name: `String`. The name to give this op.
Returns:
- x: `Output`. Input transposed/reshaped to `B_+E_+S_`.
- sample_shape: `Output` (1D, `int32`).
+ x: `Tensor`. Input transposed/reshaped to `B_+E_+S_`.
+ sample_shape: `Tensor` (1D, `int32`).
"""
with self._name_scope(name, values=[x]):
x = ops.convert_to_tensor(x, name="x")
@@ -388,7 +388,7 @@ class _DistributionShape(object):
def undo_make_batch_of_event_sample_matrices(
self, x, sample_shape, name="undo_make_batch_of_event_sample_matrices"):
- """Reshapes/transposes `Distribution` `Output` from B_+E_+S_ to S+B+E.
+ """Reshapes/transposes `Distribution` `Tensor` from B_+E_+S_ to S+B+E.
Where:
- `B_ = B if B else [1]`,
@@ -398,12 +398,12 @@ class _DistributionShape(object):
This function "reverses" `make_batch_of_event_sample_matrices`.
Args:
- x: `Output` of shape `B_+E_+S_`.
- sample_shape: `Output` (1D, `int32`).
+ x: `Tensor` of shape `B_+E_+S_`.
+ sample_shape: `Tensor` (1D, `int32`).
name: `String`. The name to give this op.
Returns:
- x: `Output`. Input transposed/reshaped to `S+B+E`.
+ x: `Tensor`. Input transposed/reshaped to `S+B+E`.
"""
with self._name_scope(name, values=[x, sample_shape]):
x = ops.convert_to_tensor(x, name="x")
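A NumPy sketch of the `S+B+E -> B_+E_+S_` reshuffle described above, assuming sample shape `S=[2, 3]`, batch `B=[4]`, and event `E=[5]`:
```
import numpy as np

x = np.zeros((2, 3, 4, 5))         # laid out as S + B + E
x = np.transpose(x, [2, 3, 0, 1])  # -> B + E + S
x = np.reshape(x, (4, 5, 2 * 3))   # -> B_ + E_ + [prod(S)]
print(x.shape)                     # (4, 5, 6)
```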
diff --git a/tensorflow/contrib/distributions/python/ops/wishart.py b/tensorflow/contrib/distributions/python/ops/wishart.py
index 288d958ca2..ae106628cc 100644
--- a/tensorflow/contrib/distributions/python/ops/wishart.py
+++ b/tensorflow/contrib/distributions/python/ops/wishart.py
@@ -170,7 +170,7 @@ class _WishartOperatorPD(distribution.Distribution):
@property
def cholesky_input_output_matrices(self):
- """Boolean indicating if `Output` input/outputs are Cholesky factorized."""
+ """Boolean indicating if `Tensor` input/outputs are Cholesky factorized."""
return self._cholesky_input_output_matrices
@property
@@ -488,9 +488,9 @@ class WishartCholesky(_WishartOperatorPD):
"""Construct Wishart distributions.
Args:
- df: `float` or `double` `Output`. Degrees of freedom, must be greater than
+ df: `float` or `double` `Tensor`. Degrees of freedom, must be greater than
or equal to dimension of the scale matrix.
- scale: `float` or `double` `Output`. The Cholesky factorization of
+ scale: `float` or `double` `Tensor`. The Cholesky factorization of
the symmetric positive definite scale matrix of the distribution.
cholesky_input_output_matrices: `Boolean`. Any function whose input
or output is a matrix assumes the input is Cholesky and returns a
@@ -589,9 +589,9 @@ class WishartFull(_WishartOperatorPD):
"""Construct Wishart distributions.
Args:
- df: `float` or `double` `Output`. Degrees of freedom, must be greater than
+ df: `float` or `double` `Tensor`. Degrees of freedom, must be greater than
or equal to dimension of the scale matrix.
- scale: `float` or `double` `Output`. The symmetric positive definite
+ scale: `float` or `double` `Tensor`. The symmetric positive definite
scale matrix of the distribution.
cholesky_input_output_matrices: `Boolean`. Any function whose input
or output is a matrix assumes the input is Cholesky and returns a
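A construction sketch per the Args above, with a hand-picked positive definite scale; contrib-era export paths may differ by version:
```
import tensorflow as tf

ds = tf.contrib.distributions
scale = tf.constant([[2., .5], [.5, 1.]])  # symmetric positive definite
# df must be >= the dimension of the scale matrix (here 2).
w = ds.WishartCholesky(df=5., scale=tf.cholesky(scale))
```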
diff --git a/tensorflow/contrib/framework/python/framework/checkpoint_utils.py b/tensorflow/contrib/framework/python/framework/checkpoint_utils.py
index 9c14d24857..c8d644382b 100644
--- a/tensorflow/contrib/framework/python/framework/checkpoint_utils.py
+++ b/tensorflow/contrib/framework/python/framework/checkpoint_utils.py
@@ -71,7 +71,7 @@ def load_variable(checkpoint_dir, name):
name: Name of the tensor to return.
Returns:
- `Output` object.
+ `Tensor` object.
"""
# TODO(b/29227106): Fix this in the right place and remove this.
if name.endswith(":0"):
@@ -111,7 +111,7 @@ def _set_checkpoint_initializer(variable, file_pattern, tensor_name, slice_spec,
Args:
variable: `Variable` object.
file_pattern: string, where to load checkpoints from.
- tensor_name: Name of the `Output` to load from checkpoint reader.
+ tensor_name: Name of the `Tensor` to load from checkpoint reader.
slice_spec: Slice specification for loading partitioned variables.
name: Name of the operation.
"""
diff --git a/tensorflow/contrib/framework/python/framework/tensor_util.py b/tensorflow/contrib/framework/python/framework/tensor_util.py
index f5abf106fd..7176196627 100644
--- a/tensorflow/contrib/framework/python/framework/tensor_util.py
+++ b/tensorflow/contrib/framework/python/framework/tensor_util.py
@@ -43,7 +43,7 @@ def _assert_same_base_type(items, expected_type=None):
r"""Asserts all items are of the same base type.
Args:
- items: List of graph items (e.g., `Variable`, `Output`, `SparseTensor`,
+ items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
@@ -155,8 +155,8 @@ def remove_squeezable_dimensions(predictions, labels):
operations, which could result in a performance hit.
Args:
- predictions: Predicted values, an `Output` of arbitrary dimensions.
- labels: Label values, an `Output` whose dimensions match `predictions`.
+ predictions: Predicted values, a `Tensor` of arbitrary dimensions.
+ labels: Label values, a `Tensor` whose dimensions match `predictions`.
Returns:
Tuple of `predictions` and `labels`, possibly with last dim squeezed.
@@ -358,19 +358,19 @@ def with_shape(expected_shape, tensor):
def convert_to_tensor_or_sparse_tensor(
value, dtype=None, name=None, as_ref=False):
- """Converts value to a `SparseTensor` or `Output`.
+ """Converts value to a `SparseTensor` or `Tensor`.
Args:
value: A `SparseTensor`, `SparseTensorValue`, or an object whose type has a
- registered `Output` conversion function.
+ registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
- name: Optional name to use if a new `Output` is created.
+ name: Optional name to use if a new `Tensor` is created.
as_ref: True if we want the result as a ref tensor. Only used if a new
- `Output` is created.
+ `Tensor` is created.
Returns:
- A `SparseTensor` or `Output` based on `value`.
+ A `SparseTensor` or `Tensor` based on `value`.
Raises:
RuntimeError: If result type is incompatible with `dtype`.
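The static-shape case of `remove_squeezable_dimensions` above reduces to squeezing one trailing size-1 dimension; a NumPy sketch:
```
import numpy as np

predictions = np.zeros((8, 1))  # one extra trailing dim
labels = np.zeros((8,))
if predictions.ndim == labels.ndim + 1 and predictions.shape[-1] == 1:
  predictions = np.squeeze(predictions, axis=-1)
print(predictions.shape == labels.shape)  # True
```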
diff --git a/tensorflow/contrib/framework/python/ops/ops.py b/tensorflow/contrib/framework/python/ops/ops.py
index 5659791c78..f403942fe7 100644
--- a/tensorflow/contrib/framework/python/ops/ops.py
+++ b/tensorflow/contrib/framework/python/ops/ops.py
@@ -36,7 +36,7 @@ def get_graph_from_inputs(op_input_list, graph=None):
`op_input_list`, we attempt to use the default graph.
Args:
- op_input_list: A list of inputs to an operation, which may include `Output`,
+ op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
diff --git a/tensorflow/contrib/graph_editor/reroute.py b/tensorflow/contrib/graph_editor/reroute.py
index dd3e7ea7f4..5728435113 100644
--- a/tensorflow/contrib/graph_editor/reroute.py
+++ b/tensorflow/contrib/graph_editor/reroute.py
@@ -44,8 +44,8 @@ def _check_ts_compatibility(ts0, ts1):
"""Make sure the shape and dtype of the two tensor's lists are compatible.
Args:
- ts0: an object convertible to a list of `tf.Output`.
- ts1: an object convertible to a list of `tf.Output`.
+ ts0: an object convertible to a list of `tf.Tensor`.
+ ts1: an object convertible to a list of `tf.Tensor`.
Raises:
ValueError: if any pair of tensors (same index in ts0 and ts1) have
a dtype or a shape which is not compatible.
@@ -163,8 +163,8 @@ def _reroute_ts(ts0, ts1, mode, can_modify=None, cannot_modify=None):
Warning: this function is directly manipulating the internals of the tf.Graph.
Args:
- ts0: an object convertible to a list of `tf.Output`.
- ts1: an object convertible to a list of `tf.Output`.
+ ts0: an object convertible to a list of `tf.Tensor`.
+ ts1: an object convertible to a list of `tf.Tensor`.
mode: what to do with those tensors: "a<->b" or "b<->a" for swapping and
"a->b" or "b->a" for one-direction re-routing.
can_modify: iterable of operations which can be modified. Any operation
@@ -175,7 +175,7 @@ def _reroute_ts(ts0, ts1, mode, can_modify=None, cannot_modify=None):
Returns:
The number of individual modifications made by the function.
Raises:
- TypeError: if `ts0` or `ts1` cannot be converted to a list of `tf.Output`.
+ TypeError: if `ts0` or `ts1` cannot be converted to a list of `tf.Tensor`.
TypeError: if `can_modify` or `cannot_modify` is not `None` and cannot be
converted to a list of `tf.Operation`.
"""
@@ -215,8 +215,8 @@ def swap_ts(ts0, ts1, can_modify=None, cannot_modify=None):
A0 A1 A0 A1
Args:
- ts0: an object convertible to a list of `tf.Output`.
- ts1: an object convertible to a list of `tf.Output`.
+ ts0: an object convertible to a list of `tf.Tensor`.
+ ts1: an object convertible to a list of `tf.Tensor`.
can_modify: iterable of operations which can be modified. Any operation
outside `can_modify` will be left untouched by this function.
cannot_modify: iterable of operations which cannot be modified.
@@ -242,8 +242,8 @@ def reroute_a2b_ts(ts0, ts1, can_modify=None, cannot_modify=None):
The end of the tensors in ts1 are left dangling.
Args:
- ts0: an object convertible to a list of `tf.Output`.
- ts1: an object convertible to a list of `tf.Output`.
+ ts0: an object convertible to a list of `tf.Tensor`.
+ ts1: an object convertible to a list of `tf.Tensor`.
can_modify: iterable of operations which can be modified. Any operation
outside `can_modify` will be left untouched by this function.
cannot_modify: iterable of operations which cannot be modified. Any
@@ -268,8 +268,8 @@ def reroute_b2a_ts(ts0, ts1, can_modify=None, cannot_modify=None):
The end of the tensors in ts0 are left dangling.
Args:
- ts0: an object convertible to a list of `tf.Output`.
- ts1: an object convertible to a list of `tf.Output`.
+ ts0: an object convertible to a list of `tf.Tensor`.
+ ts1: an object convertible to a list of `tf.Tensor`.
can_modify: iterable of operations which can be modified. Any operation
outside `can_modify` will be left untouched by this function.
cannot_modify: iterable of operations which cannot be modified.
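A minimal usage sketch of the rerouting documented above (this mutates graph internals, so it is best run on a throwaway graph):
```
import tensorflow as tf
from tensorflow.contrib import graph_editor as ge

a = tf.constant(1., name="a")
b = tf.constant(2., name="b")
c = tf.identity(a, name="c")  # c's op currently consumes a
ge.swap_ts([a], [b])          # now c consumes b, and b's old consumers get a
```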
diff --git a/tensorflow/contrib/graph_editor/select.py b/tensorflow/contrib/graph_editor/select.py
index 8253bc1943..0e2914cb0d 100644
--- a/tensorflow/contrib/graph_editor/select.py
+++ b/tensorflow/contrib/graph_editor/select.py
@@ -119,7 +119,7 @@ def filter_ts(ops, positive_filter):
positive_filter: a function deciding whether to keep a tensor or not.
If `True`, all the tensors are returned.
Returns:
- A list of `tf.Output`.
+ A list of `tf.Tensor`.
Raises:
TypeError: if ops cannot be converted to a list of `tf.Operation`.
"""
@@ -616,7 +616,7 @@ def select_ops(*args, **kwargs):
Args:
*args: list of 1) regular expressions (compiled or not) or 2) (array of)
- `tf.Operation`. `tf.Output` instances are silently ignored.
+ `tf.Operation`. `tf.Tensor` instances are silently ignored.
**kwargs: 'graph': `tf.Graph` in which to perform the regex query. This is
required when using regex.
'positive_filter': an elem is selected only if `positive_filter(elem)` is
@@ -628,7 +628,7 @@ def select_ops(*args, **kwargs):
Raises:
TypeError: if the optional keyword argument graph is not a `tf.Graph`
or if an argument in args is not an (array of) `tf.Operation`
- or an (array of) `tf.Output` (silently ignored) or a string
+ or an (array of) `tf.Tensor` (silently ignored) or a string
or a regular expression.
ValueError: if one of the keyword arguments is unexpected or if a regular
expression is used without passing a graph as a keyword argument.
@@ -682,7 +682,7 @@ def select_ts(*args, **kwargs):
Args:
*args: list of 1) regular expressions (compiled or not) or 2) (array of)
- `tf.Output`. `tf.Operation` instances are silently ignored.
+ `tf.Tensor`. `tf.Operation` instances are silently ignored.
**kwargs: 'graph': `tf.Graph` in which to perform the regex query. This is
required when using regex.
'positive_filter': an elem is selected only if `positive_filter(elem)` is
@@ -690,10 +690,10 @@ def select_ts(*args, **kwargs):
'restrict_ts_regex': a regular expression is ignored if it doesn't start
with the substring "(?#ts)".
Returns:
- A list of `tf.Output`.
+ A list of `tf.Tensor`.
Raises:
TypeError: if the optional keyword argument graph is not a `tf.Graph`
- or if an argument in args is not an (array of) `tf.Output`
+ or if an argument in args is not an (array of) `tf.Tensor`
or an (array of) `tf.Operation` (silently ignored) or a string
or a regular expression.
ValueError: if one of the keyword arguments is unexpected or if a regular
@@ -758,10 +758,10 @@ def select_ops_and_ts(*args, **kwargs):
Returns:
A tuple `(ops, ts)` where:
`ops` is a list of `tf.Operation`, and
- `ts` is a list of `tf.Output`
+ `ts` is a list of `tf.Tensor`
Raises:
TypeError: if the optional keyword argument graph is not a `tf.Graph`
- or if an argument in args is not an (array of) `tf.Output`
+ or if an argument in args is not an (array of) `tf.Tensor`
or an (array of) `tf.Operation` or a string or a regular expression.
ValueError: if one of the keyword arguments is unexpected or if a regular
expression is used without passing a graph as a keyword argument.
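A usage sketch of the regex form; per the docstring above, regex queries require the `graph` keyword:
```
import tensorflow as tf
from tensorflow.contrib import graph_editor as ge

g = tf.Graph()
with g.as_default():
  tf.constant(1., name="layer1/a")
  tf.constant(2., name="layer2/b")

ops = ge.select_ops("layer1/.*", graph=g)  # ops whose names match the regex
```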
diff --git a/tensorflow/contrib/graph_editor/subgraph.py b/tensorflow/contrib/graph_editor/subgraph.py
index e678823245..00a755c79f 100644
--- a/tensorflow/contrib/graph_editor/subgraph.py
+++ b/tensorflow/contrib/graph_editor/subgraph.py
@@ -165,14 +165,14 @@ class SubGraphView(object):
Args:
inside_ops: an object convertible to a list of `tf.Operation`. This list
defines all the operations in the subgraph.
- passthrough_ts: an object convertible to a list of `tf.Output`. This list
+ passthrough_ts: an object convertible to a list of `tf.Tensor`. This list
defines all the "passthrough" tensors. A passthrough tensor is a tensor
which goes directly from the input of the subgraph to its output, without
any intermediate operations. All the non-passthrough tensors are
silently ignored.
Raises:
TypeError: if inside_ops cannot be converted to a list of `tf.Operation`
- or if `passthrough_ts` cannot be converted to a list of `tf.Output`.
+ or if `passthrough_ts` cannot be converted to a list of `tf.Tensor`.
"""
inside_ops = util.make_list_of_op(inside_ops)
@@ -598,7 +598,7 @@ def make_view(*args, **kwargs):
Args:
*args: list of 1) regular expressions (compiled or not) or 2) (array of)
- `tf.Operation` 3) (array of) `tf.Output`. Those objects will be converted
+ `tf.Operation` 3) (array of) `tf.Tensor`. Those objects will be converted
into a list of operations and a list of candidates for passthrough tensors.
**kwargs: keyword graph is used 1) to check that the ops and ts are from
the correct graph 2) for regular expression query
@@ -606,7 +606,7 @@ def make_view(*args, **kwargs):
A subgraph view.
Raises:
TypeError: if the optional keyword argument graph is not a `tf.Graph`
- or if an argument in args is not an (array of) `tf.Output`
+ or if an argument in args is not an (array of) `tf.Tensor`
or an (array of) `tf.Operation` or a string or a regular expression.
ValueError: if one of the keyword arguments is unexpected.
"""
diff --git a/tensorflow/contrib/graph_editor/transform.py b/tensorflow/contrib/graph_editor/transform.py
index b62b15d22c..26047437d7 100644
--- a/tensorflow/contrib/graph_editor/transform.py
+++ b/tensorflow/contrib/graph_editor/transform.py
@@ -89,7 +89,7 @@ def assign_renamed_collections_handler(info, elem, elem_):
Args:
info: Transform._Info instance.
- elem: the original element (`tf.Output` or `tf.Operation`)
+ elem: the original element (`tf.Tensor` or `tf.Operation`)
elem_: the transformed element
"""
# TODO(fkp): handle known special cases
diff --git a/tensorflow/contrib/graph_editor/util.py b/tensorflow/contrib/graph_editor/util.py
index a72d0e34b4..11ee2435c9 100644
--- a/tensorflow/contrib/graph_editor/util.py
+++ b/tensorflow/contrib/graph_editor/util.py
@@ -210,7 +210,7 @@ def make_list_of_op(ops, check_graph=True, allow_graph=True, ignore_ts=False):
operation.
check_graph: if `True` check if all the operations belong to the same graph.
allow_graph: if `False` a `tf.Graph` cannot be converted.
- ignore_ts: if True, silently ignore `tf.Output`.
+ ignore_ts: if True, silently ignore `tf.Tensor`.
Returns:
A newly created list of `tf.Operation`.
Raises:
@@ -241,7 +241,7 @@ def get_tensors(graph):
Args:
graph: a `tf.Graph`.
Returns:
- A list of `tf.Output`.
+ A list of `tf.Tensor`.
Raises:
TypeError: if graph is not a `tf.Graph`.
"""
@@ -254,17 +254,17 @@ def get_tensors(graph):
def make_list_of_t(ts, check_graph=True, allow_graph=True, ignore_ops=False):
- """Convert ts to a list of `tf.Output`.
+ """Convert ts to a list of `tf.Tensor`.
Args:
- ts: can be an iterable of `tf.Output`, a `tf.Graph` or a single tensor.
+ ts: can be an iterable of `tf.Tensor`, a `tf.Graph` or a single tensor.
check_graph: if `True` check if all the tensors belong to the same graph.
allow_graph: if `False` a `tf.Graph` cannot be converted.
ignore_ops: if `True`, silently ignore `tf.Operation`.
Returns:
- A newly created list of `tf.Output`.
+ A newly created list of `tf.Tensor`.
Raises:
- TypeError: if `ts` cannot be converted to a list of `tf.Output` or,
+ TypeError: if `ts` cannot be converted to a list of `tf.Tensor` or,
if `check_graph` is `True`, if all the ops do not belong to the same graph.
"""
if isinstance(ts, tf_ops.Graph):
@@ -287,11 +287,11 @@ def get_generating_ops(ts):
"""Return all the generating ops of the tensors in `ts`.
Args:
- ts: a list of `tf.Output`
+ ts: a list of `tf.Tensor`
Returns:
A list of all the generating `tf.Operation` of the tensors in `ts`.
Raises:
- TypeError: if `ts` cannot be converted to a list of `tf.Output`.
+ TypeError: if `ts` cannot be converted to a list of `tf.Tensor`.
"""
ts = make_list_of_t(ts, allow_graph=False)
return [t.op for t in ts]
@@ -301,11 +301,11 @@ def get_consuming_ops(ts):
"""Return all the consuming ops of the tensors in ts.
Args:
- ts: a list of `tf.Output`
+ ts: a list of `tf.Tensor`
Returns:
A list of all the consuming `tf.Operation` of the tensors in `ts`.
Raises:
- TypeError: if ts cannot be converted to a list of `tf.Output`.
+ TypeError: if ts cannot be converted to a list of `tf.Tensor`.
"""
ts = make_list_of_t(ts, allow_graph=False)
ops = []
@@ -434,14 +434,14 @@ def make_placeholder_from_tensor(t, scope=None):
Note that the correct graph scope must be set by the calling function.
Args:
- t: a `tf.Output` whose name will be used to create the placeholder
+ t: a `tf.Tensor` whose name will be used to create the placeholder
(see function placeholder_name).
scope: absolute scope within which to create the placeholder. None
means that the scope of `t` is preserved. `""` means the root scope.
Returns:
A newly created `tf.placeholder`.
Raises:
- TypeError: if `t` is not `None` or a `tf.Output`.
+ TypeError: if `t` is not `None` or a `tf.Tensor`.
"""
return tf_array_ops.placeholder(
dtype=t.dtype, shape=t.get_shape(), name=placeholder_name(
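A usage sketch, assuming the helper is re-exported at the `graph_editor` top level as other utilities are:
```
import tensorflow as tf
from tensorflow.contrib import graph_editor as ge

t = tf.constant([[1., 2.]], name="t")
ph = ge.make_placeholder_from_tensor(t)  # placeholder with t's dtype and shape
```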
diff --git a/tensorflow/contrib/labeled_tensor/python/ops/core.py b/tensorflow/contrib/labeled_tensor/python/ops/core.py
index fef8f60dc7..870dbdd383 100644
--- a/tensorflow/contrib/labeled_tensor/python/ops/core.py
+++ b/tensorflow/contrib/labeled_tensor/python/ops/core.py
@@ -519,7 +519,7 @@ LabeledTensorLike = tc.Union(LabeledTensor, ops.Output, np.ndarray, Scalar)
def convert_to_labeled_tensor(value, dtype=None, name=None):
"""Converts the given `value` to a `LabeledTensor`.
- This function accepts `LabeledTensor` objects, 0-dimensional `Output` objects
+ This function accepts `LabeledTensor` objects, 0-dimensional `Tensor` objects
and numpy arrays, and Python scalars. Higher dimensional unlabeled tensors
must use the `LabeledTensor` constructor explicitly.
diff --git a/tensorflow/contrib/layers/python/layers/embedding_ops.py b/tensorflow/contrib/layers/python/layers/embedding_ops.py
index 4dc52ad8cd..8c064c9370 100644
--- a/tensorflow/contrib/layers/python/layers/embedding_ops.py
+++ b/tensorflow/contrib/layers/python/layers/embedding_ops.py
@@ -202,9 +202,9 @@ def hashed_embedding_lookup(params, values, dimension, name=None,
partitioned in 4 tensors with length `[3, 3, 2, 2]`.
Args:
- params: An `Output`, `list` of `Output`s, or `PartitionedVariable`.
+ params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
Each tensor must be of rank 1 with fully-defined shape.
- values: `Output` of values to be embedded.
+ values: `Tensor` of values to be embedded.
dimension: Embedding dimension
name: An optional name for this op.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
@@ -278,7 +278,7 @@ def hashed_embedding_lookup_sparse(params,
See `tf.contrib.layers.hashed_embedding_lookup` for embedding with hashing.
Args:
- params: An `Output`, `list` of `Output`s, or `PartitionedVariable`.
+ params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
Each tensor must be of rank 1 with fully-defined shape.
sparse_values: A 2-D `SparseTensor` containing the values to be embedded.
Some rows may be empty.
@@ -358,12 +358,12 @@ def embedding_lookup_unique(params, ids, name=None):
Args:
params: A list of tensors with the same shape and type, or a
`PartitionedVariable`. Shape `[index, d1, d2, ...]`.
- ids: A one-dimensional `Output` with type `int32` or `int64` containing
+ ids: A one-dimensional `Tensor` with type `int32` or `int64` containing
the ids to be looked up in `params`. Shape `[ids1, ids2, ...]`.
name: A name for this operation (optional).
Returns:
- An `Output` with the same type as the tensors in `params` and dimension of
+ A `Tensor` with the same type as the tensors in `params` and dimension of
`[ids1, ids2, d1, d2, ...]`.
Raises:
diff --git a/tensorflow/contrib/layers/python/layers/encoders.py b/tensorflow/contrib/layers/python/layers/encoders.py
index efb7ab2855..8b6abb4b45 100644
--- a/tensorflow/contrib/layers/python/layers/encoders.py
+++ b/tensorflow/contrib/layers/python/layers/encoders.py
@@ -41,7 +41,7 @@ def bow_encoder(ids,
"""Maps a sequence of symbols to a vector per example by averaging embeddings.
Args:
- ids: `[batch_size, doc_length]` `Output` or `SparseTensor` of type
+ ids: `[batch_size, doc_length]` `Tensor` or `SparseTensor` of type
`int32` or `int64` with symbol ids.
vocab_size: Integer number of symbols in vocabulary.
embed_dim: Integer number of dimensions for embedding matrix.
@@ -59,7 +59,7 @@ def bow_encoder(ids,
reuse: If `True`, variables inside the op will be reused.
Returns:
- Encoding `Output` `[batch_size, embed_dim]` produced by
+ Encoding `Tensor` `[batch_size, embed_dim]` produced by
averaging embeddings.
Raises:
@@ -102,7 +102,7 @@ def embed_sequence(ids,
Typical use case would be reusing embeddings between an encoder and decoder.
Args:
- ids: `[batch_size, doc_length]` `Output` of type `int32` or `int64`
+ ids: `[batch_size, doc_length]` `Tensor` of type `int32` or `int64`
with symbol ids.
vocab_size: Integer number of symbols in vocabulary.
embed_dim: Integer number of dimensions for embedding matrix.
@@ -118,7 +118,7 @@ def embed_sequence(ids,
reuse: If `True`, variables inside the op will be reused.
Returns:
- `Output` of `[batch_size, doc_length, embed_dim]` with embedded sequences.
+ `Tensor` of `[batch_size, doc_length, embed_dim]` with embedded sequences.
Raises:
ValueError: if `embed_dim` or `vocab_size` are not specified when not
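A usage sketch per the Args above, assuming the encoder is exported from `tf.contrib.layers`:
```
import tensorflow as tf
from tensorflow.contrib import layers

ids = tf.constant([[1, 2, 3], [4, 5, 0]])                  # [batch_size=2, doc_length=3]
enc = layers.bow_encoder(ids, vocab_size=10, embed_dim=4)  # -> [2, 4]
```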
diff --git a/tensorflow/contrib/layers/python/layers/feature_column.py b/tensorflow/contrib/layers/python/layers/feature_column.py
index 6ce67a2616..4c375adf2f 100644
--- a/tensorflow/contrib/layers/python/layers/feature_column.py
+++ b/tensorflow/contrib/layers/python/layers/feature_column.py
@@ -786,7 +786,7 @@ class _OneHotColumn(_FeatureColumn,
in `insert_transformed_feature`. Rank should be >= `output_rank`.
unused_weight_collections: Unused. One hot encodings are not variable.
unused_trainable: Unused. One hot encodings are not trainable.
- output_rank: the desired rank of the output `Output`.
+ output_rank: the desired rank of the output `Tensor`.
Returns:
A multi-hot `Tensor` to be fed into the first layer of a neural network.
@@ -844,7 +844,7 @@ class _EmbeddingColumn(_FeatureColumn, collections.namedtuple(
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
- tensor_name_in_ckpt: (Optional). Name of the `Output` in the provided
+ tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
shared_embedding_name: (Optional). The common name for shared embedding.
@@ -981,7 +981,7 @@ def embedding_column(sparse_id_column,
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
- tensor_name_in_ckpt: (Optional). Name of the `Output` in the provided
+ tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
@@ -1027,7 +1027,7 @@ def shared_embedding_columns(sparse_id_columns,
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
- tensor_name_in_ckpt: (Optional). Name of the `Output` in the provided
+ tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
@@ -1193,7 +1193,7 @@ def hashed_embedding_column(column_name,
def _reshape_real_valued_tensor(input_tensor, output_rank, column_name=None):
- """Reshaping logic for dense, numeric `Output`s.
+ """Reshaping logic for dense, numeric `Tensors`.
Applies the following rules:
1. If `output_rank > input_rank + 1` raise a `ValueError`.
@@ -1201,15 +1201,15 @@ def _reshape_real_valued_tensor(input_tensor, output_rank, column_name=None):
dimension and return
3. If `output_rank == input_rank`, return `input_tensor`.
4. If `output_rank < input_rank`, flatten the inner dimensions of
- `input_tensor` and return an `Output` with `output_rank`
+ `input_tensor` and return a `Tensor` with `output_rank`
Args:
- input_tensor: a dense `Output` to be reshaped.
- output_rank: the desired rank of the reshaped `Output`.
+ input_tensor: a dense `Tensor` to be reshaped.
+ output_rank: the desired rank of the reshaped `Tensor`.
column_name: (optional) the name of the associated column. Used for error
messages.
Returns:
- An `Output` with the same entries as `input_tensor` and rank `output_rank`.
+ A `Tensor` with the same entries as `input_tensor` and rank `output_rank`.
Raises:
ValueError: if `output_rank > input_rank + 1`.
"""
@@ -1607,7 +1607,7 @@ class _CrossedColumn(_FeatureColumn,
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
- tensor_name_in_ckpt: (Optional). Name of the `Output` in the provided
+ tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
@@ -1756,7 +1756,7 @@ def crossed_column(columns, hash_bucket_size, combiner=None,
ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
to restore the column weights. Required if `tensor_name_in_ckpt` is not
None.
- tensor_name_in_ckpt: (Optional). Name of the `Output` in the provided
+ tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
checkpoint from which to restore the column weights. Required if
`ckpt_to_load_from` is not None.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
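A sketch of the common pairing: a hashed sparse id column wrapped in an embedding column (column names here are illustrative):
```
from tensorflow.contrib import layers

words = layers.sparse_column_with_hash_bucket("words", hash_bucket_size=1000)
embedded_words = layers.embedding_column(words, dimension=16)
```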
diff --git a/tensorflow/contrib/layers/python/layers/feature_column_ops.py b/tensorflow/contrib/layers/python/layers/feature_column_ops.py
index 207e0a773b..c084faee1e 100644
--- a/tensorflow/contrib/layers/python/layers/feature_column_ops.py
+++ b/tensorflow/contrib/layers/python/layers/feature_column_ops.py
@@ -50,7 +50,7 @@ def _embeddings_from_arguments(column,
args: the _DeepEmbeddingLookupArguments for this column.
weight_collections: collections to store weights in.
trainable: whether these embeddings should be trainable.
- output_rank: the desired rank of the returned `Output`. Inner dimensions will
+ output_rank: the desired rank of the returned `Tensor`. Inner dimensions will
be combined to produce the desired rank.
Returns:
@@ -616,7 +616,7 @@ def parse_feature_columns_from_examples(serialized,
the serialized protos in the batch.
Returns:
- A `dict` mapping FeatureColumn to `Output` and `SparseTensor` values.
+ A `dict` mapping FeatureColumn to `Tensor` and `SparseTensor` values.
"""
check_feature_columns(feature_columns)
columns_to_tensors = parsing_ops.parse_example(
@@ -672,7 +672,7 @@ def transform_features(features, feature_columns):
should be instances of classes derived from _FeatureColumn.
Returns:
- A `dict` mapping FeatureColumn to `Output` and `SparseTensor` values.
+ A `dict` mapping FeatureColumn to `Tensor` and `SparseTensor` values.
"""
check_feature_columns(feature_columns)
columns_to_tensor = features.copy()
@@ -710,9 +710,9 @@ def parse_feature_columns_from_sequence_examples(
Returns:
A tuple consisting of:
context_features: a dict mapping `FeatureColumns` from
- `context_feature_columns` to their parsed `Output`s/`SparseTensor`s.
+ `context_feature_columns` to their parsed `Tensors`/`SparseTensor`s.
sequence_features: a dict mapping `FeatureColumns` from
- `sequence_feature_columns` to their parsed `Output`s/`SparseTensor`s.
+ `sequence_feature_columns` to their parsed `Tensors`/`SparseTensor`s.
"""
# Sequence example parsing requires a single (scalar) example.
try:
diff --git a/tensorflow/contrib/layers/python/layers/layers.py b/tensorflow/contrib/layers/python/layers/layers.py
index 8fcac3d9cc..f754452d6e 100644
--- a/tensorflow/contrib/layers/python/layers/layers.py
+++ b/tensorflow/contrib/layers/python/layers/layers.py
@@ -106,7 +106,7 @@ def avg_pool2d(inputs,
scope: Optional scope for name_scope.
Returns:
- An `Output` representing the results of the pooling operation.
+ A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
@@ -202,7 +202,7 @@ def _fused_batch_norm(
scope: Optional scope for `variable_scope`.
Returns:
- An `Output` representing the output of the operation.
+ A `Tensor` representing the output of the operation.
Raises:
ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
@@ -309,7 +309,7 @@ def _fused_batch_norm(
_fused_batch_norm_training,
_fused_batch_norm_inference)
- # If `is_training` doesn't have a constant value, because it is an `Output`,
+ # If `is_training` doesn't have a constant value, because it is a `Tensor`,
# a `Variable` or `Placeholder` then is_training_value will be None and
# `need_updates` will be true.
is_training_value = utils.constant_value(is_training)
@@ -431,7 +431,7 @@ def batch_norm(
scope: Optional scope for `variable_scope`.
Returns:
- An `Output` representing the output of the operation.
+ A `Tensor` representing the output of the operation.
Raises:
ValueError: if `batch_weights` is not None and `fused` is True.
@@ -553,7 +553,7 @@ def batch_norm(
finally:
variable_scope.get_variable_scope().set_partitioner(partitioner)
- # If `is_training` doesn't have a constant value, because it is an `Output`,
+ # If `is_training` doesn't have a constant value, because it is a `Tensor`,
# a `Variable` or `Placeholder` then is_training_value will be None and
# `needs_moments` will be true.
is_training_value = utils.constant_value(is_training)
@@ -732,7 +732,7 @@ def convolution(inputs,
`convolution` creates a variable called `weights`, representing the
convolutional kernel, that is convolved (actually cross-correlated) with the
- `inputs` to produce an `Output` of activations. If a `normalizer_fn` is
+ `inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
provided (such as `batch_norm`), it is then applied. Otherwise, if
`normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
variable would be created and added to the activations. Finally, if
@@ -922,7 +922,7 @@ def convolution2d_in_plane(
scope: Optional scope for `variable_scope`.
Returns:
- An `Output` representing the output of the operation.
+ A `Tensor` representing the output of the operation.
"""
with variable_scope.variable_scope(
scope, 'ConvInPlane', [inputs], reuse=reuse) as sc:
@@ -992,7 +992,7 @@ def convolution2d_transpose(
second variable called 'biases' is added to the result of the operation.
Args:
- inputs: A 4-D `Output` of type `float` and shape
+ inputs: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
num_outputs: integer, the number of output filters.
@@ -1131,11 +1131,11 @@ def dropout(inputs,
Args:
inputs: the tensor to pass to the nn.dropout op.
- keep_prob: A scalar `Output` with the same type as x. The probability
+ keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
- noise_shape: A 1-D `Output` of type `int32`, representing the
+ noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
- is_training: A bool `Output` indicating whether or not the model
+ is_training: A bool `Tensor` indicating whether or not the model
is in training mode. If so, dropout is applied and values scaled.
Otherwise, `inputs` is returned.
outputs_collections: collection to add the outputs.
@@ -1235,16 +1235,16 @@ def _inner_flatten(inputs, new_rank, output_collections=None, scope=None):
rank of `inputs`.
Args:
- inputs: an `Output` or `SparseTensor`.
- new_rank: the desired rank of the returned `Output` or `SparseTensor`.
+ inputs: a `Tensor` or `SparseTensor`.
+ new_rank: the desired rank of the returned `Tensor` or `SparseTensor`.
output_collections: collection to which the outputs will be added.
scope: optional scope for `name_scope`.
Returns:
- An `Output` or `SparseTensor` conataining the same values as `inputs`, but
+ A `Tensor` or `SparseTensor` containing the same values as `inputs`, but
with innermost dimensions flattened to obtain rank `new_rank`.
Raises:
- TypeError: `inputs` is not an `Output` or `SparseTensor`.
+ TypeError: `inputs` is not a `Tensor` or `SparseTensor`.
"""
with ops.name_scope(scope, 'InnerFlatten', [inputs, new_rank]) as sc:
if isinstance(inputs, sparse_tensor.SparseTensor):
@@ -1274,7 +1274,7 @@ def fully_connected(inputs,
`fully_connected` creates a variable called `weights`, representing a fully
connected weight matrix, which is multiplied by the `inputs` to produce a
- `Output` of hidden units. If a `normalizer_fn` is provided (such as
+ `Tensor` of hidden units. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
None and a `biases_initializer` is provided then a `biases` variable would be
created and added to the hidden units. Finally, if `activation_fn` is not `None`,
@@ -1403,7 +1403,7 @@ def layer_norm(inputs,
scope: Optional scope for `variable_scope`.
Returns:
- An `Output` representing the output of the operation.
+ A `Tensor` representing the output of the operation.
Raises:
ValueError: if rank or last dimension of `inputs` is undefined.
@@ -1484,7 +1484,7 @@ def max_pool2d(inputs,
scope: Optional scope for name_scope.
Returns:
- An `Output` representing the results of the pooling operation.
+ A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: if `data_format` is neither `NHWC` nor `NCHW`.
@@ -1551,7 +1551,7 @@ def pool(inputs,
scope: Optional scope for name_scope.
Returns:
- An `Output` representing the results of the pooling operation.
+ A `Tensor` representing the results of the pooling operation.
Raises:
ValueError: if arguments are invalid.
@@ -1637,7 +1637,7 @@ def repeat(inputs, repetitions, layer, *args, **kwargs):
layers are called with `scope='stack'`.
Args:
- inputs: An `Output` suitable for layer.
+ inputs: A `Tensor` suitable for layer.
repetitions: Int, number of repetitions.
layer: A layer with arguments `(inputs, *args, **kwargs)`
*args: Extra args for the layer.
@@ -1727,7 +1727,7 @@ def separable_convolution2d(
scope: Optional scope for variable_scope.
Returns:
- An `Output` representing the output of the operation.
+ A `Tensor` representing the output of the operation.
"""
with variable_scope.variable_scope(
scope, 'SeparableConv2d', [inputs], reuse=reuse) as sc:
@@ -1800,11 +1800,11 @@ def softmax(logits, scope=None):
needs to have a specified number of elements (number of classes).
Args:
- logits: N-dimensional `Output` with logits, where N > 1.
+ logits: N-dimensional `Tensor` with logits, where N > 1.
scope: Optional scope for variable_scope.
Returns:
- an `Output` with same shape and type as logits.
+ a `Tensor` with same shape and type as logits.
"""
# TODO(jrru): Add axis argument which defaults to last dimension.
with variable_scope.variable_scope(scope, 'softmax', [logits]):
@@ -1838,13 +1838,13 @@ def stack(inputs, layer, stack_args, **kwargs):
layers are called with `scope='stack'`.
Args:
- inputs: An `Output` suitable for layer.
+ inputs: A `Tensor` suitable for layer.
layer: A layer with arguments `(inputs, *args, **kwargs)`
stack_args: A list/tuple of parameters for each call of layer.
**kwargs: Extra kwargs for the layer.
Returns:
- an `Output` result of applying the stacked layers.
+ a `Tensor` result of applying the stacked layers.
Raises:
ValueError: if the op is unknown or wrong.
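A usage sketch of `stack` with `fully_connected`, per the contract above; one layer is built per entry in `stack_args`:
```
import tensorflow as tf
from tensorflow.contrib import layers

x = tf.placeholder(tf.float32, [None, 8])
# Three fully connected layers of widths 32, 64, 128, scoped under 'fc'.
y = layers.stack(x, layers.fully_connected, [32, 64, 128], scope='fc')
```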
@@ -1878,13 +1878,13 @@ def unit_norm(inputs, dim, epsilon=1e-7, scope=None):
Note that the rank of `inputs` must be known.
Args:
- inputs: An `Output` of arbitrary size.
+ inputs: A `Tensor` of arbitrary size.
dim: The dimension along which the input is normalized.
epsilon: A small value to add to the inputs to avoid dividing by zero.
scope: Optional scope for variable_scope.
Returns:
- The normalized `Output`.
+ The normalized `Tensor`.
Raises:
ValueError: If dim is smaller than the number of dimensions in 'inputs'.
@@ -1960,7 +1960,7 @@ def legacy_fully_connected(x,
collection.
Args:
- x: The input `Output`.
+ x: The input `Tensor`.
num_output_units: The size of the output.
activation_fn: activation function, default set to None to skip it and
maintain a linear activation.
diff --git a/tensorflow/contrib/layers/python/layers/layers_test.py b/tensorflow/contrib/layers/python/layers/layers_test.py
index ab0f35cbbf..fb96f19744 100644
--- a/tensorflow/contrib/layers/python/layers/layers_test.py
+++ b/tensorflow/contrib/layers/python/layers/layers_test.py
@@ -1322,7 +1322,7 @@ def _sparsify(array, threshold=0.5):
class PartialFlattenTest(tf.test.TestCase):
def testDensePartialFlatten(self):
- """Test `_inner_flatten` on `Output`s."""
+ """Test `_inner_flatten` on `Tensor`s."""
shape = [2, 3, 4, 5, 6]
np.random.seed(5446)
inputs = np.random.randint(0, 100, size=shape)
diff --git a/tensorflow/contrib/layers/python/layers/optimizers.py b/tensorflow/contrib/layers/python/layers/optimizers.py
index 1bd66977e1..2908096c6c 100644
--- a/tensorflow/contrib/layers/python/layers/optimizers.py
+++ b/tensorflow/contrib/layers/python/layers/optimizers.py
@@ -74,7 +74,7 @@ def optimize_loss(loss,
- string, name of the optimizer like 'SGD', 'Adam', see OPTIMIZER_CLS_NAMES
for full list. E.g. `optimize_loss(..., optimizer='Adam')`.
- - function, takes learning rate `Output` as argument and must return
+ - function, takes learning rate `Tensor` as argument and must return
`Optimizer` instance. E.g. `optimize_loss(...,
optimizer=lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.5))`.
Alternatively, if `learning_rate` is `None`, the function takes no
@@ -87,13 +87,13 @@ def optimize_loss(loss,
E.g., `optimize_loss(..., optimizer=tf.train.AdagradOptimizer(0.5))`.
Args:
- loss: Scalar `Output`.
- global_step: Scalar int `Output`, step counter for each update. If not
+ loss: Scalar `Tensor`.
+ global_step: Scalar int `Tensor`, step counter for each update. If not
supplied, it will be fetched from the default graph (see
`tf.contrib.framework.get_global_step` for details). If it has
not been created, no step will be incremented with each weight
update. `learning_rate_decay_fn` requires `global_step`.
- learning_rate: float or `Output`, magnitude of update per each training
+ learning_rate: float or `Tensor`, magnitude of update per training
step. Can be `None`.
optimizer: string, class or optimizer instance, used as trainer.
string should be name of optimizer, like 'SGD',
@@ -114,7 +114,7 @@ def optimize_loss(loss,
This callable takes a `list` of `(gradients, variables)` `tuple`s and
returns the same thing with the gradients modified.
learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
- `Output`s, returns `Output`.
+ `Tensor`s, returns `Tensor`.
Can be used to implement any learning rate decay
functions.
For example: `tf.train.exponential_decay`.
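For context, a minimal sketch of calling `optimize_loss` with a string optimizer name, as documented above (the toy model, placeholders, and learning rate are illustrative):

    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None, 10])
    labels = tf.placeholder(tf.float32, [None, 1])
    predictions = tf.contrib.layers.fully_connected(x, 1, activation_fn=None)
    loss = tf.reduce_mean(tf.square(predictions - labels))  # scalar Tensor
    train_op = tf.contrib.layers.optimize_loss(
        loss=loss,
        global_step=tf.contrib.framework.get_or_create_global_step(),
        learning_rate=0.01,
        optimizer='Adam')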
diff --git a/tensorflow/contrib/layers/python/layers/regularizers.py b/tensorflow/contrib/layers/python/layers/regularizers.py
index e45b237e53..86d0516774 100644
--- a/tensorflow/contrib/layers/python/layers/regularizers.py
+++ b/tensorflow/contrib/layers/python/layers/regularizers.py
@@ -40,7 +40,7 @@ def l1_regularizer(scale, scope=None):
L1 regularization encourages sparsity.
Args:
- scale: A scalar multiplier `Output`. 0.0 disables the regularizer.
+ scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: An optional scope name.
Returns:
@@ -79,7 +79,7 @@ def l2_regularizer(scale, scope=None):
Small values of L2 can help prevent overfitting the training data.
Args:
- scale: A scalar multiplier `Output`. 0.0 disables the regularizer.
+ scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: An optional scope name.
Returns:
@@ -113,8 +113,8 @@ def l1_l2_regularizer(scale_l1=1.0, scale_l2=1.0, scope=None):
"""Returns a function that can be used to apply L1 L2 regularizations.
Args:
- scale_l1: A scalar multiplier `Output` for L1 regularization.
- scale_l2: A scalar multiplier `Output` for L2 regularization.
+ scale_l1: A scalar multiplier `Tensor` for L1 regularization.
+ scale_l2: A scalar multiplier `Tensor` for L2 regularization.
scope: An optional scope name.
Returns:
@@ -163,9 +163,9 @@ def apply_regularization(regularizer, weights_list=None):
subtraction, it usually shouldn't hurt much either.
Args:
- regularizer: A function that takes a single `Output` argument and returns
- a scalar `Output` output.
- weights_list: List of weights `Output`s or `Variables` to apply
+ regularizer: A function that takes a single `Tensor` argument and returns
+ a scalar `Tensor` output.
+ weights_list: List of weight `Tensor`s or `Variables` to apply
`regularizer` over. Defaults to the `GraphKeys.WEIGHTS` collection if
`None`.
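A minimal sketch of combining the regularizers documented above (the scales and variable shape are illustrative):

    import tensorflow as tf

    weights = tf.get_variable(
        'weights', shape=[10, 5],
        initializer=tf.truncated_normal_initializer())
    regularizer = tf.contrib.layers.l1_l2_regularizer(
        scale_l1=0.001, scale_l2=0.01)
    # A scalar Tensor that can be added to the training loss.
    penalty = tf.contrib.layers.apply_regularization(regularizer, [weights])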
diff --git a/tensorflow/contrib/layers/python/layers/utils.py b/tensorflow/contrib/layers/python/layers/utils.py
index 4e86f71e3e..e2a6eda7c2 100644
--- a/tensorflow/contrib/layers/python/layers/utils.py
+++ b/tensorflow/contrib/layers/python/layers/utils.py
@@ -40,7 +40,7 @@ NamedOutputs = namedtuple('NamedOutputs', ['name', 'outputs'])
def collect_named_outputs(collections, alias, outputs):
- """Add `Output` outputs tagged with alias to collections.
+ """Add `Tensor` outputs tagged with alias to collections.
It is useful to collect end-points or tags for summaries. Example of usage:
@@ -70,7 +70,7 @@ def gather_tensors_alias(tensors):
If the tensor does not have an alias, it defaults to its name.
Args:
- tensors: A list of `Output`s.
+ tensors: A list of `Tensor`s.
Returns:
A list of strings with the alias of each tensor.
@@ -84,7 +84,7 @@ def get_tensor_alias(tensor):
If the tensor does not have an alias, it defaults to its name.
Args:
- tensor: An `Output`.
+ tensor: A `Tensor`.
Returns:
A string with the alias of the tensor.
@@ -116,7 +116,7 @@ def constant_value(value_or_tensor_or_var, dtype=None):
"""Returns value if value_or_tensor_or_var has a constant value.
Args:
- value_or_tensor_or_var: A value, an `Output` or a `Variable`.
+ value_or_tensor_or_var: A value, a `Tensor` or a `Variable`.
dtype: Optional `tf.dtype`; if set, the value is checked to have the
right dtype.
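A short sketch of the `constant_value` behavior described above, assuming the module path shown in this diff (the expected results in comments are for constant-foldable inputs):

    import tensorflow as tf
    from tensorflow.contrib.layers.python.layers import utils

    utils.constant_value(3.0)               # a plain value passes through
    utils.constant_value(tf.constant(3.0))  # a constant Tensor folds to 3.0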
diff --git a/tensorflow/contrib/layers/python/ops/bucketization_op.py b/tensorflow/contrib/layers/python/ops/bucketization_op.py
index ae7dd446cc..6d232da4ad 100644
--- a/tensorflow/contrib/layers/python/ops/bucketization_op.py
+++ b/tensorflow/contrib/layers/python/ops/bucketization_op.py
@@ -31,12 +31,12 @@ def bucketize(input_tensor, boundaries, name=None):
See bucketize_op.cc for more details.
Args:
- input_tensor: An `Output` which will be bucketize.
+ input_tensor: A `Tensor` which will be bucketized.
boundaries: A list of floats giving the boundaries. It has to be sorted.
name: A name prefix for the returned tensors (optional).
Returns:
- An `Output` with type int32 which indicates the corresponding bucket for
+ A `Tensor` with type int32 which indicates the corresponding bucket for
each value in `input_tensor`.
Raises:
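For context, a minimal sketch of `bucketize` as documented above, assuming the module path shown in this diff (the values and boundaries are illustrative):

    import tensorflow as tf
    from tensorflow.contrib.layers.python.ops import bucketization_op

    values = tf.constant([1.0, 3.5, 8.0])
    # Sorted boundaries [2.0, 5.0] yield one int32 bucket index per value,
    # here roughly [0, 1, 2].
    buckets = bucketization_op.bucketize(values, boundaries=[2.0, 5.0])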
diff --git a/tensorflow/contrib/layers/python/ops/sparse_feature_cross_op.py b/tensorflow/contrib/layers/python/ops/sparse_feature_cross_op.py
index 709d96e401..e4141c6b6d 100644
--- a/tensorflow/contrib/layers/python/ops/sparse_feature_cross_op.py
+++ b/tensorflow/contrib/layers/python/ops/sparse_feature_cross_op.py
@@ -50,7 +50,7 @@ def sparse_feature_cross(inputs, hashed_output=False, num_buckets=0,
See sparse_feature_cross_kernel.cc for more details.
Args:
- inputs: List of `SparseTensor` or `Output` to be crossed.
+ inputs: List of `SparseTensor` or `Tensor` to be crossed.
hashed_output: If true, returns the hash of the cross instead of the string.
This allows us to avoid string manipulations.
num_buckets: It is used if hashed_output is true.
diff --git a/tensorflow/contrib/layers/python/ops/sparse_ops.py b/tensorflow/contrib/layers/python/ops/sparse_ops.py
index a2c637468f..325f5ac97b 100644
--- a/tensorflow/contrib/layers/python/ops/sparse_ops.py
+++ b/tensorflow/contrib/layers/python/ops/sparse_ops.py
@@ -41,7 +41,7 @@ def dense_to_sparse_tensor(dense_tensor, ignore_value=None):
"""Converts a dense Tensor to a SparseTensor, dropping ignore_value cells.
Args:
- dense_tensor: An `Output`.
+ dense_tensor: A `Tensor`.
ignore_value: Entries in `dense_tensor` equal to this value will be
absent from the returned `SparseTensor`. If `None`, the default value of
dense_tensor's dtype will be used (e.g. '' for `str`, 0 for `int`).
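A minimal sketch of `dense_to_sparse_tensor` as documented above, assuming the module path shown in this diff:

    import tensorflow as tf
    from tensorflow.contrib.layers.python.ops import sparse_ops

    dense = tf.constant([[1, 0, 2], [0, 0, 3]])
    # With ignore_value=None, 0 (the default for ints) is dropped, leaving
    # a SparseTensor holding the values [1, 2, 3].
    sparse = sparse_ops.dense_to_sparse_tensor(dense)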
diff --git a/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py b/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py
index 1b98fae702..46e1def77e 100644
--- a/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py
+++ b/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py
@@ -194,8 +194,8 @@ def enqueue_data(data,
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given array or `DataFrame`. In
- the case of a pandas `DataFrame`, the first enqueued `Output` corresponds to
- the index of the `DataFrame`. For numpy arrays, the first enqueued `Output`
+ the case of a pandas `DataFrame`, the first enqueued `Tensor` corresponds to
+ the index of the `DataFrame`. For numpy arrays, the first enqueued `Tensor`
contains the row number.
Args:
diff --git a/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_queue_runner.py b/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_queue_runner.py
index 31a2cf8fd6..72f3bbc3f4 100644
--- a/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_queue_runner.py
+++ b/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_queue_runner.py
@@ -44,7 +44,7 @@ class FeedingQueueRunner(qr.QueueRunner):
close_op: Op to close the queue. Pending enqueue ops are preserved.
cancel_op: Op to close the queue and cancel pending enqueue ops.
feed_fns: a list of functions that return a dictionary mapping fed
- `Output`s to values. Must be the same length as `enqueue_ops`.
+ `Tensor`s to values. Must be the same length as `enqueue_ops`.
queue_closed_exception_types: Optional tuple of Exception types that
indicate that the queue has been closed when raised during an enqueue
operation. Defaults to
diff --git a/tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py b/tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py
index 3e14d651c5..b17a4b8d05 100644
--- a/tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py
+++ b/tensorflow/contrib/learn/python/learn/dataframe/tensorflow_dataframe.py
@@ -144,7 +144,7 @@ class TensorFlowDataFrame(df.DataFrame):
'''
Args:
- boolean_series: a `Series` that evaluates to a boolean `Output`.
+ boolean_series: a `Series` that evaluates to a boolean `Tensor`.
Returns:
A new `DataFrame` with the same columns as `self`, but selecting only the
diff --git a/tensorflow/contrib/learn/python/learn/dataframe/transform.py b/tensorflow/contrib/learn/python/learn/dataframe/transform.py
index 39a782d93e..c28da59ac7 100644
--- a/tensorflow/contrib/learn/python/learn/dataframe/transform.py
+++ b/tensorflow/contrib/learn/python/learn/dataframe/transform.py
@@ -171,7 +171,7 @@ class Transform(object):
Note this output type is used both for `__call__`, in which case the
values are `TransformedSeries`, and for `apply_transform`, in which case
- the values are `Output`s.
+ the values are `Tensor`s.
Returns:
A namedtuple type fixing the order and names of the outputs of this
diff --git a/tensorflow/contrib/learn/python/learn/dataframe/transforms/batch.py b/tensorflow/contrib/learn/python/learn/dataframe/transforms/batch.py
index 448f06fad5..200ec57b67 100644
--- a/tensorflow/contrib/learn/python/learn/dataframe/transforms/batch.py
+++ b/tensorflow/contrib/learn/python/learn/dataframe/transforms/batch.py
@@ -64,7 +64,7 @@ class Batch(AbstractBatchTransform):
Note that dimension 0 is assumed to correspond to "example number" so
`Batch` does not prepend an additional dimension to incoming `Series`.
- For example, if an `Output` in `transform_input` has shape [x, y], the
+ For example, if a `Tensor` in `transform_input` has shape [x, y], the
corresponding output will have shape [batch_size, y].
"""
@@ -91,7 +91,7 @@ class ShuffleBatch(AbstractBatchTransform):
Note that dimension 0 is assumed to correspond to "example number" so
`ShuffleBatch` does not prepend an additional dimension to incoming `Series`.
- For example, if an `Output` in `transform_input` has shape [x, y], the
+ For example, if a `Tensor` in `transform_input` has shape [x, y], the
corresponding output will have shape [batch_size, y].
"""
diff --git a/tensorflow/contrib/learn/python/learn/dataframe/transforms/boolean_mask.py b/tensorflow/contrib/learn/python/learn/dataframe/transforms/boolean_mask.py
index 18b81f8010..130ac0c90f 100644
--- a/tensorflow/contrib/learn/python/learn/dataframe/transforms/boolean_mask.py
+++ b/tensorflow/contrib/learn/python/learn/dataframe/transforms/boolean_mask.py
@@ -35,7 +35,7 @@ def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
Args:
sparse_tensor: a `SparseTensor`.
- mask: a 1D boolean dense`Output` whose length is equal to the 0th dimension
+ mask: a 1D boolean dense `Tensor` whose length is equal to the 0th dimension
of `sparse_tensor`.
name: optional name for this operation.
Returns:
diff --git a/tensorflow/contrib/learn/python/learn/dataframe/transforms/reader_source.py b/tensorflow/contrib/learn/python/learn/dataframe/transforms/reader_source.py
index 74a0f770ef..e8fa402bd6 100644
--- a/tensorflow/contrib/learn/python/learn/dataframe/transforms/reader_source.py
+++ b/tensorflow/contrib/learn/python/learn/dataframe/transforms/reader_source.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""`ReaderSource` produces `Output`s of keys and values using a `tf.Reader`."""
+"""`ReaderSource` produces `Tensor`s of keys and values using a `tf.Reader`."""
from __future__ import absolute_import
from __future__ import division
@@ -24,7 +24,7 @@ from tensorflow.python.training import input as input_ops
class ReaderSource(transform.TensorFlowTransform):
- """Produces `Output`s of keys and values using a `tf.Reader`."""
+ """Produces `Tensor`s of keys and values using a `tf.Reader`."""
def __init__(self,
reader_cls,
diff --git a/tensorflow/contrib/learn/python/learn/estimators/classifier.py b/tensorflow/contrib/learn/python/learn/estimators/classifier.py
index 55dca2f527..f62764043f 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/classifier.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/classifier.py
@@ -34,9 +34,9 @@ def classification_signature_fn(examples, unused_features, predictions):
"""Creates classification signature from given examples and predictions.
Args:
- examples: `Output`.
- unused_features: `dict` of `Output`s.
- predictions: `dict` of `Output`s.
+ examples: `Tensor`.
+ unused_features: `dict` of `Tensor`s.
+ predictions: `dict` of `Tensor`s.
Returns:
Tuple of default classification signature and empty named signatures.
diff --git a/tensorflow/contrib/learn/python/learn/estimators/dnn.py b/tensorflow/contrib/learn/python/learn/estimators/dnn.py
index af6216fc5e..f23adc0c34 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/dnn.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/dnn.py
@@ -68,8 +68,8 @@ def _dnn_model_fn(features, labels, mode, params):
"""Deep Neural Net model_fn.
Args:
- features: `Output` or dict of `Output` (depends on data passed to `fit`).
- labels: `Output` of shape [batch_size, 1] or [batch_size] labels of
+ features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
+ labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
@@ -91,7 +91,7 @@ def _dnn_model_fn(features, labels, mode, params):
* num_ps_replicas: The number of parameter server replicas.
Returns:
- predictions: A dict of `Output` objects.
+ predictions: A dict of `Tensor` objects.
loss: A scalar containing the loss of the step.
train_op: The op for training.
"""
@@ -211,7 +211,7 @@ class DNNClassifier(evaluable.Evaluable, trainable.Trainable):
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
- `key=weight_column_name` whose value is an `Output`.
+ `key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
@@ -219,7 +219,7 @@ class DNNClassifier(evaluable.Evaluable, trainable.Trainable):
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
- whose `value` is an `Output`.
+ whose `value` is a `Tensor`.
"""
def __init__(self,
@@ -401,7 +401,7 @@ class DNNClassifier(evaluable.Evaluable, trainable.Trainable):
name: string, name of the tensor.
Returns:
- `Output` object.
+ `Tensor` object.
"""
return self._estimator.get_variable_value(name)
@@ -510,7 +510,7 @@ class DNNRegressor(dnn_linear_combined.DNNLinearCombinedRegressor):
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
- `key=weight_column_name` whose value is an `Output`.
+ `key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
@@ -518,7 +518,7 @@ class DNNRegressor(dnn_linear_combined.DNNLinearCombinedRegressor):
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
- whose `value` is an `Output`.
+ whose `value` is a `Tensor`.
"""
def __init__(self,
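For context, a minimal sketch of constructing the `DNNClassifier` documented above (the column name, layer sizes, and class count are illustrative):

    import tensorflow as tf

    feature_columns = [tf.contrib.layers.real_valued_column('x', dimension=4)]
    classifier = tf.contrib.learn.DNNClassifier(
        feature_columns=feature_columns,
        hidden_units=[10, 20, 10],
        n_classes=3)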
diff --git a/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py b/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py
index 3d0d1f22a1..10b288f1fb 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py
@@ -54,7 +54,7 @@ class _DNNLinearCombinedBaseEstimator(estimator.BaseEstimator):
Input of `fit`, `train`, and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
- `key=weight_column_name` whose value is an `Output`.
+ `key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
@@ -62,7 +62,7 @@ class _DNNLinearCombinedBaseEstimator(estimator.BaseEstimator):
`key` the id column name, the second with `key` the weight column
name. Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
- whose `value` is an `Output`.
+ whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
@@ -369,8 +369,8 @@ def _dnn_linear_combined_model_fn(features, labels, mode, params):
"""Deep Neural Net and Linear combined model_fn.
Args:
- features: `Output` or dict of `Output` (depends on data passed to `fit`).
- labels: `Output` of shape [batch_size, 1] or [batch_size] labels of dtype
+ features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
+ labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of dtype
`int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
@@ -586,7 +586,7 @@ class DNNLinearCombinedClassifier(evaluable.Evaluable, trainable.Trainable):
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
- `key=weight_column_name` whose value is an `Output`.
+ `key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
@@ -594,7 +594,7 @@ class DNNLinearCombinedClassifier(evaluable.Evaluable, trainable.Trainable):
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
- whose `value` is an `Output`.
+ whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
@@ -802,7 +802,7 @@ class DNNLinearCombinedClassifier(evaluable.Evaluable, trainable.Trainable):
name: string, name of the tensor.
Returns:
- `Output` object.
+ `Tensor` object.
"""
return self._estimator.get_variable_value(name)
@@ -946,7 +946,7 @@ class DNNLinearCombinedRegressor(_DNNLinearCombinedBaseEstimator):
Input of `fit`, `train`, and `evaluate` should have following features,
otherwise there will be a `KeyError`:
if `weight_column_name` is not `None`, a feature with
- `key=weight_column_name` whose value is an `Output`.
+ `key=weight_column_name` whose value is a `Tensor`.
for each `column` in `dnn_feature_columns` + `linear_feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
@@ -954,7 +954,7 @@ class DNNLinearCombinedRegressor(_DNNLinearCombinedBaseEstimator):
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
- whose `value` is an `Output`.
+ whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_linear_weights pylint: disable=invalid-name
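A minimal sketch of the combined estimator documented above, pairing a sparse column on the linear side with its embedding on the DNN side (column names and sizes are illustrative):

    import tensorflow as tf

    country = tf.contrib.layers.sparse_column_with_hash_bucket(
        'country', hash_bucket_size=100)
    country_emb = tf.contrib.layers.embedding_column(country, dimension=8)
    age = tf.contrib.layers.real_valued_column('age')

    estimator = tf.contrib.learn.DNNLinearCombinedClassifier(
        linear_feature_columns=[country],
        dnn_feature_columns=[country_emb, age],
        dnn_hidden_units=[100, 50])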
diff --git a/tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py b/tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py
index fbcf665e60..e49d157919 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py
@@ -64,12 +64,12 @@ def padding_mask(sequence_lengths, padded_length):
"""Creates a mask used for calculating losses with padded input.
Args:
- sequence_lengths: An `Output` of shape `[batch_size]` containing the
- unpadded length of each sequence.
- padded_length: A scalar `Output` indicating the length of the sequences
+ sequence_lengths: A `Tensor` of shape `[batch_size]` containing the unpadded
+ length of each sequence.
+ padded_length: A scalar `Tensor` indicating the length of the sequences
after padding
Returns:
- A boolean `Output` M of shape `[batch_size, padded_length]` where
+ A boolean `Tensor` M of shape `[batch_size, padded_length]` where
`M[i, j] == True` when `lengths[i] > j`.
"""
@@ -84,7 +84,7 @@ def mask_activations_and_labels(activations, labels, sequence_lengths):
Args:
activations: Output of the RNN, shape `[batch_size, padded_length, k]`.
labels: Label values, shape `[batch_size, padded_length]`.
- sequence_lengths: An `Output` of shape `[batch_size]` with the unpadded
+ sequence_lengths: A `Tensor` of shape `[batch_size]` with the unpadded
length of each sequence. If `None`, then each sequence is unpadded.
Returns:
@@ -116,15 +116,15 @@ def mask_activations_and_labels(activations, labels, sequence_lengths):
def select_last_activations(activations, sequence_lengths):
"""Selects the nth set of activations for each n in `sequence_length`.
- Reuturns an `Output` of shape `[batch_size, k]`. If `sequence_length` is not
+ Returns a `Tensor` of shape `[batch_size, k]`. If `sequence_length` is not
`None`, then `output[i, :] = activations[i, sequence_length[i], :]`. If
`sequence_length` is `None`, then `output[i, :] = activations[i, -1, :]`.
Args:
- activations: An `Output` with shape `[batch_size, padded_length, k]`.
- sequence_lengths: An `Output` with shape `[batch_size]` or `None`.
+ activations: A `Tensor` with shape `[batch_size, padded_length, k]`.
+ sequence_lengths: A `Tensor` with shape `[batch_size]` or `None`.
Returns:
- An `Output` of shape `[batch_size, k]`.
+ A `Tensor` of shape `[batch_size, k]`.
"""
with ops.name_scope('select_last_activations',
values=[activations, sequence_lengths]):
@@ -151,12 +151,12 @@ def _concatenate_context_input(sequence_input, context_input):
returned.
Args:
- sequence_input: An `Output` of dtype `float32` and shape `[batch_size,
+ sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,
padded_length, d0]`.
- context_input: An `Output` of dtype `float32` and shape `[batch_size, d1]`.
+ context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.
Returns:
- An `Output` of dtype `float32` and shape `[batch_size, padded_length,
+ A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,
d0 + d1]`.
Raises:
@@ -212,7 +212,7 @@ def build_sequence_input(features,
weight_collections: List of graph collections to which weights are added.
scope: Optional scope, passed through to parsing ops.
Returns:
- An `Output` of dtype `float32` and shape `[batch_size, padded_length, ?]`.
+ A `Tensor` of dtype `float32` and shape `[batch_size, padded_length, ?]`.
This will be used as input to an RNN.
"""
sequence_input = layers.sequence_input_from_feature_columns(
@@ -242,7 +242,7 @@ def construct_rnn(initial_state,
Args:
initial_state: The initial state to pass to the RNN. If `None`, the
default starting state for `self._cell` is used.
- sequence_input: An `Output` with shape `[batch_size, padded_length, d]`
+ sequence_input: A `Tensor` with shape `[batch_size, padded_length, d]`
that will be passed as input to the RNN.
cell: An initialized `RNNCell`.
num_label_columns: The desired output dimension.
@@ -280,7 +280,7 @@ def _mask_multivalue(sequence_length, metric):
"""Wrapper function that masks values by `sequence_length`.
Args:
- sequence_length: An `Output` with shape `[batch_size]` and dtype `int32`
+ sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
metric: A metric function. Its signature must contain `predictions` and
@@ -305,7 +305,7 @@ def _get_default_metrics(problem_type, prediction_type, sequence_length):
problem_type: `ProblemType.CLASSIFICATION` or `ProblemType.REGRESSION`.
prediction_type: `PredictionType.SINGLE_VALUE` or
`PredictionType.MULTIPLE_VALUE`.
- sequence_length: An `Output` with shape `[batch_size]` and dtype `int32`
+ sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
Returns:
@@ -341,7 +341,7 @@ def _multi_value_predictions(
If `predict_probabilities` is `False`, this function returns a `dict`
containing a single entry with key `PREDICTIONS_KEY`. If `predict_probabilities`
is `True`, it will contain a second entry with key `PROBABILITIES_KEY`. The
- value of this entry is an `Output` of probabilities with shape
+ value of this entry is a `Tensor` of probabilities with shape
`[batch_size, padded_length, num_classes]`.
Note that variable length inputs will yield some predictions that don't have
@@ -356,7 +356,7 @@ def _multi_value_predictions(
should be returned. Should only be set to `True` for
classification/logistic regression problems.
Returns:
- A `dict` mapping strings to `Output`s.
+ A `dict` mapping strings to `Tensor`s.
"""
with ops.name_scope('MultiValuePrediction'):
activations_shape = array_ops.shape(activations)
@@ -389,13 +389,13 @@ def _single_value_predictions(
If `predict_probabilities` is `False`, this function returns a `dict`
containing a single entry with key `PREDICTIONS_KEY`. If `predict_probabilities`
is `True`, it will contain a second entry with key `PROBABILITIES_KEY`. The
- value of this entry is an `Output` of probabilities with shape
+ value of this entry is a `Tensor` of probabilities with shape
`[batch_size, num_classes]`.
Args:
activations: Output from an RNN. Should have dtype `float32` and shape
`[batch_size, padded_length, ?]`.
- sequence_length: An `Output` with shape `[batch_size]` and dtype `int32`
+ sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
target_column: An initialized `TargetColumn`, used to calculate predictions.
@@ -403,7 +403,7 @@ def _single_value_predictions(
should be returned. Should only be set to `True` for
classification/logistic regression problems.
Returns:
- A `dict` mapping strings to `Output`s.
+ A `dict` mapping strings to `Tensor`s.
"""
with ops.name_scope('SingleValuePrediction'):
last_activations = select_last_activations(activations, sequence_length)
@@ -427,15 +427,15 @@ def _multi_value_loss(
Args:
activations: Output from an RNN. Should have dtype `float32` and shape
`[batch_size, padded_length, ?]`.
- labels: An `Output` with length `[batch_size, padded_length]`.
- sequence_length: An `Output` with shape `[batch_size]` and dtype `int32`
+ labels: A `Tensor` with shape `[batch_size, padded_length]`.
+ sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
target_column: An initialized `TargetColumn`, used to calculate predictions.
features: A `dict` containing the input and (optionally) sequence length
information and initial state.
Returns:
- A scalar `Output` containing the loss.
+ A scalar `Tensor` containing the loss.
"""
with ops.name_scope('MultiValueLoss'):
activations_masked, labels_masked = mask_activations_and_labels(
@@ -450,15 +450,15 @@ def _single_value_loss(
Args:
activations: Output from an RNN. Should have dtype `float32` and shape
`[batch_size, padded_length, ?]`.
- labels: An `Output` with length `[batch_size]`.
- sequence_length: An `Output` with shape `[batch_size]` and dtype `int32`
+ labels: A `Tensor` with shape `[batch_size]`.
+ sequence_length: A `Tensor` with shape `[batch_size]` and dtype `int32`
containing the length of each sequence in the batch. If `None`, sequences
are assumed to be unpadded.
target_column: An initialized `TargetColumn`, used to calculate predictions.
features: A `dict` containing the input and (optionally) sequence length
information and initial state.
Returns:
- A scalar `Output` containing the loss.
+ A scalar `Tensor` containing the loss.
"""
with ops.name_scope('SingleValueLoss'):
diff --git a/tensorflow/contrib/learn/python/learn/estimators/estimator.py b/tensorflow/contrib/learn/python/learn/estimators/estimator.py
index 53ebc2efad..6b5b241936 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/estimator.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/estimator.py
@@ -127,8 +127,8 @@ def infer_real_valued_columns_from_input_fn(input_fn):
Args:
input_fn: Input function returning a tuple of:
- features - Dictionary of string feature name to `Output` or `Output`.
- labels - `Output` of label values.
+ features - Dictionary of string feature name to `Tensor`.
+ labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
@@ -454,7 +454,7 @@ class BaseEstimator(
Returns:
A numpy array of predicted classes or regression values if the
- constructor's `model_fn` returns an `Output` for `predictions` or a `dict`
+ constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
@@ -515,20 +515,20 @@ class BaseEstimator(
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
- `Output` of `Example` strings, parses it into features that are then
+ `Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
- string key to `Output` and labels is an `Output` that's currently not
+ string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
key into the features dict returned by `input_fn` that corresponds to
- the raw `Example` strings `Output` that the exported model will take as
+ the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
- signature map, given `Output` of `Example` strings, `dict` of `Output`s
- for features and `Output` or `dict` of `Output`s for predictions.
+ signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
+ for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
@@ -562,8 +562,8 @@ class BaseEstimator(
Expected to be overridden by sub-classes that require custom support.
Args:
- features: `Output` or `dict` of `Output` objects.
- labels: `Output` or `dict` of `Output` objects.
+ features: `Tensor` or `dict` of `Tensor` objects.
+ labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
@@ -575,7 +575,7 @@ class BaseEstimator(
"""Method that builds model graph and returns prediction ops.
Args:
- features: `Output` or `dict` of `Output` objects.
+ features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
@@ -588,8 +588,8 @@ class BaseEstimator(
Expected to be overridden by sub-classes that require custom support.
Args:
- features: `Output` or `dict` of `Output` objects.
- labels: `Output` or `dict` of `Output` objects.
+ features: `Tensor` or `dict` of `Tensor` objects.
+ labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
@@ -619,7 +619,7 @@ class BaseEstimator(
examples_batch: batch of tf.Example
Returns:
- features: `Output` or `dict` of `Output` objects.
+ features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
@@ -934,9 +934,9 @@ class Estimator(BaseEstimator):
Args:
model_fn: Model function. Follows the signature:
* Args:
- * `features` are single `Output` or `dict` of `Output`s
+ * `features` are single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
- * `labels` are `Output` or `dict` of `Output`s (for multi-head
+ * `labels` are `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
@@ -952,11 +952,11 @@ class Estimator(BaseEstimator):
Also supports a legacy signature which returns tuple of:
- * predictions: `Output`, `SparseTensor` or dictionary of same.
- Can also be any type that is convertible to an `Output` or
+ * predictions: `Tensor`, `SparseTensor` or dictionary of same.
+ Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
- * loss: Scalar loss `Output`.
- * train_op: Training update `Output` or `Operation`.
+ * loss: Scalar loss `Tensor`.
+ * train_op: Training update `Tensor` or `Operation`.
Supports the following three signatures for the function:
@@ -1043,8 +1043,8 @@ class Estimator(BaseEstimator):
build model.
Args:
- features: `Output` or `dict` of `Output` objects.
- labels: `Output` or `dict` of `Output` objects.
+ features: `Tensor` or `dict` of `Tensor` objects.
+ labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
@@ -1059,8 +1059,8 @@ class Estimator(BaseEstimator):
build model.
Args:
- features: `Output` or `dict` of `Output` objects.
- labels: `Output` or `dict` of `Output` objects.
+ features: `Tensor` or `dict` of `Tensor` objects.
+ labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
@@ -1097,7 +1097,7 @@ class Estimator(BaseEstimator):
build model.
Args:
- features: `Output` or `dict` of `Output` objects.
+ features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
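For context, a minimal sketch of an `Estimator` built from a model_fn that uses the legacy `(predictions, loss, train_op)` return signature described above (the toy linear model is illustrative and assumes `fit` is called with plain numpy `x` and `y`):

    import tensorflow as tf

    def my_model_fn(features, labels, mode):
      w = tf.get_variable('w', shape=[],
                          initializer=tf.constant_initializer(0.0))
      b = tf.get_variable('b', shape=[],
                          initializer=tf.constant_initializer(0.0))
      predictions = features * w + b
      loss = tf.reduce_mean(tf.square(predictions - labels))
      train_op = tf.contrib.layers.optimize_loss(
          loss, tf.contrib.framework.get_global_step(),
          learning_rate=0.1, optimizer='SGD')
      return predictions, loss, train_op

    estimator = tf.contrib.learn.Estimator(model_fn=my_model_fn)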
diff --git a/tensorflow/contrib/learn/python/learn/estimators/head.py b/tensorflow/contrib/learn/python/learn/estimators/head.py
index edb91ca49a..e7d9eb4f34 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/head.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/head.py
@@ -305,7 +305,7 @@ class _RegressionHead(_Head):
name: Op name.
Returns:
- A loss `Output`.
+ A loss `Tensor`.
"""
labels = _check_labels(labels, self._label_name)
@@ -349,10 +349,10 @@ class _RegressionHead(_Head):
"""Returns a dict of predictions.
Args:
- logits: logits `Output` before applying possible centered bias.
+ logits: logits `Tensor` before applying possible centered bias.
Returns:
- Dict of prediction `Output` keyed by `PredictionKey`.
+ Dict of prediction `Tensor` keyed by `PredictionKey`.
"""
if self._enable_centered_bias:
logits = nn.bias_add(logits, _centered_bias(
@@ -364,10 +364,10 @@ class _RegressionHead(_Head):
"""Returns a dict of predictions.
Args:
- logits: logits `Output` after applying possible centered bias.
+ logits: logits `Tensor` after applying possible centered bias.
Returns:
- Dict of prediction `Output` keyed by `PredictionKey`.
+ Dict of prediction `Tensor` keyed by `PredictionKey`.
"""
predictions = {}
if self.logits_dimension == 1:
@@ -490,7 +490,7 @@ class _MultiClassHead(_Head):
name: Op name.
Returns:
- A loss `Output`.
+ A loss `Tensor`.
"""
labels = _check_labels(labels, self._label_name)
@@ -534,10 +534,10 @@ class _MultiClassHead(_Head):
"""Returns a dict of predictions.
Args:
- logits: logits `Output` before applying possible centered bias.
+ logits: logits `Tensor` before applying possible centered bias.
Returns:
- Dict of prediction `Output` keyed by `PredictionKey`.
+ Dict of prediction `Tensor` keyed by `PredictionKey`.
"""
if self._enable_centered_bias:
logits = nn.bias_add(logits, _centered_bias(
@@ -549,10 +549,10 @@ class _MultiClassHead(_Head):
"""Returns a dict of predictions.
Args:
- logits: logits `Output` after applying possible centered bias.
+ logits: logits `Tensor` after applying possible centered bias.
Returns:
- Dict of prediction `Output` keyed by `PredictionKey`.
+ Dict of prediction `Tensor` keyed by `PredictionKey`.
"""
predictions = {prediction_key.PredictionKey.LOGITS: logits}
if self.logits_dimension == 1:
diff --git a/tensorflow/contrib/learn/python/learn/estimators/linear.py b/tensorflow/contrib/learn/python/learn/estimators/linear.py
index e1bf477079..a1175c327d 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/linear.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/linear.py
@@ -85,8 +85,8 @@ def _linear_model_fn(features, labels, mode, params):
"""A model_fn for linear models that use a gradient-based optimizer.
Args:
- features: `Output` or dict of `Output` (depends on data passed to `fit`).
- labels: `Output` of shape [batch_size, 1] or [batch_size] labels of
+ features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
+ labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
@@ -161,8 +161,8 @@ def sdca_model_fn(features, labels, mode, params):
"""A model_fn for linear models that use the SDCA optimizer.
Args:
- features: A dict of `Output` keyed by column name.
- labels: `Output` of shape [batch_size, 1] or [batch_size] labels of
+ features: A dict of `Tensor` keyed by column name.
+ labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
@@ -309,7 +309,7 @@ class LinearClassifier(evaluable.Evaluable, trainable.Trainable):
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
- `key=weight_column_name` whose value is an `Output`.
+ `key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
@@ -317,7 +317,7 @@ class LinearClassifier(evaluable.Evaluable, trainable.Trainable):
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
- whose `value` is an `Output`.
+ whose `value` is a `Tensor`.
"""
def __init__(self, # _joint_weight pylint: disable=invalid-name
@@ -581,7 +581,7 @@ class LinearRegressor(evaluable.Evaluable, trainable.Trainable):
otherwise there will be a KeyError:
* if `weight_column_name` is not `None`:
- key=weight_column_name, value=an `Output`
+ key=weight_column_name, value=a `Tensor`
* for column in `feature_columns`:
- if isinstance(column, `SparseColumn`):
key=column.name, value=a `SparseTensor`
@@ -589,7 +589,7 @@ class LinearRegressor(evaluable.Evaluable, trainable.Trainable):
{key=id column name, value=a `SparseTensor`,
key=weight column name, value=a `SparseTensor`}
- if isinstance(column, `RealValuedColumn`):
- key=column.name, value=an `Output`
+ key=column.name, value=a `Tensor`
"""
def __init__(self, # _joint_weights: pylint: disable=invalid-name
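A minimal sketch of the `LinearClassifier` documented above (the column name and bucket size are illustrative):

    import tensorflow as tf

    language = tf.contrib.layers.sparse_column_with_hash_bucket(
        'language', hash_bucket_size=100)
    classifier = tf.contrib.learn.LinearClassifier(feature_columns=[language])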
diff --git a/tensorflow/contrib/learn/python/learn/estimators/model_fn.py b/tensorflow/contrib/learn/python/learn/estimators/model_fn.py
index 75aa7c860c..3f9351ce22 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/model_fn.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/model_fn.py
@@ -59,11 +59,11 @@ class ModelFnOps(collections.namedtuple(
Args:
mode: One of `ModeKeys`. Specifies if this is training, evaluation or
prediction.
- predictions: Predictions `Output` or dict of `Output`.
- loss: Training loss `Output`.
+ predictions: Predictions `Tensor` or dict of `Tensor`.
+ loss: Training loss `Tensor`.
train_op: Op for the training step.
eval_metric_ops: Dict of metric results keyed by name. The values of the
- dict are the results of calling a metric function, such as `Output`.
+ dict are the results of calling a metric function, such as `Tensor`.
signature_fn: The signature_fn used for exporting.
Returns:
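A sketch of returning the structured `ModelFnOps` described above instead of the legacy tuple, assuming the module path shown in this diff (the toy model is illustrative):

    import tensorflow as tf
    from tensorflow.contrib.learn.python.learn.estimators import model_fn

    def structured_model_fn(features, labels, mode):
      w = tf.get_variable('w', shape=[],
                          initializer=tf.constant_initializer(0.0))
      predictions = features * w
      loss = tf.reduce_mean(tf.square(predictions - labels))
      train_op = tf.contrib.layers.optimize_loss(
          loss, tf.contrib.framework.get_global_step(),
          learning_rate=0.1, optimizer='SGD')
      return model_fn.ModelFnOps(mode=mode, predictions=predictions,
                                 loss=loss, train_op=train_op)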
diff --git a/tensorflow/contrib/learn/python/learn/estimators/random_forest.py b/tensorflow/contrib/learn/python/learn/estimators/random_forest.py
index e6fddf49f3..c2c41255c9 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/random_forest.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/random_forest.py
@@ -45,7 +45,7 @@ def _assert_float32(tensors):
"""Assert all tensors are float32.
Args:
- tensors: `Output` or `dict` of `Output` objects.
+ tensors: `Tensor` or `dict` of `Tensor` objects.
Raises:
TypeError: if any tensor is not float32.
diff --git a/tensorflow/contrib/learn/python/learn/estimators/svm.py b/tensorflow/contrib/learn/python/learn/estimators/svm.py
index fc20547899..eeee673c5a 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/svm.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/svm.py
@@ -85,15 +85,15 @@ class SVM(trainable.Trainable, evaluable.Evaluable):
Input of `fit` and `evaluate` should have following features, otherwise there
will be a `KeyError`:
- a feature with `key=example_id_column` whose value is an `Output` of dtype
+ a feature with `key=example_id_column` whose value is a `Tensor` of dtype
string.
if `weight_column_name` is not `None`, a feature with
- `key=weight_column_name` whose value is an `Output`.
+ `key=weight_column_name` whose value is a `Tensor`.
for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
- whose `value` is an `Output`.
+ whose `value` is a `Tensor`.
"""
def __init__(self,
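A minimal sketch of constructing the `SVM` estimator documented above (the example-id and feature column names are illustrative):

    import tensorflow as tf

    feature1 = tf.contrib.layers.real_valued_column('feature1')
    svm = tf.contrib.learn.SVM(
        example_id_column='example_id',
        feature_columns=[feature1],
        l2_regularization=10.0)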
diff --git a/tensorflow/contrib/learn/python/learn/estimators/tensor_signature.py b/tensorflow/contrib/learn/python/learn/estimators/tensor_signature.py
index 93e6119473..a120bc6cc3 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/tensor_signature.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/tensor_signature.py
@@ -31,7 +31,7 @@ from tensorflow.python.ops import parsing_ops
class TensorSignature(collections.namedtuple(
"TensorSignature", ["dtype", "shape", "is_sparse"])):
- """Signature of the `Output` object.
+ """Signature of the `Tensor` object.
Useful to check compatibility of tensors.
@@ -104,7 +104,7 @@ def tensors_compatible(tensors, signatures):
"""Check that tensors are compatible with signatures.
Args:
- tensors: Dict of `Output` objects or single `Output` object.
+ tensors: Dict of `Tensor` objects or single `Tensor` object.
signatures: Dict of `TensorSignature` objects or
single `TensorSignature` object.
@@ -135,7 +135,7 @@ def create_signatures(tensors):
"""Creates TensorSignature objects for given tensors.
Args:
- tensors: Dict of `Output` objects or single `Output`.
+ tensors: Dict of `Tensor` objects or single `Tensor`.
Returns:
Dict of `TensorSignature` objects or single `TensorSignature`.
@@ -173,11 +173,11 @@ def create_example_parser_from_signatures(signatures, examples_batch,
Args:
signatures: Dict of `TensorSignature` objects or single `TensorSignature`.
- examples_batch: string `Output` of serialized `Example` proto.
+ examples_batch: string `Tensor` of serialized `Example` proto.
single_feature_name: string, single feature name.
Returns:
- features: `Output` or `dict` of `Output` objects.
+ features: `Tensor` or `dict` of `Tensor` objects.
"""
feature_spec = {}
if not isinstance(signatures, dict):
diff --git a/tensorflow/contrib/learn/python/learn/evaluable.py b/tensorflow/contrib/learn/python/learn/evaluable.py
index 9bd803ea35..051d132416 100644
--- a/tensorflow/contrib/learn/python/learn/evaluable.py
+++ b/tensorflow/contrib/learn/python/learn/evaluable.py
@@ -56,8 +56,8 @@ class Evaluable(object):
be integers representing the class index (i.e. values from 0 to
n_classes-1).
input_fn: Input function returning a tuple of:
- features - Dictionary of string feature name to `Output` or `Output`.
- labels - `Output` or dictionary of `Output` with labels.
+ features - Dictionary of string feature name to `Tensor`.
+ labels - `Tensor` or dictionary of `Tensor` with labels.
If input_fn is set, `x`, `y`, and `batch_size` must be `None`. If
`steps` is not provided, this should raise `OutOfRangeError` or
`StopIteration` after the desired amount of data (e.g., one epoch) has
diff --git a/tensorflow/contrib/learn/python/learn/graph_actions.py b/tensorflow/contrib/learn/python/learn/graph_actions.py
index 4190dd16ff..5781d88bb8 100644
--- a/tensorflow/contrib/learn/python/learn/graph_actions.py
+++ b/tensorflow/contrib/learn/python/learn/graph_actions.py
@@ -161,7 +161,7 @@ def _monitored_train(graph,
one is extracted from the graph using the same logic as in `Supervisor`.
init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
default.
- init_feed_dict: A dictionary that maps `Output` objects to feed values.
+ init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
init_fn: Optional callable passed to Supervisor to initialize the model.
log_every_steps: Output logs regularly. The logs contain timing data and the
@@ -348,7 +348,7 @@ def train(graph,
one is extracted from the graph using the same logic as in `Supervisor`.
init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
default.
- init_feed_dict: A dictionary that maps `Output` objects to feed values.
+ init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
init_fn: Optional callable passed to Supervisor to initialize the model.
log_every_steps: Output logs regularly. The logs contain timing data and the
@@ -662,7 +662,7 @@ def evaluate(graph,
"""Evaluate a model loaded from a checkpoint.
Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint
- to restore variables from, and a `dict` of `Output`s to evaluate, run an eval
+ to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval
loop for `max_steps` steps, or until an exception (generally, an
end-of-input signal from a reader operation) is raised from running
`eval_dict`.
@@ -683,7 +683,7 @@ def evaluate(graph,
returned. If `update_op` is None, then it's evaluated in every step. If
`max_steps` is `None`, this should depend on a reader that will raise an
end-of-input exception when the inputs are exhausted.
- update_op: An `Output` which is run in every step.
+ update_op: A `Tensor` which is run in every step.
global_step_tensor: A `Variable` containing the global step. If `None`,
one is extracted from the graph using the same logic as in `Supervisor`.
Used to place eval summaries on training curves.
@@ -828,7 +828,7 @@ def run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):
init all variables.
Args:
- output_dict: A `dict` mapping string names to `Output` objects to run.
+ output_dict: A `dict` mapping string names to `Tensor` objects to run.
Tensors must all be from the same graph.
feed_dicts: Iterable of `dict` objects of input values to feed.
restore_checkpoint_path: A string containing the path to a checkpoint to
@@ -837,7 +837,7 @@ def run_feeds_iter(output_dict, feed_dicts, restore_checkpoint_path=None):
Yields:
A sequence of dicts of values read from `output_dict` tensors, one item
yielded for each item in `feed_dicts`. Keys are the same as `output_dict`,
- values are the results read from the corresponding `Output` in
+ values are the results read from the corresponding `Tensor` in
`output_dict`.
Raises:
@@ -886,13 +886,13 @@ def infer(restore_checkpoint_path, output_dict, feed_dict=None):
Args:
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
- output_dict: A `dict` mapping string names to `Output` objects to run.
+ output_dict: A `dict` mapping string names to `Tensor` objects to run.
Tensors must all be from the same graph.
- feed_dict: `dict` object mapping `Output` objects to input values to feed.
+ feed_dict: `dict` object mapping `Tensor` objects to input values to feed.
Returns:
Dict of values read from `output_dict` tensors. Keys are the same as
- `output_dict`, values are the results read from the corresponding `Output`
+ `output_dict`, values are the results read from the corresponding `Tensor`
in `output_dict`.
Raises:
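A sketch of `infer` as documented above, assuming the module path shown in this diff (the checkpoint path is hypothetical and the output Tensor is illustrative):

    import tensorflow as tf
    from tensorflow.contrib.learn.python.learn import graph_actions

    logits = tf.constant([0.1, 0.9], name='logits')
    results = graph_actions.infer(
        restore_checkpoint_path='/tmp/model/model.ckpt-1000',  # hypothetical
        output_dict={'logits': logits})
    # results['logits'] holds the value read from the corresponding Tensor.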
diff --git a/tensorflow/contrib/learn/python/learn/graph_actions_test.py b/tensorflow/contrib/learn/python/learn/graph_actions_test.py
index 177f15ea5a..caacb1928a 100644
--- a/tensorflow/contrib/learn/python/learn/graph_actions_test.py
+++ b/tensorflow/contrib/learn/python/learn/graph_actions_test.py
@@ -148,7 +148,7 @@ class GraphActionsTest(tf.test.TestCase):
This includes a regular variable, local variable, and fake table.
Returns:
- Tuple of 3 `Output` objects, 2 input and 1 output.
+ Tuple of 3 `Tensor` objects, 2 input and 1 output.
"""
tf.contrib.framework.create_global_step()
in0 = tf.Variable(1.0)
@@ -567,7 +567,7 @@ class GraphActionsTrainTest(tf.test.TestCase):
This includes a regular variable, local variable, and fake table.
Returns:
- Tuple of 3 `Output` objects, 2 input and 1 output.
+ Tuple of 3 `Tensor` objects, 2 input and 1 output.
"""
tf.contrib.framework.create_global_step()
in0 = tf.Variable(1.0)
diff --git a/tensorflow/contrib/learn/python/learn/learn_io/graph_io.py b/tensorflow/contrib/learn/python/learn/learn_io/graph_io.py
index c9e761b3b7..37268c319c 100644
--- a/tensorflow/contrib/learn/python/learn/learn_io/graph_io.py
+++ b/tensorflow/contrib/learn/python/learn/learn_io/graph_io.py
@@ -78,7 +78,7 @@ def read_batch_examples(file_pattern, batch_size, reader,
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
- batch_size: An int or scalar `Output` specifying the batch size to use.
+ batch_size: An int or scalar `Tensor` specifying the batch size to use.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
@@ -88,14 +88,14 @@ def read_batch_examples(file_pattern, batch_size, reader,
`tf.global_variables_initializer()` as shown in the tests.
queue_capacity: Capacity for input queue.
num_threads: The number of threads enqueuing examples.
- read_batch_size: An int or scalar `Output` specifying the number of
+ read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once.
parse_fn: Parsing function, takes `Example` Tensor and returns parsed
representation. If `None`, no parsing is done.
name: Name of resulting op.
Returns:
- String `Output` of batched `Example` proto.
+ String `Tensor` of batched `Example` proto.
Raises:
ValueError: for invalid inputs.
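For context, a minimal sketch of `read_batch_examples` as documented above (the file pattern is hypothetical):

    import tensorflow as tf

    # One string Tensor of batched serialized Example protos.
    examples = tf.contrib.learn.read_batch_examples(
        file_pattern='/tmp/data/train-*.tfrecord',  # hypothetical files
        batch_size=32,
        reader=tf.TFRecordReader)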
@@ -130,7 +130,7 @@ def read_keyed_batch_examples(
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
- batch_size: An int or scalar `Output` specifying the batch size to use.
+ batch_size: An int or scalar `Tensor` specifying the batch size to use.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
@@ -140,7 +140,7 @@ def read_keyed_batch_examples(
`tf.global_variables_initializer()` as shown in the tests.
queue_capacity: Capacity for input queue.
num_threads: The number of threads enqueuing examples.
- read_batch_size: An int or scalar `Output` specifying the number of
+ read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once.
parse_fn: Parsing function, takes `Example` Tensor and returns parsed
representation. If `None`, no parsing is done.
@@ -148,8 +148,8 @@ def read_keyed_batch_examples(
Returns:
Returns tuple of:
- - `Output` of string keys.
- - String `Output` of batched `Example` proto.
+ - `Tensor` of string keys.
+ - String `Tensor` of batched `Example` proto.
Raises:
ValueError: for invalid inputs.
@@ -199,7 +199,7 @@ def _read_keyed_batch_examples_shared_queue(file_pattern,
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
- batch_size: An int or scalar `Output` specifying the batch size to use.
+ batch_size: An int or scalar `Tensor` specifying the batch size to use.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
@@ -209,7 +209,7 @@ def _read_keyed_batch_examples_shared_queue(file_pattern,
`tf.global_variables_initializer()` as shown in the tests.
queue_capacity: Capacity for input queue.
num_threads: The number of threads enqueuing examples.
- read_batch_size: An int or scalar `Output` specifying the number of
+ read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once.
parse_fn: Parsing function, takes `Example` Tensor and returns parsed
representation. If `None`, no parsing is done.
@@ -217,8 +217,8 @@ def _read_keyed_batch_examples_shared_queue(file_pattern,
Returns:
Returns tuple of:
- - `Output` of string keys.
- - String `Output` of batched `Example` proto.
+ - `Tensor` of string keys.
+ - String `Tensor` of batched `Example` proto.
Raises:
ValueError: for invalid inputs.
@@ -323,7 +323,7 @@ def _read_keyed_batch_examples_helper(file_pattern,
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
- batch_size: An int or scalar `Output` specifying the batch size to use.
+ batch_size: An int or scalar `Tensor` specifying the batch size to use.
reader: A function or class that returns an object with
`read` method, (filename tensor) -> (example tensor).
randomize_input: Whether the input should be randomized.
@@ -333,7 +333,7 @@ def _read_keyed_batch_examples_helper(file_pattern,
`tf.global_variables_initializer()` as shown in the tests.
queue_capacity: Capacity for input queue.
num_threads: The number of threads enqueuing examples.
- read_batch_size: An int or scalar `Output` specifying the number of
+ read_batch_size: An int or scalar `Tensor` specifying the number of
records to read at once.
parse_fn: Parsing function, takes `Example` Tensor and returns parsed
representation. If `None`, no parsing is done.
@@ -342,8 +342,8 @@ def _read_keyed_batch_examples_helper(file_pattern,
Returns:
Returns tuple of:
- - `Output` of string keys.
- - String `Output` of batched `Example` proto.
+ - `Tensor` of string keys.
+ - String `Tensor` of batched `Example` proto.
Raises:
ValueError: for invalid inputs.
@@ -447,7 +447,7 @@ def read_keyed_batch_features(file_pattern,
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
- batch_size: An int or scalar `Output` specifying the batch size to use.
+ batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
reader: A function or class that returns an object with
@@ -477,8 +477,8 @@ def read_keyed_batch_features(file_pattern,
Returns:
Returns tuple of:
- - `Output` of string keys.
- - A dict of `Output` or `SparseTensor` objects for each in `features`.
+ - `Tensor` of string keys.
+ - A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
@@ -532,7 +532,7 @@ def _read_keyed_batch_features_shared_queue(file_pattern,
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
- batch_size: An int or scalar `Output` specifying the batch size to use.
+ batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
reader: A function or class that returns an object with
@@ -555,8 +555,8 @@ def _read_keyed_batch_features_shared_queue(file_pattern,
Returns:
Returns tuple of:
- - `Output` of string keys.
- - A dict of `Output` or `SparseTensor` objects for each in `features`.
+ - `Tensor` of string keys.
+ - A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
@@ -604,8 +604,8 @@ def queue_parsed_features(parsed_features,
All ops are added to the default graph.
Args:
- parsed_features: A dict of string key to `Output` or `SparseTensor` objects.
- keys: `Output` of string keys.
+ parsed_features: A dict of string key to `Tensor` or `SparseTensor` objects.
+ keys: `Tensor` of string keys.
feature_queue_capacity: Capacity of the parsed features queue.
num_queue_runners: Deprecated. Defaults to 2 if this and
`num_enqueue_threads` are both `None`. This is the number of queue
@@ -622,8 +622,8 @@ def queue_parsed_features(parsed_features,
Returns:
Returns tuple of:
- - `Output` corresponding to `keys` if provided, otherwise `None`.
- - A dict of string key to `Output` or `SparseTensor` objects corresponding
+ - `Tensor` corresponding to `keys` if provided, otherwise `None`.
+ - A dict of string key to `Tensor` or `SparseTensor` objects corresponding
to `parsed_features`.
Raises:
ValueError: for invalid inputs.
@@ -748,7 +748,7 @@ def read_batch_features(file_pattern,
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
- batch_size: An int or scalar `Output` specifying the batch size to use.
+ batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
reader: A function or class that returns an object with
@@ -767,7 +767,7 @@ def read_batch_features(file_pattern,
name: Name of resulting op.
Returns:
- A dict of `Output` or `SparseTensor` objects for each in `features`.
+ A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
@@ -793,7 +793,7 @@ def read_batch_record_features(file_pattern, batch_size, features,
Args:
file_pattern: List of files or pattern of file paths containing
`Example` records. See `tf.gfile.Glob` for pattern rules.
- batch_size: An int or scalar `Output` specifying the batch size to use.
+ batch_size: An int or scalar `Tensor` specifying the batch size to use.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values.
randomize_input: Whether the input should be randomized.
@@ -806,7 +806,7 @@ def read_batch_record_features(file_pattern, batch_size, features,
name: Name of resulting op.
Returns:
- A dict of `Output` or `SparseTensor` objects for each in `features`.
+ A dict of `Tensor` or `SparseTensor` objects for each in `features`.
Raises:
ValueError: for invalid inputs.
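The renames above leave the `tf.contrib.learn` input-pipeline contract itself unchanged. A minimal usage sketch of `read_batch_features`, assuming TFRecord files and the exported `tf.contrib.learn.read_batch_features` wrapper of this era (the file path and feature names below are hypothetical):

```python
import tensorflow as tf

# Hypothetical files and feature spec; `reader` follows the documented
# (filename tensor) -> (example tensor) contract via its `read` method.
features = tf.contrib.learn.read_batch_features(
    file_pattern="/tmp/data/*.tfrecord",
    batch_size=32,
    features={
        "age": tf.FixedLenFeature([1], dtype=tf.int64),
        "query": tf.VarLenFeature(dtype=tf.string),
    },
    reader=tf.TFRecordReader)
# `features` is a dict of `Tensor` / `SparseTensor`, one entry per spec key.
```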
diff --git a/tensorflow/contrib/learn/python/learn/metric_spec.py b/tensorflow/contrib/learn/python/learn/metric_spec.py
index ba9ede3324..a4df7ba658 100644
--- a/tensorflow/contrib/learn/python/learn/metric_spec.py
+++ b/tensorflow/contrib/learn/python/learn/metric_spec.py
@@ -181,7 +181,7 @@ class MetricSpec(object):
The result of calling `metric_fn`.
Raises:
- ValueError: If `predictions` or `labels` is a single `Output` and
+ ValueError: If `predictions` or `labels` is a single `Tensor` and
`self.prediction_key` or `self.label_key` is not `None`; or if
`self.label_key` is `None` but `labels` is a dict with more than one
element, or if `self.prediction_key` is `None` but `predictions` is a
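The `prediction_key` / `label_key` contract above is what makes dict-valued model outputs usable with plain metric functions. A hedged sketch (the metric function and key name are illustrative):

```python
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec

# `prediction_key` selects one Tensor out of a dict-valued `predictions`;
# leaving `label_key` as None passes `labels` through as a single Tensor.
accuracy_spec = MetricSpec(
    metric_fn=metrics_lib.streaming_accuracy,
    prediction_key="classes",
    label_key=None)
# Typically handed to Estimator.evaluate(metrics={"accuracy": accuracy_spec}).
```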
diff --git a/tensorflow/contrib/learn/python/learn/monitors.py b/tensorflow/contrib/learn/python/learn/monitors.py
index f5399cec2c..9c70cc8dea 100644
--- a/tensorflow/contrib/learn/python/learn/monitors.py
+++ b/tensorflow/contrib/learn/python/learn/monitors.py
@@ -218,7 +218,7 @@ class BaseMonitor(object):
step: `int`, the current value of the global step.
Returns:
- List of `Output` objects or string tensor names to be run.
+ List of `Tensor` objects or string tensor names to be run.
Raises:
ValueError: if we've already begun a step, or `step` < 0, or
@@ -557,7 +557,7 @@ class SummarySaver(EveryN):
"""Initializes a `SummarySaver` monitor.
Args:
- summary_op: `Output` of type `string`. A serialized `Summary` protocol
+ summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
buffer, as output by TF summary methods like `summary.scalar` or
`summary.merge_all`.
save_steps: `int`, save summaries every N steps. See `EveryN`.
@@ -783,7 +783,7 @@ def get_default_monitors(loss_op=None, summary_op=None, save_summary_steps=100,
"""Returns a default set of typically-used monitors.
Args:
- loss_op: `Output`, the loss tensor. This will be printed using `PrintTensor`
+ loss_op: `Tensor`, the loss tensor. This will be printed using `PrintTensor`
at the default interval.
summary_op: See `SummarySaver`.
save_summary_steps: See `SummarySaver`.
@@ -919,18 +919,18 @@ class ExportMonitor(EveryN):
every_n_steps: Run monitor every N steps.
export_dir: str, folder to export.
input_fn: A function that takes no argument and returns a tuple of
- (features, labels), where features is a dict of string key to `Output`
- and labels is an `Output` that's currently not used (and so can be
+ (features, labels), where features is a dict of string key to `Tensor`
+ and labels is a `Tensor` that's currently not used (and so can be
`None`).
input_feature_key: String key into the features dict returned by
- `input_fn` that corresponds to the raw `Example` strings `Output` that
+ `input_fn` that corresponds to the raw `Example` strings `Tensor` that
the exported model will take as input. Can only be `None` if you're
using a custom `signature_fn` that does not use the first arg
(examples).
exports_to_keep: int, number of exports to keep.
signature_fn: Function that returns a default signature and a named
- signature map, given `Output` of `Example` strings, `dict` of `Output`s
- for features and `dict` of `Output`s for predictions.
+ signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
+ for features and `dict` of `Tensor`s for predictions.
default_batch_size: Default batch size of the `Example` placeholder.
Raises:
@@ -1147,7 +1147,7 @@ class NanLoss(EveryN):
"""Initializes NanLoss monitor.
Args:
- loss_tensor: `Output`, the loss tensor.
+ loss_tensor: `Tensor`, the loss tensor.
every_n_steps: `int`, run check every this many steps.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
diff --git a/tensorflow/contrib/learn/python/learn/ops/losses_ops.py b/tensorflow/contrib/learn/python/learn/ops/losses_ops.py
index d816a9481b..083281c1d9 100644
--- a/tensorflow/contrib/learn/python/learn/ops/losses_ops.py
+++ b/tensorflow/contrib/learn/python/learn/ops/losses_ops.py
@@ -66,7 +66,7 @@ def softmax_classifier(tensor_in,
name: Operation name.
Returns:
- `tuple` of softmax predictions and loss `Output`s.
+ `tuple` of softmax predictions and loss `Tensor`s.
"""
with ops.name_scope(name, 'softmax_classifier', [tensor_in, labels]):
logits = nn.xw_plus_b(tensor_in, weights, biases)
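The returned pair is (softmax predictions, loss), both `Tensor`s. A minimal sketch with illustrative shapes:

```python
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.ops import losses_ops

x = tf.placeholder(tf.float32, [None, 10])   # inputs
y = tf.placeholder(tf.float32, [None, 3])    # one-hot labels
w = tf.Variable(tf.zeros([10, 3]))
b = tf.Variable(tf.zeros([3]))

# Returns a tuple of softmax predictions and loss `Tensor`s, per the docstring.
predictions, loss = losses_ops.softmax_classifier(x, y, w, b)
```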
diff --git a/tensorflow/contrib/learn/python/learn/trainable.py b/tensorflow/contrib/learn/python/learn/trainable.py
index fe4a7797d7..8a1548738e 100644
--- a/tensorflow/contrib/learn/python/learn/trainable.py
+++ b/tensorflow/contrib/learn/python/learn/trainable.py
@@ -43,8 +43,8 @@ class Trainable(object):
be integers representing the class index (i.e. values from 0 to
n_classes-1).
input_fn: Input function returning a tuple of:
- features - Dictionary of string feature name to `Output` or `Output`.
- labels - `Output` or dictionary of `Output` with labels.
+ features - Dictionary of string feature name to `Tensor`, or a single `Tensor`.
+ labels - `Tensor` or dictionary of `Tensor` with labels.
If input_fn is set, `x`, `y`, and `batch_size` must be `None`.
steps: Number of steps for which to train model. If `None`, train forever.
'steps' works incrementally. If you call fit(steps=10) two times, then
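A sketch of an `input_fn` satisfying this tuple contract; constant data stands in for a real pipeline:

```python
import tensorflow as tf

def input_fn():
  # features: dict of string feature name to `Tensor`; labels: a `Tensor`.
  features = {"x": tf.constant([[1.0], [2.0], [3.0]])}
  labels = tf.constant([[0], [1], [1]])
  return features, labels

# With input_fn set, x, y and batch_size must all be None:
# estimator.fit(input_fn=input_fn, steps=10)
```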
diff --git a/tensorflow/contrib/learn/python/learn/utils/export.py b/tensorflow/contrib/learn/python/learn/utils/export.py
index 8139151db5..fe2f8320f2 100644
--- a/tensorflow/contrib/learn/python/learn/utils/export.py
+++ b/tensorflow/contrib/learn/python/learn/utils/export.py
@@ -89,9 +89,9 @@ def generic_signature_fn(examples, unused_features, predictions):
export_estimator.
Args:
- examples: `Output`.
- unused_features: `dict` of `Output`s.
- predictions: `Output` or `dict` of `Output`s.
+ examples: `Tensor`.
+ unused_features: `dict` of `Tensor`s.
+ predictions: `Tensor` or `dict` of `Tensor`s.
Returns:
Tuple of default signature and empty named signatures.
@@ -114,10 +114,10 @@ def classification_signature_fn(examples, unused_features, predictions):
"""Creates classification signature from given examples and predictions.
Args:
- examples: `Output`.
- unused_features: `dict` of `Output`s.
- predictions: `Output` or dict of tensors that contains the classes tensor
- as in {'classes': `Output`}.
+ examples: `Tensor`.
+ unused_features: `dict` of `Tensor`s.
+ predictions: `Tensor` or dict of tensors that contains the classes tensor
+ as in {'classes': `Tensor`}.
Returns:
Tuple of default classification signature and empty named signatures.
@@ -142,10 +142,10 @@ def classification_signature_fn_with_prob(
"""Classification signature from given examples and predicted probabilities.
Args:
- examples: `Output`.
- unused_features: `dict` of `Output`s.
- predictions: `Output` of predicted probabilities or dict that contains the
- probabilities tensor as in {'probabilities', `Output`}.
+ examples: `Tensor`.
+ unused_features: `dict` of `Tensor`s.
+ predictions: `Tensor` of predicted probabilities or dict that contains the
+ probabilities tensor as in {'probabilities': `Tensor`}.
Returns:
Tuple of default classification signature and empty named signatures.
@@ -169,9 +169,9 @@ def regression_signature_fn(examples, unused_features, predictions):
"""Creates regression signature from given examples and predictions.
Args:
- examples: `Output`.
- unused_features: `dict` of `Output`s.
- predictions: `Output`.
+ examples: `Tensor`.
+ unused_features: `dict` of `Tensor`s.
+ predictions: `Tensor`.
Returns:
Tuple of default regression signature and empty named signatures.
@@ -191,11 +191,11 @@ def logistic_regression_signature_fn(examples, unused_features, predictions):
"""Creates logistic regression signature from given examples and predictions.
Args:
- examples: `Output`.
- unused_features: `dict` of `Output`s.
- predictions: `Output` of shape [batch_size, 2] of predicted probabilities or
+ examples: `Tensor`.
+ unused_features: `dict` of `Tensor`s.
+ predictions: `Tensor` of shape [batch_size, 2] of predicted probabilities or
dict that contains the probabilities tensor as in
- {'probabilities', `Output`}.
+ {'probabilities': `Tensor`}.
Returns:
Tuple of default regression signature and named signature.
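All of these signature functions share one shape: (examples, features dict, predictions) in, (default signature, named-signature map) out. A hedged sketch of a custom one, using the `tf.contrib.session_bundle` exporter helpers this module relies on (the regression case is assumed):

```python
from tensorflow.contrib.session_bundle import exporter

def my_signature_fn(examples, unused_features, predictions):
  # `examples` is the serialized-Example string `Tensor`; `predictions` is
  # assumed here to be a single regression output `Tensor`.
  default_signature = exporter.regression_signature(
      input_tensor=examples, output_tensor=predictions)
  return default_signature, {}
```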
diff --git a/tensorflow/contrib/linear_optimizer/python/sdca_optimizer.py b/tensorflow/contrib/linear_optimizer/python/sdca_optimizer.py
index b6e59b02b6..6ff4bf3175 100644
--- a/tensorflow/contrib/linear_optimizer/python/sdca_optimizer.py
+++ b/tensorflow/contrib/linear_optimizer/python/sdca_optimizer.py
@@ -46,7 +46,7 @@ class SDCAOptimizer(object):
Here the expectation is that the input_fn_* functions passed to train and
evaluate return a pair (dict, label_tensor) where dict has `example_id_column`
- as `key` whose value is an `Output` of shape [batch_size] and dtype string.
+ as `key` whose value is a `Tensor` of shape [batch_size] and dtype string.
num_loss_partitions defines the number of partitions of the global loss
function and should be set to (#concurrent train ops/per worker) x (#workers).
Convergence of (global) loss is guaranteed if num_loss_partitions is larger or
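A sketch of an input_fn meeting that expectation; the feature names are illustrative, and the string-typed id column is what `example_id_column` must point at (the `tf.contrib.linear_optimizer.SDCAOptimizer` export path is assumed):

```python
import tensorflow as tf

def train_input_fn():
  features = {
      # shape [batch_size], dtype string, as required above
      "example_id": tf.constant(["id0", "id1", "id2"]),
      "price": tf.constant([[0.3], [0.2], [0.9]]),
  }
  labels = tf.constant([[1.0], [0.0], [1.0]])
  return features, labels

optimizer = tf.contrib.linear_optimizer.SDCAOptimizer(
    example_id_column="example_id")
```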
diff --git a/tensorflow/contrib/lookup/lookup_ops.py b/tensorflow/contrib/lookup/lookup_ops.py
index 9161a414d0..d4f9c92b2d 100644
--- a/tensorflow/contrib/lookup/lookup_ops.py
+++ b/tensorflow/contrib/lookup/lookup_ops.py
@@ -153,11 +153,11 @@ class InitializableLookupTableBase(LookupInterface):
The `default_value` is used for keys not present in the table.
Args:
- keys: Keys to look up. May be either a `SparseTensor` or dense `Output`.
+ keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
name: A name for the operation (optional).
Returns:
- A `SparseTensor` if keys are sparse, otherwise a dense `Output`.
+ A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.
Raises:
TypeError: when `keys` or `default_value` doesn't match the table data
@@ -391,7 +391,7 @@ class TextFileInitializer(TableInitializerBase):
Args:
filename: The filename of the text file to be used for initialization.
The path must be accessible from wherever the graph is initialized
- (eg. trainer or eval workers). The filename may be a scalar `Output`.
+ (e.g. trainer or eval workers). The filename may be a scalar `Tensor`.
key_dtype: The `key` data type.
key_index: the index that represents information of a line to get the
table 'key' values from.
@@ -503,7 +503,7 @@ class TextFileStringTableInitializer(TextFileInitializer):
Args:
filename: The filename of the text file to be used for initialization.
The path must be accessible from wherever the graph is initialized
- (eg. trainer or eval workers). The filename may be a scalar `Output`.
+ (e.g. trainer or eval workers). The filename may be a scalar `Tensor`.
key_column_index: The column index from the text file to get the keys
from. The default is 0 that represents the whole line content.
value_column_index: The column index from the text file to get the
@@ -553,7 +553,7 @@ class TextFileIdTableInitializer(TextFileInitializer):
Args:
filename: The filename of the text file to be used for initialization.
The path must be accessible from wherever the graph is initialized
- (eg. trainer or eval workers). The filename may be a scalar `Output`.
+ (e.g. trainer or eval workers). The filename may be a scalar `Tensor`.
key_column_index: The column index from the text file to get the `key`
values from. The default is to use the line number, starting from zero.
value_column_index: The column index from the text file to get the `value`
@@ -606,8 +606,8 @@ def string_to_index(tensor, mapping, default_value=-1, name=None):
```
Args:
- tensor: A 1-D input `Output` with the strings to map to indices.
- mapping: A 1-D string `Output` that specifies the mapping of strings to
+ tensor: A 1-D input `Tensor` with the strings to map to indices.
+ mapping: A 1-D string `Tensor` that specifies the mapping of strings to
indices.
default_value: The `int64` value to use for out-of-vocabulary strings.
Defaults to -1.
@@ -662,8 +662,8 @@ def index_to_string(tensor, mapping, default_value="UNK", name=None):
```
Args:
- tensor: A `int64` `Output` with the indices to map to strings.
- mapping: A 1-D string `Output` that specifies the strings to map from
+ tensor: A `int64` `Tensor` with the indices to map to strings.
+ mapping: A 1-D string `Tensor` that specifies the strings to map from
indices.
default_value: The string value to use for out-of-vocabulary indices.
name: A name for this op (optional).
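A round-trip sketch of the two ops, built from the docstrings above; note the table initializer of this era, `tf.initialize_all_tables()`, must run before lookup:

```python
import tensorflow as tf

mapping = tf.constant(["emerson", "lake", "palmer"])
feats = tf.constant(["emerson", "lake", "and", "palmer"])

ids = tf.contrib.lookup.string_to_index(feats, mapping=mapping,
                                        default_value=-1)
strings = tf.contrib.lookup.index_to_string(ids, mapping=mapping,
                                            default_value="UNK")
with tf.Session() as sess:
  tf.initialize_all_tables().run()
  print(sess.run(ids))      # [0, 1, -1, 2]
  print(sess.run(strings))  # [b"emerson", b"lake", b"UNK", b"palmer"]
```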
diff --git a/tensorflow/contrib/losses/python/losses/loss_ops.py b/tensorflow/contrib/losses/python/losses/loss_ops.py
index a3fd7cd7ca..7610f9275f 100644
--- a/tensorflow/contrib/losses/python/losses/loss_ops.py
+++ b/tensorflow/contrib/losses/python/losses/loss_ops.py
@@ -79,8 +79,8 @@ def _scale_losses(losses, weights):
"""Computes the scaled loss.
Args:
- losses: An `Output` of size [batch_size, d1, ... dN].
- weights: An `Output` of size [1], [batch_size] or [batch_size, d1, ... dN].
+ losses: A `Tensor` of size [batch_size, d1, ... dN].
+ weights: A `Tensor` of size [1], [batch_size] or [batch_size, d1, ... dN].
The `losses` are reduced (tf.reduce_sum) until its dimension matches
that of `weights` at which point the reduced `losses` are element-wise
multiplied by `weights` and a final reduce_sum is computed on the result.
@@ -89,7 +89,7 @@ def _scale_losses(losses, weights):
multiplication, and summing the result.
Returns:
- A scalar tf.float32 `Output` whose value represents the sum of the scaled
+ A scalar tf.float32 `Tensor` whose value represents the sum of the scaled
`losses`.
"""
# First, compute the sum of the losses over all elements:
@@ -109,9 +109,9 @@ def _safe_div(numerator, denominator, name="value"):
creep into the gradient computation.
Args:
- numerator: An arbitrary `Output`.
- denominator: An `Output` whose shape matches `numerator` and whose values
- are assumed to be non-negative.
+ numerator: An arbitrary `Tensor`.
+ denominator: A `Tensor` whose shape matches `numerator` and whose values are
+ assumed to be non-negative.
name: An optional name for the returned op.
Returns:
@@ -153,7 +153,7 @@ def compute_weighted_loss(
weight: Deprecated alias for `weights`.
Returns:
- A scalar `Output` that returns the weighted loss.
+ A scalar `Tensor` that returns the weighted loss.
Raises:
ValueError: If `weights` is `None` or the shape is not compatible with
@@ -238,7 +238,7 @@ def add_loss(loss, loss_collection=ops.GraphKeys.LOSSES):
"""Adds a externally defined loss to the collection of losses.
Args:
- loss: A loss `Output`.
+ loss: A loss `Tensor`.
loss_collection: Optional collection to add the loss to.
"""
if loss_collection:
@@ -281,7 +281,7 @@ def get_total_loss(add_regularization_losses=True, name="total_loss"):
name: The name of the returned tensor.
Returns:
- An `Output` whose value represents the total loss.
+ A `Tensor` whose value represents the total loss.
Raises:
ValueError: if `losses` is not iterable.
@@ -320,7 +320,7 @@ def absolute_difference(
weight: Deprecated alias for `weights`.
Returns:
- A scalar `Output` representing the loss value.
+ A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
@@ -364,7 +364,7 @@ def sigmoid_cross_entropy(
weight: Deprecated alias for `weights`.
Returns:
- A scalar `Output` representing the loss value.
+ A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of
@@ -413,7 +413,7 @@ def softmax_cross_entropy(
weight: Deprecated alias for `weights`.
Returns:
- A scalar `Output` representing the loss value.
+ A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
@@ -460,7 +460,7 @@ def sparse_softmax_cross_entropy(
weight: Deprecated alias for `weights`.
Returns:
- A scalar `Output` representing the loss value.
+ A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shapes of logits, labels, and weight are incompatible, or
@@ -506,7 +506,7 @@ def log_loss(
weight: Deprecated alias for `weights`.
Returns:
- A scalar `Output` representing the loss value.
+ A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
@@ -539,7 +539,7 @@ def hinge_loss(logits, labels=None, scope=None, target=None):
target: Deprecated alias for `labels`.
Returns:
- An `Output` of same shape as logits and target representing the loss values
+ A `Tensor` of same shape as logits and target representing the loss values
across the batch.
Raises:
@@ -583,7 +583,7 @@ def mean_squared_error(
weight: Deprecated alias for `weights`.
Returns:
- A scalar `Output` representing the loss value.
+ A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
@@ -642,7 +642,7 @@ def mean_pairwise_squared_error(
weight: Deprecated alias for `weights`.
Returns:
- A scalar `Output` representing the loss value.
+ A scalar `Tensor` representing the loss value.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
@@ -704,7 +704,7 @@ def cosine_distance(
Args:
predictions: An arbitrary matrix.
- labels: An `Output` whose shape matches 'predictions'
+ labels: A `Tensor` whose shape matches `predictions`.
dim: The dimension along which the cosine distance is computed.
weights: Coefficients for the loss: a scalar, a tensor of shape
[batch_size], or a tensor whose shape matches `predictions`.
@@ -713,7 +713,7 @@ def cosine_distance(
weight: Deprecated alias for `weights`.
Returns:
- A scalar `Output` representing the loss value.
+ A scalar `Tensor` representing the loss value.
Raises:
ValueError: If `predictions` shape doesn't match `labels` shape, or
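The weighted-loss plumbing above composes as follows; a minimal sketch (shapes illustrative):

```python
import tensorflow as tf

predictions = tf.constant([[0.1], [0.8]])
labels = tf.constant([[0.0], [1.0]])
weights = tf.constant([1.0, 2.0])  # per-example coefficients, shape [batch_size]

# Adds the scalar weighted loss to the GraphKeys.LOSSES collection...
loss = tf.contrib.losses.mean_squared_error(predictions, labels, weights)
# ...which get_total_loss then sums (optionally with regularization losses).
total_loss = tf.contrib.losses.get_total_loss(add_regularization_losses=True)
```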
diff --git a/tensorflow/contrib/metrics/__init__.py b/tensorflow/contrib/metrics/__init__.py
index 1cf60274bb..8bfa1f97d8 100644
--- a/tensorflow/contrib/metrics/__init__.py
+++ b/tensorflow/contrib/metrics/__init__.py
@@ -17,14 +17,14 @@
### API
This module provides functions for computing streaming metrics: metrics computed
-on dynamically valued `Output`s. Each metric declaration returns a
+on dynamically valued `Tensor`s. Each metric declaration returns a
"value_tensor", an idempotent operation that returns the current value of the
metric, and an "update_op", an operation that accumulates the information
-from the current value of the `Output`s being measured as well as returns the
+from the current value of the `Tensor`s being measured as well as returns the
value of the "value_tensor".
To use any of these metrics, one need only declare the metric, call `update_op`
-repeatedly to accumulate data over the desired number of `Output` values (often
+repeatedly to accumulate data over the desired number of `Tensor` values (often
each one is a single batch) and finally evaluate the value_tensor. For example,
to use the `streaming_mean`:
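A sketch of that declare / update / read cycle with `streaming_mean` (the docstring's own example is elided by the hunk boundary, so this is a reconstruction under the documented contract):

```python
import tensorflow as tf

values = tf.placeholder(tf.float32, [None])
mean_value, update_op = tf.contrib.metrics.streaming_mean(values)

with tf.Session() as sess:
  # Streaming-metric state lives in local variables.
  sess.run(tf.local_variables_initializer())
  for batch in ([1.0, 2.0], [3.0, 5.0]):
    sess.run(update_op, feed_dict={values: batch})
  print(sess.run(mean_value))  # 2.75, the mean over both batches
```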
diff --git a/tensorflow/contrib/metrics/python/metrics/classification.py b/tensorflow/contrib/metrics/python/metrics/classification.py
index dc7be0ae04..014a0d7886 100644
--- a/tensorflow/contrib/metrics/python/metrics/classification.py
+++ b/tensorflow/contrib/metrics/python/metrics/classification.py
@@ -30,14 +30,14 @@ def accuracy(predictions, labels, weights=None):
"""Computes the percentage of times that predictions matches labels.
Args:
- predictions: the predicted values, an `Output` whose dtype and shape
+ predictions: the predicted values, a `Tensor` whose dtype and shape
match `labels`.
- labels: the ground truth values, an `Output` of any shape and
+ labels: the ground truth values, a `Tensor` of any shape and
bool, integer, or string dtype.
- weights: None or `Output` of float values to reweight the accuracy.
+ weights: None or `Tensor` of float values to reweight the accuracy.
Returns:
- Accuracy `Output`.
+ Accuracy `Tensor`.
Raises:
ValueError: if dtypes don't match or
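A small sketch of this (non-streaming) helper, using the module path shown in this diff:

```python
import tensorflow as tf
from tensorflow.contrib.metrics.python.metrics import classification

predictions = tf.constant([1, 0, 1, 1])
labels = tf.constant([1, 1, 1, 0])
acc = classification.accuracy(predictions, labels)  # dtypes/shapes must match

with tf.Session() as sess:
  print(sess.run(acc))  # 0.5: two of four predictions match
```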
diff --git a/tensorflow/contrib/metrics/python/ops/confusion_matrix_ops.py b/tensorflow/contrib/metrics/python/ops/confusion_matrix_ops.py
index 951a53ad78..dd57f0478b 100644
--- a/tensorflow/contrib/metrics/python/ops/confusion_matrix_ops.py
+++ b/tensorflow/contrib/metrics/python/ops/confusion_matrix_ops.py
@@ -71,7 +71,7 @@ def confusion_matrix(predictions, labels, num_classes=None, dtype=dtypes.int32,
using both predictions and labels array.
dtype: Data type of the confusion matrix.
name: Scope name.
- weights: An optional `Output` whose shape matches `predictions`.
+ weights: An optional `Tensor` whose shape matches `predictions`.
Returns:
A k X k matrix representing the confusion matrix, where k is the number of
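A usage sketch (values illustrative):

```python
import tensorflow as tf

predictions = tf.constant([1, 2, 4])
labels = tf.constant([2, 2, 4])

# 5x5 int32 matrix accumulating one count per (prediction, label) pair.
cm = tf.contrib.metrics.confusion_matrix(predictions, labels, num_classes=5)
with tf.Session() as sess:
  print(sess.run(cm))
```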
diff --git a/tensorflow/contrib/metrics/python/ops/histogram_ops.py b/tensorflow/contrib/metrics/python/ops/histogram_ops.py
index b8be5d5be5..cadace37a0 100644
--- a/tensorflow/contrib/metrics/python/ops/histogram_ops.py
+++ b/tensorflow/contrib/metrics/python/ops/histogram_ops.py
@@ -57,10 +57,10 @@ def auc_using_histogram(boolean_labels,
numbers of bins and comparing results.
Args:
- boolean_labels: 1-D boolean `Output`. Entry is `True` if the corresponding
+ boolean_labels: 1-D boolean `Tensor`. Entry is `True` if the corresponding
record is in class.
- scores: 1-D numeric `Output`, same shape as boolean_labels.
- score_range: `Output` of shape `[2]`, same dtype as `scores`. The min/max
+ scores: 1-D numeric `Tensor`, same shape as boolean_labels.
+ score_range: `Tensor` of shape `[2]`, same dtype as `scores`. The min/max
values of score that we expect. Scores outside range will be clipped.
nbins: Integer number of bins to use. Accuracy strictly increases as the
number of bins increases.
@@ -71,7 +71,7 @@ def auc_using_histogram(boolean_labels,
name: A name for this Op. Defaults to "auc_using_histogram".
Returns:
- auc: `float32` scalar `Output`. Fetching this converts internal histograms
+ auc: `float32` scalar `Tensor`. Fetching this converts internal histograms
to auc value.
update_op: `Op`, when run, updates internal histograms.
"""
@@ -179,14 +179,14 @@ def _auc_convert_hist_to_auc(hist_true_acc, hist_false_acc, nbins):
"""Convert histograms to auc.
Args:
- hist_true_acc: `Output` holding accumulated histogram of scores for records
+ hist_true_acc: `Tensor` holding accumulated histogram of scores for records
that were `True`.
- hist_false_acc: `Output` holding accumulated histogram of scores for
+ hist_false_acc: `Tensor` holding accumulated histogram of scores for
records that were `False`.
nbins: Integer number of bins in the histograms.
Returns:
- Scalar `Output` estimating AUC.
+ Scalar `Tensor` estimating AUC.
"""
# Note that this follows the "Approximating AUC" section in:
# Efficient AUC learning curve calculation, R. R. Bouckaert,
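A sketch of the auc / update_op pair documented above; both initializers are run here, which covers the histogram accumulator variables wherever they were collected (an assumption of this sketch):

```python
import tensorflow as tf

boolean_labels = tf.placeholder(tf.bool, [None])
scores = tf.placeholder(tf.float32, [None])

auc, update_op = tf.contrib.metrics.auc_using_histogram(
    boolean_labels, scores, score_range=[0.0, 1.0], nbins=100)

with tf.Session() as sess:
  sess.run([tf.global_variables_initializer(),
            tf.local_variables_initializer()])
  sess.run(update_op, feed_dict={boolean_labels: [True, False, True],
                                 scores: [0.9, 0.4, 0.7]})
  print(sess.run(auc))  # fetching converts the histograms to an AUC estimate
```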
diff --git a/tensorflow/contrib/metrics/python/ops/metric_ops.py b/tensorflow/contrib/metrics/python/ops/metric_ops.py
index e6beae6f87..90b56b6a97 100644
--- a/tensorflow/contrib/metrics/python/ops/metric_ops.py
+++ b/tensorflow/contrib/metrics/python/ops/metric_ops.py
@@ -45,8 +45,8 @@ def _safe_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is <= 0.
Args:
- numerator: A real `Output`.
- denominator: A real `Output`, with dtype matching `numerator`.
+ numerator: A real `Tensor`.
+ denominator: A real `Tensor`, with dtype matching `numerator`.
name: Name for the returned op.
Returns:
@@ -63,8 +63,8 @@ def _safe_scalar_div(numerator, denominator, name):
"""Divides two values, returning 0 if the denominator is 0.
Args:
- numerator: A scalar `float64` `Output`.
- denominator: A scalar `float64` `Output`.
+ numerator: A scalar `float64` `Tensor`.
+ denominator: A scalar `float64` `Tensor`.
name: Name for the returned op.
Returns:
@@ -112,8 +112,8 @@ def _count_condition(values, weights=None, metrics_collections=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- values: A `bool` `Output` of arbitrary size.
- weights: An optional `Output` whose shape is broadcastable to `values`.
+ values: A `bool` `Tensor` of arbitrary size.
+ weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
@@ -157,11 +157,11 @@ def _streaming_true_positives(predictions, labels, weights=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: The predicted values, a `bool` `Output` of arbitrary
+ predictions: The predicted values, a `bool` `Tensor` of arbitrary
dimensions.
- labels: The ground truth values, a `bool` `Output` whose dimensions must
+ labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
@@ -197,11 +197,11 @@ def _streaming_false_positives(predictions, labels, weights=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: The predicted values, a `bool` `Output` of arbitrary
+ predictions: The predicted values, a `bool` `Tensor` of arbitrary
dimensions.
- labels: The ground truth values, a `bool` `Output` whose dimensions must
+ labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
@@ -237,11 +237,11 @@ def _streaming_false_negatives(predictions, labels, weights=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: The predicted values, a `bool` `Output` of arbitrary
+ predictions: The predicted values, a `bool` `Tensor` of arbitrary
dimensions.
- labels: The ground truth values, a `bool` `Output` whose dimensions must
+ labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
@@ -276,8 +276,8 @@ def _broadcast_weights(weights, values):
`reduce_sum(w * v) / reduce_sum(_broadcast_weights(w, v))`.
Args:
- weights: `Output` whose shape is broadcastable to `values`.
- values: `Output` of any shape.
+ weights: `Tensor` whose shape is broadcastable to `values`.
+ values: `Tensor` of any shape.
Returns:
`weights` broadcast to `values` shape.
@@ -309,8 +309,8 @@ def streaming_mean(values, weights=None, metrics_collections=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- values: An `Output` of arbitrary dimensions.
- weights: An optional `Output` whose shape is broadcastable to `values`.
+ values: A `Tensor` of arbitrary dimensions.
+ weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
@@ -378,8 +378,8 @@ def streaming_mean_tensor(values, weights=None, metrics_collections=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- values: An `Output` of arbitrary dimensions.
- weights: An optional `Output` whose shape is broadcastable to `values`.
+ values: A `Tensor` of arbitrary dimensions.
+ weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that `mean`
should be added to.
updates_collections: An optional list of collections that `update_op`
@@ -441,7 +441,7 @@ def streaming_accuracy(predictions, labels, weights=None,
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the `accuracy`.
- Internally, an `is_correct` operation computes an `Output` with elements 1.0
+ Internally, an `is_correct` operation computes a `Tensor` with elements 1.0
where the corresponding elements of `predictions` and `labels` match and 0.0
otherwise. Then `update_op` increments `total` with the reduced sum of the
product of `weights` and `is_correct`, and it increments `count` with the
@@ -450,10 +450,10 @@ def streaming_accuracy(predictions, labels, weights=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: The predicted values, an `Output` of any shape.
- labels: The ground truth values, an `Output` whose shape matches
+ predictions: The predicted values, a `Tensor` of any shape.
+ labels: The ground truth values, a `Tensor` whose shape matches
`predictions`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `accuracy` should
be added to.
updates_collections: An optional list of collections that `update_op` should
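The total/count mechanics described above look like this in use (a hedged sketch):

```python
import tensorflow as tf

predictions = tf.placeholder(tf.int64, [None])
labels = tf.placeholder(tf.int64, [None])
accuracy, update_op = tf.contrib.metrics.streaming_accuracy(predictions, labels)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op, feed_dict={predictions: [1, 0, 1], labels: [1, 1, 1]})
  print(sess.run(accuracy))  # 2/3: total=2 matches, count=3 examples
```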
@@ -501,10 +501,10 @@ def streaming_precision(predictions, labels, weights=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: The predicted values, a `bool` `Output` of arbitrary shape.
- labels: The ground truth values, a `bool` `Output` whose dimensions must
+ predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
+ labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `precision` should
be added to.
updates_collections: An optional list of collections that `update_op` should
@@ -512,7 +512,7 @@ def streaming_precision(predictions, labels, weights=None,
name: An optional variable_scope name.
Returns:
- precision: Scalar float `Output` with the value of `true_positives`
+ precision: Scalar float `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately and whose value matches
@@ -576,10 +576,10 @@ def streaming_recall(predictions, labels, weights=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: The predicted values, a `bool` `Output` of arbitrary shape.
- labels: The ground truth values, a `bool` `Output` whose dimensions must
+ predictions: The predicted values, a `bool` `Tensor` of arbitrary shape.
+ labels: The ground truth values, a `bool` `Tensor` whose dimensions must
match `predictions`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `recall` should
be added to.
updates_collections: An optional list of collections that `update_op` should
@@ -587,7 +587,7 @@ def streaming_recall(predictions, labels, weights=None,
name: An optional variable_scope name.
Returns:
- recall: Scalar float `Output` with the value of `true_positives` divided
+ recall: Scalar float `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately and whose value matches
@@ -653,12 +653,12 @@ def _tp_fn_tn_fp(predictions, labels, thresholds, weights=None):
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: A floating point `Output` of arbitrary shape and whose values
+ predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
- labels: An `Output` whose shape matches `predictions`. `labels` will be cast
+ labels: A `Tensor` whose shape matches `predictions`. `labels` will be cast
to `bool`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
Returns:
true_positive: A variable of shape [len(thresholds)].
@@ -776,10 +776,10 @@ def streaming_auc(predictions, labels, weights=None, num_thresholds=200,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: A floating point `Output` of arbitrary shape and whose values
+ predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
- labels: A `bool` `Output` whose shape matches `predictions`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ labels: A `bool` `Tensor` whose shape matches `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
num_thresholds: The number of thresholds to use when discretizing the roc
curve.
metrics_collections: An optional list of collections that `auc` should be
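Declaration follows the same value/update pattern as the other streaming metrics; only the ROC discretization knob differs:

```python
import tensorflow as tf

predictions = tf.placeholder(tf.float32, [None])  # values in [0, 1]
labels = tf.placeholder(tf.bool, [None])

# num_thresholds controls how finely the ROC curve is discretized.
auc, update_op = tf.contrib.metrics.streaming_auc(
    predictions, labels, num_thresholds=200)
```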
@@ -870,11 +870,11 @@ def streaming_specificity_at_sensitivity(
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
- predictions: A floating point `Output` of arbitrary shape and whose values
+ predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
- labels: A `bool` `Output` whose shape matches `predictions`.
+ labels: A `bool` `Tensor` whose shape matches `predictions`.
sensitivity: A scalar value in range `[0, 1]`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
num_thresholds: The number of thresholds to use for matching the given
sensitivity.
metrics_collections: An optional list of collections that `specificity`
@@ -974,11 +974,11 @@ def streaming_sensitivity_at_specificity(
following: https://en.wikipedia.org/wiki/Sensitivity_and_specificity
Args:
- predictions: A floating point `Output` of arbitrary shape and whose values
+ predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
- labels: A `bool` `Output` whose shape matches `predictions`.
+ labels: A `bool` `Tensor` whose shape matches `predictions`.
specificity: A scalar value in range `[0, 1]`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
num_thresholds: The number of thresholds to use for matching the given
specificity.
metrics_collections: An optional list of collections that `sensitivity`
@@ -1059,11 +1059,11 @@ def streaming_precision_at_thresholds(predictions, labels, thresholds,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: A floating point `Output` of arbitrary shape and whose values
+ predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
- labels: A `bool` `Output` whose shape matches `predictions`.
+ labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `auc` should be
added to.
updates_collections: An optional list of collections that `update_op` should
@@ -1131,11 +1131,11 @@ def streaming_recall_at_thresholds(predictions, labels, thresholds,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: A floating point `Output` of arbitrary shape and whose values
+ predictions: A floating point `Tensor` of arbitrary shape and whose values
are in the range `[0, 1]`.
- labels: A `bool` `Output` whose shape matches `predictions`.
+ labels: A `bool` `Tensor` whose shape matches `predictions`.
thresholds: A python list or tuple of float thresholds in `[0, 1]`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `recall` should be
added to.
updates_collections: An optional list of collections that `update_op` should
@@ -1206,7 +1206,7 @@ def streaming_recall_at_k(predictions, labels, k, weights=None,
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
- `recall_at_<k>`. Internally, an `in_top_k` operation computes an `Output` with
+ `recall_at_<k>`. Internally, an `in_top_k` operation computes a `Tensor` with
shape [batch_size] whose elements indicate whether or not the corresponding
label is in the top `k` `predictions`. Then `update_op` increments `total`
with the reduced sum of `weights` where `in_top_k` is `True`, and it
@@ -1219,7 +1219,7 @@ def streaming_recall_at_k(predictions, labels, k, weights=None,
labels: A tensor of dimension [batch_size] whose type is in `int32`,
`int64`.
k: The number of top elements to look at for computing recall.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `recall_at_k`
should be added to.
updates_collections: An optional list of collections `update_op` should be
@@ -1273,7 +1273,7 @@ def streaming_sparse_recall_at_k(predictions,
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
- `recall_at_<k>`. Internally, a `top_k` operation computes an `Output`
+ `recall_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false negatives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
@@ -1282,11 +1282,11 @@ def streaming_sparse_recall_at_k(predictions,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: Float `Output` with shape [D1, ... DN, num_classes] where
+ predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
- labels: `int64` `Output` or `SparseTensor` with shape
+ labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match `predictions`.
@@ -1297,7 +1297,7 @@ def streaming_sparse_recall_at_k(predictions,
class_id: Integer class ID for which we want binary metrics. This should be
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If class_id is outside this range, the method returns NAN.
- weights: An optional `Output` whose shape is broadcastable to the the first
+ weights: An optional `Tensor` whose shape is broadcastable to the first
[D1, ... DN] dimensions of `predictions` and `labels`.
metrics_collections: An optional list of collections that values should
be added to.
@@ -1306,7 +1306,7 @@ def streaming_sparse_recall_at_k(predictions,
name: Name of new update operation, and namespace for other dependent ops.
Returns:
- recall: Scalar `float64` `Output` with the value of `true_positives` divided
+ recall: Scalar `float64` `Tensor` with the value of `true_positives` divided
by the sum of `true_positives` and `false_negatives`.
update_op: `Operation` that increments `true_positives` and
`false_negatives` variables appropriately, and whose value matches
@@ -1352,11 +1352,11 @@ def _streaming_sparse_precision_at_k(top_k_idx,
streaming_sparse_precision_at_top_k. Refer to those methods for more details.
Args:
- top_k_idx: Integer `Output` with shape [D1, ... DN, k] where
+ top_k_idx: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_idx has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
- labels: `int64` `Output` or `SparseTensor` with shape
+ labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
@@ -1368,7 +1368,7 @@ def _streaming_sparse_precision_at_k(top_k_idx,
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
- weights: An optional `Output` whose shape is broadcastable to the the first
+ weights: An optional `Tensor` whose shape is broadcastable to the first
[D1, ... DN] dimensions of `predictions` and `labels`.
metrics_collections: An optional list of collections that values should
be added to.
@@ -1377,7 +1377,7 @@ def _streaming_sparse_precision_at_k(top_k_idx,
name: Name of the metric and of the enclosing scope.
Returns:
- precision: Scalar `float64` `Output` with the value of `true_positives`
+ precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
@@ -1434,7 +1434,7 @@ def streaming_sparse_precision_at_k(predictions,
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
- `precision_at_<k>`. Internally, a `top_k` operation computes an `Output`
+ `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
@@ -1443,11 +1443,11 @@ def streaming_sparse_precision_at_k(predictions,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: Float `Output` with shape [D1, ... DN, num_classes] where
+ predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and predictions has shape [batch size, num_classes].
The final dimension contains the logit values for each class. [D1, ... DN]
must match `labels`.
- labels: `int64` `Output` or `SparseTensor` with shape
+ labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
@@ -1459,7 +1459,7 @@ def streaming_sparse_precision_at_k(predictions,
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
- weights: An optional `Output` whose shape is broadcastable to the the first
+ weights: An optional `Tensor` whose shape is broadcastable to the first
[D1, ... DN] dimensions of `predictions` and `labels`.
metrics_collections: An optional list of collections that values should
be added to.
@@ -1468,7 +1468,7 @@ def streaming_sparse_precision_at_k(predictions,
name: Name of new update operation, and namespace for other dependent ops.
Returns:
- precision: Scalar `float64` `Output` with the value of `true_positives`
+ precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
@@ -1528,11 +1528,11 @@ def streaming_sparse_precision_at_top_k(top_k_predictions,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- top_k_predictions: Integer `Output` with shape [D1, ... DN, k] where
+ top_k_predictions: Integer `Tensor` with shape [D1, ... DN, k] where
N >= 1. Commonly, N=1 and top_k_predictions has shape [batch size, k].
The final dimension contains the indices of top-k labels. [D1, ... DN]
must match `labels`.
- labels: `int64` `Output` or `SparseTensor` with shape
+ labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
@@ -1543,7 +1543,7 @@ def streaming_sparse_precision_at_top_k(top_k_predictions,
in range [0, num_classes), where num_classes is the last dimension of
`predictions`. If `class_id` is outside this range, the method returns
NAN.
- weights: An optional `Output` whose shape is broadcastable to the the first
+ weights: An optional `Tensor` whose shape is broadcastable to the first
[D1, ... DN] dimensions of `predictions` and `labels`.
metrics_collections: An optional list of collections that values should
be added to.
@@ -1552,7 +1552,7 @@ def streaming_sparse_precision_at_top_k(top_k_predictions,
name: Name of new update operation, and namespace for other dependent ops.
Returns:
- precision: Scalar `float64` `Output` with the value of `true_positives`
+ precision: Scalar `float64` `Tensor` with the value of `true_positives`
divided by the sum of `true_positives` and `false_positives`.
update_op: `Operation` that increments `true_positives` and
`false_positives` variables appropriately, and whose value matches
@@ -1590,14 +1590,14 @@ def num_relevant(labels, k):
`num_labels` and `k`.
Args:
- labels: `int64` `Output` or `SparseTensor` with shape
+ labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels].
k: Integer, k for @k metric.
Returns:
- Integer `Output` of shape [D1, ... DN], where each value is the number of
+ Integer `Tensor` of shape [D1, ... DN], where each value is the number of
relevant values for that row.
Raises:
@@ -1627,13 +1627,13 @@ def expand_and_tile(tensor, multiple, dim=0, name=None):
tiled `multiple` times along the new dimension.
Args:
- tensor: Input `Output` or `SparseTensor`.
+ tensor: Input `Tensor` or `SparseTensor`.
multiple: Integer, number of times to tile.
dim: Integer, dimension along which to tile.
name: Name of operation.
Returns:
- `Output` result of expanding and tiling `tensor`.
+ `Tensor` result of expanding and tiling `tensor`.
Raises:
ValueError: if `multiple` is less than 1, or `dim` is not in
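For the dense case, the documented behavior amounts to an expand_dims followed by a tile along the new axis; a sketch, using the module path shown in this diff:

```python
import tensorflow as tf
from tensorflow.contrib.metrics.python.ops import metric_ops

t = tf.constant([[1, 2], [3, 4]])  # shape [2, 2]

# Insert a new axis at dim=1, then tile it 3 times: result shape [2, 3, 2].
tiled = metric_ops.expand_and_tile(t, multiple=3, dim=1)
```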
@@ -1683,20 +1683,20 @@ def sparse_average_precision_at_k(predictions, labels, k):
AveP = sum_{i=1...k} P_{i} * rel_{i} / num_relevant_items
A "row" is the elements in dimension [D1, ... DN] of `predictions`, `labels`,
- and the result `Output`s. In the common case, this is [batch_size]. Each row
+ and the result `Tensor`s. In the common case, this is [batch_size]. Each row
of the results contains the average precision for that row.
- Internally, a `top_k` operation computes an `Output` indicating the top `k`
+ Internally, a `top_k` operation computes a `Tensor` indicating the top `k`
`predictions`. Set operations applied to `top_k` and `labels` calculate the
true positives, which are used to calculate the precision ("P_{i}" term,
above).
Args:
- predictions: Float `Output` with shape [D1, ... DN, num_classes] where
+ predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
- labels: `int64` `Output` or `SparseTensor` with shape
+ labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
@@ -1707,7 +1707,7 @@ def sparse_average_precision_at_k(predictions, labels, k):
range `[1,k]`, as documented above.
Returns:
- `float64` `Output` of shape [D1, ... DN], where each value is the average
+ `float64` `Tensor` of shape [D1, ... DN], where each value is the average
precision for that row.
Raises:
@@ -1785,7 +1785,7 @@ def streaming_sparse_average_precision_at_k(predictions,
For estimation of the metric over a stream of data, the function creates an
`update_op` operation that updates these variables and returns the
- `precision_at_<k>`. Internally, a `top_k` operation computes an `Output`
+ `precision_at_<k>`. Internally, a `top_k` operation computes a `Tensor`
indicating the top `k` `predictions`. Set operations applied to `top_k` and
`labels` calculate the true positives and false positives weighted by
`weights`. Then `update_op` increments `true_positive_at_<k>` and
@@ -1794,11 +1794,11 @@ def streaming_sparse_average_precision_at_k(predictions,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: Float `Output` with shape [D1, ... DN, num_classes] where
+ predictions: Float `Tensor` with shape [D1, ... DN, num_classes] where
N >= 1. Commonly, N=1 and `predictions` has shape
[batch size, num_classes]. The final dimension contains the logit values
for each class. [D1, ... DN] must match `labels`.
- labels: `int64` `Output` or `SparseTensor` with shape
+ labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
@@ -1807,7 +1807,7 @@ def streaming_sparse_average_precision_at_k(predictions,
range are ignored.
k: Integer, k for @k metric. This will calculate an average precision for
range `[1,k]`, as documented above.
- weights: An optional `Output` whose shape is broadcastable to the the first
+ weights: An optional `Tensor` whose shape is broadcastable to the first
[D1, ... DN] dimensions of `predictions` and `labels`.
metrics_collections: An optional list of collections that values should
be added to.
@@ -1816,7 +1816,7 @@ def streaming_sparse_average_precision_at_k(predictions,
name: Name of new update operation, and namespace for other dependent ops.
Returns:
- mean_average_precision: Scalar `float64` `Output` with the mean average
+ mean_average_precision: Scalar `float64` `Tensor` with the mean average
precision values.
update: `Operation` that increments variables appropriately, and whose
value matches `metric`.
@@ -1871,7 +1871,7 @@ def _select_class_id(ids, selected_id):
"""Filter all but `selected_id` out of `ids`.
Args:
- ids: `int64` `Output` or `SparseTensor` of IDs.
+ ids: `int64` `Tensor` or `SparseTensor` of IDs.
selected_id: Int id to select.
Returns:
@@ -1904,12 +1904,12 @@ def _maybe_select_class_id(labels, predictions_idx, selected_id=None):
"""If class ID is specified, filter all other classes.
Args:
- labels: `int64` `Output` or `SparseTensor` with shape
+ labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
- predictions_idx: `int64` `Output` of class IDs, with shape [D1, ... DN, k]
+ predictions_idx: `int64` `Tensor` of class IDs, with shape [D1, ... DN, k]
where N >= 1. Commonly, N=1 and `predictions_idx` has shape
[batch size, k].
selected_id: Int id to select.
@@ -1936,21 +1936,21 @@ def _sparse_true_positive_at_k(predictions_idx,
`n` label classes, where `n` is the 2nd dimension of `labels_sparse`.
Args:
- predictions_idx: 1-D or higher `int64` `Output` with last dimension `k`,
+ predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
- labels: `int64` `Output` or `SparseTensor` with shape
+ labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
class_id: Class for which we want binary metrics.
- weights: `Output` whose shape is broadcastable to the the first [D1, ... DN]
+   weights: `Tensor` whose shape is broadcastable to the first [D1, ... DN]
dimensions of `predictions_idx` and `labels`.
name: Name of operation.
Returns:
- A [D1, ... DN] `Output` of true positive counts.
+ A [D1, ... DN] `Tensor` of true positive counts.
"""
with ops.name_scope(name, 'true_positives', (predictions_idx, labels)):
labels, predictions_idx = _maybe_select_class_id(
@@ -1979,17 +1979,17 @@ def _streaming_sparse_true_positive_at_k(predictions_idx,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions_idx: 1-D or higher `int64` `Output` with last dimension `k`,
+ predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
- labels: `int64` `Output` or `SparseTensor` with shape
+ labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
- weights: `Output` whose shape is broadcastable to the the first [D1, ... DN]
+   weights: `Tensor` whose shape is broadcastable to the first [D1, ... DN]
dimensions of `predictions_idx` and `labels`.
name: Name of new variable, and namespace for other dependent ops.
@@ -2023,20 +2023,20 @@ def _sparse_false_positive_at_k(predictions_idx,
`n` label classes, where `n` is the 2nd dimension of `labels_sparse`.
Args:
- predictions_idx: 1-D or higher `int64` `Output` with last dimension `k`,
+ predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
- labels: `int64` `Output` or `SparseTensor` with shape
+ labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
class_id: Class for which we want binary metrics.
- weights: `Output` whose shape is broadcastable to the the first [D1, ... DN]
+   weights: `Tensor` whose shape is broadcastable to the first [D1, ... DN]
dimensions of `predictions_idx` and `labels`.
Returns:
- A [D1, ... DN] `Output` of false positive counts.
+ A [D1, ... DN] `Tensor` of false positive counts.
"""
with ops.name_scope(None, 'false_positives', (predictions_idx, labels)):
labels, predictions_idx = _maybe_select_class_id(labels,
@@ -2067,17 +2067,17 @@ def _streaming_sparse_false_positive_at_k(predictions_idx,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions_idx: 1-D or higher `int64` `Output` with last dimension `k`,
+ predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
- labels: `int64` `Output` or `SparseTensor` with shape
+ labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
- weights: `Output` whose shape is broadcastable to the the first [D1, ... DN]
+   weights: `Tensor` whose shape is broadcastable to the first [D1, ... DN]
dimensions of `predictions_idx` and `labels`.
name: Name of new variable, and namespace for other dependent ops.
@@ -2111,20 +2111,20 @@ def _sparse_false_negative_at_k(predictions_idx,
`n` label classes, where `n` is the 2nd dimension of `labels_sparse`.
Args:
- predictions_idx: 1-D or higher `int64` `Output` with last dimension `k`,
+ predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
- labels: `int64` `Output` or `SparseTensor` with shape
+ labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
class_id: Class for which we want binary metrics.
- weights: `Output` whose shape is broadcastable to the the first [D1, ... DN]
+   weights: `Tensor` whose shape is broadcastable to the first [D1, ... DN]
dimensions of `predictions_idx` and `labels`.
Returns:
- A [D1, ... DN] `Output` of false negative counts.
+ A [D1, ... DN] `Tensor` of false negative counts.
"""
with ops.name_scope(None, 'false_negatives', (predictions_idx, labels)):
labels, predictions_idx = _maybe_select_class_id(labels,
@@ -2156,17 +2156,17 @@ def _streaming_sparse_false_negative_at_k(predictions_idx,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions_idx: 1-D or higher `int64` `Output` with last dimension `k`,
+ predictions_idx: 1-D or higher `int64` `Tensor` with last dimension `k`,
top `k` predicted classes. For rank `n`, the first `n-1` dimensions must
match `labels`.
- labels: `int64` `Output` or `SparseTensor` with shape
+ labels: `int64` `Tensor` or `SparseTensor` with shape
[D1, ... DN, num_labels], where N >= 1 and num_labels is the number of
target classes for the associated prediction. Commonly, N=1 and `labels`
has shape [batch_size, num_labels]. [D1, ... DN] must match
`predictions_idx`.
k: Integer, k for @k metric. This is only used for default op name.
class_id: Class for which we want binary metrics.
- weights: `Output` whose shape is broadcastable to the the first [D1, ... DN]
+   weights: `Tensor` whose shape is broadcastable to the first [D1, ... DN]
dimensions of `predictions_idx` and `labels`.
name: Name of new variable, and namespace for other dependent ops.
@@ -2211,9 +2211,9 @@ def streaming_mean_absolute_error(predictions, labels, weights=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: An `Output` of arbitrary shape.
- labels: An `Output` of the same shape as `predictions`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ predictions: A `Tensor` of arbitrary shape.
+ labels: A `Tensor` of the same shape as `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that
`mean_absolute_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
@@ -2263,10 +2263,10 @@ def streaming_mean_relative_error(predictions, labels, normalizer, weights=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: An `Output` of arbitrary shape.
- labels: An `Output` of the same shape as `predictions`.
- normalizer: An `Output` of the same shape as `predictions`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ predictions: A `Tensor` of arbitrary shape.
+ labels: A `Tensor` of the same shape as `predictions`.
+ normalizer: A `Tensor` of the same shape as `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that
`mean_relative_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
@@ -2323,9 +2323,9 @@ def streaming_mean_squared_error(predictions, labels, weights=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: An `Output` of arbitrary shape.
- labels: An `Output` of the same shape as `predictions`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ predictions: A `Tensor` of arbitrary shape.
+ labels: A `Tensor` of the same shape as `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that
`mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
@@ -2375,9 +2375,9 @@ def streaming_root_mean_squared_error(predictions, labels, weights=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: An `Output` of arbitrary shape.
- labels: An `Output` of the same shape as `predictions`.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ predictions: A `Tensor` of arbitrary shape.
+ labels: A `Tensor` of the same shape as `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that
`root_mean_squared_error` should be added to.
updates_collections: An optional list of collections that `update_op` should
@@ -2448,8 +2448,8 @@ def streaming_covariance(predictions,
variables and returns the updated covariance.
Args:
- predictions: An `Output` of arbitrary size.
- labels: An `Output` of the same size as `predictions`.
+ predictions: A `Tensor` of arbitrary size.
+ labels: A `Tensor` of the same size as `predictions`.
weights: An optional set of weights which indicates the frequency with which
an example is sampled. Must be broadcastable with `labels`.
metrics_collections: An optional list of collections that the metric
@@ -2459,7 +2459,7 @@ def streaming_covariance(predictions,
name: An optional variable_scope name.
Returns:
- covariance: An `Output` representing the current unbiased sample covariance,
+ covariance: A `Tensor` representing the current unbiased sample covariance,
`comoment` / (`count` - 1).
update_op: An operation that updates the local variables appropriately.
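A short usage sketch under the same update pattern (the rank-1 inputs here are assumed values):

```python
import tensorflow as tf
from tensorflow.contrib import metrics as metrics_lib

predictions = tf.constant([1.0, 2.0, 3.0, 4.0])
labels = tf.constant([1.5, 2.5, 3.0, 4.5])

cov, update_op = metrics_lib.streaming_covariance(predictions, labels)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)   # updates count, means, and the comoment
  print(sess.run(cov))  # comoment / (count - 1)
```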
@@ -2569,8 +2569,8 @@ def streaming_pearson_correlation(predictions,
https://wikipedia.org/wiki/Weighted_arithmetic_mean#Weighted_sample_variance
Args:
- predictions: An `Output` of arbitrary size.
- labels: An `Output` of the same size as predictions.
+ predictions: A `Tensor` of arbitrary size.
+ labels: A `Tensor` of the same size as predictions.
weights: An optional set of weights which indicates the frequency with which
an example is sampled. Must be broadcastable with `labels`.
metrics_collections: An optional list of collections that the metric
@@ -2641,10 +2641,10 @@ def streaming_mean_cosine_distance(predictions, labels, dim, weights=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- predictions: An `Output` of the same shape as `labels`.
- labels: An `Output` of arbitrary shape.
+ predictions: A `Tensor` of the same shape as `labels`.
+ labels: A `Tensor` of arbitrary shape.
dim: The dimension along which the cosine distance is computed.
- weights: An optional `Output` whose shape is broadcastable to `predictions`,
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`,
and whose dimension `dim` is 1.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
@@ -2706,9 +2706,9 @@ def streaming_percentage_less(values, threshold, weights=None,
If `weights` is `None`, weights default to 1. Use weights of 0 to mask values.
Args:
- values: A numeric `Output` of arbitrary size.
+ values: A numeric `Tensor` of arbitrary size.
threshold: A scalar threshold.
- weights: An optional `Output` whose shape is broadcastable to `values`.
+ weights: An optional `Tensor` whose shape is broadcastable to `values`.
metrics_collections: An optional list of collections that the metric
value variable should be added to.
updates_collections: An optional list of collections that the metric update
@@ -2765,7 +2765,7 @@ def streaming_mean_iou(predictions,
num_classes: The possible number of labels the prediction task can
have. This value must be provided, since a confusion matrix of
dimension = [num_classes, num_classes] will be allocated.
- weights: An optional `Output` whose shape is broadcastable to `predictions`.
+ weights: An optional `Tensor` whose shape is broadcastable to `predictions`.
metrics_collections: An optional list of collections that `mean_iou`
should be added to.
updates_collections: An optional list of collections `update_op` should be
@@ -3033,8 +3033,8 @@ def _remove_squeezable_dimensions(predictions, labels, weights):
operations, which could result in a performance hit.
Args:
- predictions: Predicted values, an `Output` of arbitrary dimensions.
- labels: Label values, an `Output` whose dimensions match `predictions`.
+ predictions: Predicted values, a `Tensor` of arbitrary dimensions.
+ labels: Label values, a `Tensor` whose dimensions match `predictions`.
weights: optional `weights` tensor. It will be squeezed if its rank is 1
more than the new rank of `predictions`
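The weights convention repeated throughout these docstrings ("use weights of 0 to mask values") can be illustrated with a minimal, assumed example:

```python
import tensorflow as tf
from tensorflow.contrib import metrics as metrics_lib

predictions = tf.constant([1.0, 2.0, 3.0])
labels = tf.constant([1.0, 0.0, 3.0])
weights = tf.constant([1.0, 0.0, 1.0])  # zero weight masks the middle element

mae, update_op = metrics_lib.streaming_mean_absolute_error(
    predictions, labels, weights=weights)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  sess.run(update_op)
  print(sess.run(mae))  # 0.0: the mismatched element was masked out
```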
diff --git a/tensorflow/contrib/metrics/python/ops/set_ops.py b/tensorflow/contrib/metrics/python/ops/set_ops.py
index 650af4a8c9..dd737a14c2 100644
--- a/tensorflow/contrib/metrics/python/ops/set_ops.py
+++ b/tensorflow/contrib/metrics/python/ops/set_ops.py
@@ -43,7 +43,7 @@ def set_size(a, validate_indices=True):
in `a`.
Returns:
- `int32` `Output` of set sizes. For `a` ranked `n`, this is an `Output` with
+ `int32` `Tensor` of set sizes. For `a` ranked `n`, this is a `Tensor` with
rank `n-1`, and the same 1st `n-1` dimensions as `a`. Each value is the
number of unique elements in the corresponding `[0...n-1]` dimension of `a`.
@@ -72,9 +72,9 @@ def _set_operation(a, b, set_operation, validate_indices=True):
All but the last dimension of `a` and `b` must match.
Args:
- a: `Output` or `SparseTensor` of the same type as `b`. If sparse, indices
+ a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
must be sorted in row-major order.
- b: `Output` or `SparseTensor` of the same type as `a`. Must be
+ b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
`SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be
sorted in row-major order.
set_operation: String indicating set operation. See
@@ -122,9 +122,9 @@ def set_intersection(a, b, validate_indices=True):
All but the last dimension of `a` and `b` must match.
Args:
- a: `Output` or `SparseTensor` of the same type as `b`. If sparse, indices
+ a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
must be sorted in row-major order.
- b: `Output` or `SparseTensor` of the same type as `a`. Must be
+ b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
`SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be
sorted in row-major order.
validate_indices: Whether to validate the order and range of sparse indices
@@ -144,9 +144,9 @@ def set_difference(a, b, aminusb=True, validate_indices=True):
All but the last dimension of `a` and `b` must match.
Args:
- a: `Output` or `SparseTensor` of the same type as `b`. If sparse, indices
+ a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
must be sorted in row-major order.
- b: `Output` or `SparseTensor` of the same type as `a`. Must be
+ b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
`SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be
sorted in row-major order.
aminusb: Whether to subtract `b` from `a`, vs vice versa.
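A small sketch of row-wise set difference on dense inputs (values assumed; the result is a `SparseTensor`):

```python
import tensorflow as tf
from tensorflow.contrib.metrics import set_difference

a = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int64)
b = tf.constant([[2, 3, 3], [5, 7, 8]], dtype=tf.int64)

diff = set_difference(a, b)       # row-wise a - b: {1} and {4, 6}
with tf.Session() as sess:
  print(sess.run(diff).values)    # => [1 4 6]
```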
@@ -167,9 +167,9 @@ def set_union(a, b, validate_indices=True):
All but the last dimension of `a` and `b` must match.
Args:
- a: `Output` or `SparseTensor` of the same type as `b`. If sparse, indices
+ a: `Tensor` or `SparseTensor` of the same type as `b`. If sparse, indices
must be sorted in row-major order.
- b: `Output` or `SparseTensor` of the same type as `a`. Must be
+ b: `Tensor` or `SparseTensor` of the same type as `a`. Must be
`SparseTensor` if `a` is `SparseTensor`. If sparse, indices must be
sorted in row-major order.
validate_indices: Whether to validate the order and range of sparse indices
diff --git a/tensorflow/contrib/opt/python/training/external_optimizer.py b/tensorflow/contrib/opt/python/training/external_optimizer.py
index e600fc1723..de539a46e2 100644
--- a/tensorflow/contrib/opt/python/training/external_optimizer.py
+++ b/tensorflow/contrib/opt/python/training/external_optimizer.py
@@ -48,13 +48,13 @@ class ExternalOptimizerInterface(object):
"""Initialize a new interface instance.
Args:
- loss: A scalar `Output` to be minimized.
+ loss: A scalar `Tensor` to be minimized.
var_list: Optional list of `Variable` objects to update to minimize
`loss`. Defaults to the list of variables collected in the graph
under the key `GraphKeys.TRAINABLE_VARIABLES`.
- equalities: Optional list of equality constraint scalar `Output`s to be
+ equalities: Optional list of equality constraint scalar `Tensor`s to be
held equal to zero.
- inequalities: Optional list of inequality constraint scalar `Output`s
+ inequalities: Optional list of inequality constraint scalar `Tensor`s
to be kept nonnegative.
**optimizer_kwargs: Other subclass-specific keyword arguments.
"""
@@ -101,7 +101,7 @@ class ExternalOptimizerInterface(object):
def minimize(self, session=None, feed_dict=None, fetches=None,
step_callback=None, loss_callback=None):
- """Minimize a scalar `Output`.
+ """Minimize a scalar `Tensor`.
Variables subject to optimization are updated in-place at the end of
optimization.
@@ -113,7 +113,7 @@ class ExternalOptimizerInterface(object):
Args:
session: A `Session` instance.
feed_dict: A feed dict to be passed to calls to `session.run`.
- fetches: A list of `Output`s to fetch and supply to `loss_callback`
+ fetches: A list of `Tensor`s to fetch and supply to `loss_callback`
as positional arguments.
step_callback: A function to be called at each optimization step;
arguments are the current values of all optimization variables
@@ -196,7 +196,7 @@ class ExternalOptimizerInterface(object):
@classmethod
def _pack(cls, tensors):
- """Pack a list of `Output`s into a single, flattened, rank-1 `Output`."""
+ """Pack a list of `Tensor`s into a single, flattened, rank-1 `Tensor`."""
if not tensors:
return None
elif len(tensors) == 1:
@@ -207,13 +207,13 @@ class ExternalOptimizerInterface(object):
def _make_eval_func(self, tensors, session, feed_dict, fetches,
callback=None):
- """Construct a function that evaluates an `Output` or list of `Output`s."""
+ """Construct a function that evaluates a `Tensor` or list of `Tensor`s."""
if not isinstance(tensors, list):
tensors = [tensors]
num_tensors = len(tensors)
def eval_func(x):
- """Function to evaluate an `Output`."""
+ """Function to evaluate a `Tensor`."""
augmented_feed_dict = {
var: x[packing_slice].reshape(_get_shape_tuple(var))
for var, packing_slice in zip(self._vars, self._packing_slices)
diff --git a/tensorflow/contrib/rnn/python/ops/lstm_ops.py b/tensorflow/contrib/rnn/python/ops/lstm_ops.py
index 82d0486f7e..3e8998f117 100644
--- a/tensorflow/contrib/rnn/python/ops/lstm_ops.py
+++ b/tensorflow/contrib/rnn/python/ops/lstm_ops.py
@@ -76,19 +76,19 @@ def _lstm_block_cell(x,
```
Args:
- x: An `Output`. Must be one of the following types: `float32`.
+ x: A `Tensor`. Must be one of the following types: `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
- cs_prev: An `Output`. Must have the same type as `x`.
+ cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the cell state at previous time step.
- h_prev: An `Output`. Must have the same type as `x`.
+ h_prev: A `Tensor`. Must have the same type as `x`.
Output of the previous cell at previous time step.
- w: An `Output`. Must have the same type as `x`. The weight matrix.
- b: An `Output`. Must have the same type as `x`. The bias vector.
- wci: An `Output`. Must have the same type as `x`.
+ w: A `Tensor`. Must have the same type as `x`. The weight matrix.
+ b: A `Tensor`. Must have the same type as `x`. The bias vector.
+ wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
- wcf: An `Output`. Must have the same type as `x`.
+ wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
- wco: An `Output`. Must have the same type as `x`.
+ wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `3`.
@@ -98,14 +98,14 @@ def _lstm_block_cell(x,
name: A name for the operation (optional).
Returns:
- A tuple of `Output` objects (i, cs, f, o, ci, co, h).
- i: An `Output`. Has the same type as `x`. The input gate.
- cs: An `Output`. Has the same type as `x`. The cell state before the tanh.
- f: An `Output`. Has the same type as `x`. The forget gate.
- o: An `Output`. Has the same type as `x`. The output gate.
- ci: An `Output`. Has the same type as `x`. The cell input.
- co: An `Output`. Has the same type as `x`. The cell after the tanh.
- h: An `Output`. Has the same type as `x`. The output h vector.
+ A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
+ i: A `Tensor`. Has the same type as `x`. The input gate.
+ cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
+ f: A `Tensor`. Has the same type as `x`. The forget gate.
+ o: A `Tensor`. Has the same type as `x`. The output gate.
+ ci: A `Tensor`. Has the same type as `x`. The cell input.
+ co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
+ h: A `Tensor`. Has the same type as `x`. The output h vector.
Raises:
ValueError: If cell_size is None.
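For context, a rough sketch of driving the fused kernel through the public `LSTMBlockCell` wrapper defined in this module (exact state handling may vary across versions):

```python
import tensorflow as tf
from tensorflow.contrib.rnn.python.ops import lstm_ops

batch_size, num_inputs, num_units = 2, 3, 4
x = tf.placeholder(tf.float32, [batch_size, num_inputs])

cell = lstm_ops.LSTMBlockCell(num_units, use_peephole=True)
state = cell.zero_state(batch_size, tf.float32)   # (cs_prev, h_prev)
output, new_state = cell(x, state)  # one fused step computing the gates above
```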
@@ -151,35 +151,35 @@ def _block_lstm(seq_len_max,
r"""TODO(williamchan): add doc.
Args:
- seq_len_max: An `Output` of type `int64`.
- x: A list of at least 1 `Output` objects of the same type in: `float32`.
- w: An `Output`. Must have the same type as `x`.
- b: An `Output`. Must have the same type as `x`.
- cs_prev: An `Output`. Must have the same type as `x`.
- h_prev: An `Output`. Must have the same type as `x`.
- wci: An `Output`. Must have the same type as `x`.
- wcf: An `Output`. Must have the same type as `x`.
- wco: An `Output`. Must have the same type as `x`.
+ seq_len_max: A `Tensor` of type `int64`.
+ x: A list of at least 1 `Tensor` objects of the same type in: `float32`.
+ w: A `Tensor`. Must have the same type as `x`.
+ b: A `Tensor`. Must have the same type as `x`.
+ cs_prev: A `Tensor`. Must have the same type as `x`.
+ h_prev: A `Tensor`. Must have the same type as `x`.
+ wci: A `Tensor`. Must have the same type as `x`.
+ wcf: A `Tensor`. Must have the same type as `x`.
+ wco: A `Tensor`. Must have the same type as `x`.
forget_bias: An optional `float`. Defaults to `1`.
cell_clip: An optional `float`. Defaults to `3`.
use_peephole: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
- A tuple of `Output` objects (i, cs, f, o, ci, co, h).
- i: A list with the same number of `Output` objects as `x` of `Output`
+ A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
+ i: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
- cs: A list with the same number of `Output` objects as `x` of `Output`
+ cs: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
- f: A list with the same number of `Output` objects as `x` of `Output`
+ f: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
- o: A list with the same number of `Output` objects as `x` of `Output`
+ o: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
- ci: A list with the same number of `Output` objects as `x` of `Output`
+ ci: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
- co: A list with the same number of `Output` objects as `x` of `Output`
+ co: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
- h: A list with the same number of `Output` objects as `x` of `Output`
+ h: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
Raises:
diff --git a/tensorflow/contrib/rnn/python/ops/rnn.py b/tensorflow/contrib/rnn/python/ops/rnn.py
index 214c73c124..d4df308042 100644
--- a/tensorflow/contrib/rnn/python/ops/rnn.py
+++ b/tensorflow/contrib/rnn/python/ops/rnn.py
@@ -157,7 +157,7 @@ def stack_bidirectional_dynamic_rnn(cells_fw,
Returns:
A tuple (outputs, output_state_fw, output_state_bw) where:
- outputs: Output `Output` shaped:
+ outputs: Output `Tensor` shaped:
`[batch_size, max_time, layers_output]`, where `layers_output`
are depth-concatenated forward and backward outputs.
output_states_fw is the final states, one tensor per layer,
diff --git a/tensorflow/contrib/slim/python/slim/data/data_decoder.py b/tensorflow/contrib/slim/python/slim/data/data_decoder.py
index f09c2b43ec..2fa5db0153 100644
--- a/tensorflow/contrib/slim/python/slim/data/data_decoder.py
+++ b/tensorflow/contrib/slim/python/slim/data/data_decoder.py
@@ -54,8 +54,8 @@ class DataDecoder(object):
items: A list of strings, each of which indicate a particular data type.
Returns:
- A list of `Output`s, whose length matches the length of `items`, where
- each `Output` corresponds to each item.
+      A list of `Tensor`s whose length matches the length of `items`, where
+      each `Tensor` corresponds to one item.
Raises:
ValueError: If any of the items cannot be satisfied.
diff --git a/tensorflow/contrib/slim/python/slim/data/prefetch_queue.py b/tensorflow/contrib/slim/python/slim/data/prefetch_queue.py
index d3ad3bffa5..9cfd598b8d 100644
--- a/tensorflow/contrib/slim/python/slim/data/prefetch_queue.py
+++ b/tensorflow/contrib/slim/python/slim/data/prefetch_queue.py
@@ -46,7 +46,7 @@ def prefetch_queue(tensors,
loss = Loss(logits, labels)
Args:
- tensors: A list or dictionary of `Output`s to enqueue in the buffer.
+ tensors: A list or dictionary of `Tensors` to enqueue in the buffer.
capacity: An integer. The maximum number of elements in the queue.
shared_name: (optional). If set, this queue will be shared under the given
name across multiple sessions.
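A minimal construction sketch (tensor shapes assumed); the queue buffers whole batches ahead of the consumer and is drained with `dequeue()`:

```python
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import prefetch_queue

images = tf.random_uniform([32, 28, 28, 1])
labels = tf.random_uniform([32], maxval=10, dtype=tf.int32)

batch_queue = prefetch_queue.prefetch_queue([images, labels], capacity=2)
images_batch, labels_batch = batch_queue.dequeue()
```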
diff --git a/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py b/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py
index b952040c30..e0273096a0 100644
--- a/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py
+++ b/tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py
@@ -149,11 +149,11 @@ class Tensor(ItemHandler):
Tensors are, by default, returned without any reshaping. However, there are
two mechanisms which allow reshaping to occur at load time. If `shape_keys`
- is provided, both the `Output` corresponding to `tensor_key` and
- `shape_keys` is loaded and the former `Output` is reshaped with the values
- of the latter. Alternatively, if a fixed `shape` is provided, the `Output`
+ is provided, both the `Tensor` corresponding to `tensor_key` and
+ `shape_keys` is loaded and the former `Tensor` is reshaped with the values
+ of the latter. Alternatively, if a fixed `shape` is provided, the `Tensor`
corresponding to `tensor_key` is loaded and reshaped appropriately.
- If neither `shape_keys` nor `shape` are provided, the `Output` will be
+ If neither `shape_keys` nor `shape` are provided, the `Tensor` will be
returned without any reshaping.
Args:
@@ -161,7 +161,7 @@ class Tensor(ItemHandler):
shape_keys: Optional name or list of names of the TF-Example feature in
which the tensor shape is stored. If a list, then each corresponds to
one dimension of the shape.
- shape: Optional output shape of the `Output`. If provided, the `Output` is
+ shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is
reshaped accordingly.
default_value: The value used when the `tensor_key` is not found in a
particular `TFExample`.
diff --git a/tensorflow/contrib/slim/python/slim/learning.py b/tensorflow/contrib/slim/python/slim/learning.py
index 9000c43229..6d8446bf88 100644
--- a/tensorflow/contrib/slim/python/slim/learning.py
+++ b/tensorflow/contrib/slim/python/slim/learning.py
@@ -386,9 +386,9 @@ def create_train_op(
"""Creates an `Operation` that evaluates the gradients and returns the loss.
Args:
- total_loss: An `Output` representing the total loss.
+ total_loss: A `Tensor` representing the total loss.
optimizer: A tf.Optimizer to use for computing the gradients.
- global_step: An `Output` representing the global step variable. If left as
+ global_step: A `Tensor` representing the global step variable. If left as
`None`, then slim.variables.global_step() is used.
update_ops: An optional list of updates to execute. If `update_ops` is
`None`, then the update ops are set to the contents of the
@@ -409,8 +409,8 @@ def create_train_op(
names to the coefficient by which the associated gradient should be
scaled.
Returns:
- An `Output` that when evaluated, computes the gradients and returns the
- total loss value.
+ A `Tensor` that when evaluated, computes the gradients and returns the total
+ loss value.
"""
if global_step is None:
global_step = variables.get_or_create_global_step()
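End to end, `create_train_op` pairs with `slim.learning.train` (documented below), as in this sketch with an assumed stand-in model and data:

```python
import tensorflow as tf

slim = tf.contrib.slim

inputs = tf.random_uniform([4, 8])
targets = tf.random_uniform([4, 1])
predictions = slim.fully_connected(inputs, 1, activation_fn=None)
total_loss = tf.reduce_mean(tf.square(predictions - targets))

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = slim.learning.create_train_op(total_loss, optimizer)

# logdir=None: no checkpoints or summaries are written.
final_loss = slim.learning.train(train_op, logdir=None, number_of_steps=5)
```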
@@ -503,7 +503,7 @@ def train_step(sess, train_op, global_step, train_step_kwargs):
sess: The current session.
train_op: An `Operation` that evaluates the gradients and returns the
total loss.
- global_step: An `Output` representing the global training step.
+ global_step: A `Tensor` representing the global training step.
train_step_kwargs: A dictionary of keyword arguments.
Returns:
@@ -600,13 +600,13 @@ def train(train_op,
synchronously. Otherwise, gradient updates are applied asynchronous.
Args:
- train_op: An `Output` that, when executed, will apply the gradients and
+ train_op: A `Tensor` that, when executed, will apply the gradients and
return the loss value.
logdir: The directory where training logs are written to. If None, model
checkpoints and summaries will not be written.
train_step_fn: The function to call in order to execute a single gradient
step. The function must take exactly four arguments: the current
- session, the `train_op` `Output`, a global step `Output` and a dictionary.
+ session, the `train_op` `Tensor`, a global step `Tensor` and a dictionary.
train_step_kwargs: A dictionary which is passed to the `train_step_fn`. By
default, two `Boolean`, scalar ops called "should_stop" and "should_log"
are provided.
@@ -617,7 +617,7 @@ def train(train_op,
master: The address of the tensorflow master.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
- global_step: The `Output` representing the global step. If left as `None`,
+ global_step: The `Tensor` representing the global step. If left as `None`,
then slim.variables.get_or_create_global_step() is used.
number_of_steps: The max number of gradient steps to take during training.
If the value is left as None, training proceeds indefinitely.
diff --git a/tensorflow/contrib/slim/python/slim/nets/resnet_utils.py b/tensorflow/contrib/slim/python/slim/nets/resnet_utils.py
index 2af5a753ba..de8c2effc2 100644
--- a/tensorflow/contrib/slim/python/slim/nets/resnet_utils.py
+++ b/tensorflow/contrib/slim/python/slim/nets/resnet_utils.py
@@ -48,8 +48,8 @@ class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
Its parts are:
scope: The scope of the `Block`.
- unit_fn: The ResNet unit function which takes as input an `Output` and
- returns another `Output` with the output of the ResNet unit.
+ unit_fn: The ResNet unit function which takes as input a `Tensor` and
+ returns another `Tensor` with the output of the ResNet unit.
args: A list of length equal to the number of units in the `Block`. The list
contains one (depth, depth_bottleneck, stride) tuple for each unit in the
block to serve as argument to unit_fn.
@@ -60,13 +60,13 @@ def subsample(inputs, factor, scope=None):
"""Subsamples the input along the spatial dimensions.
Args:
- inputs: An `Output` of size [batch, height_in, width_in, channels].
+ inputs: A `Tensor` of size [batch, height_in, width_in, channels].
factor: The subsampling factor.
scope: Optional variable_scope.
Returns:
- output: An `Output` of size [batch, height_out, width_out, channels] with
- the input, either intact (if factor == 1) or subsampled (if factor > 1).
+ output: A `Tensor` of size [batch, height_out, width_out, channels] with the
+ input, either intact (if factor == 1) or subsampled (if factor > 1).
"""
if factor == 1:
return inputs
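A quick shape sketch of the two branches (input values assumed):

```python
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import resnet_utils

inputs = tf.random_uniform([1, 8, 8, 3])
same = resnet_utils.subsample(inputs, factor=1)  # returned intact
half = resnet_utils.subsample(inputs, factor=2)  # 1x1 max-pool, stride 2 -> [1, 4, 4, 3]
```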
@@ -144,7 +144,7 @@ def stack_blocks_dense(net, blocks, output_stride=None,
Control of the output feature density is implemented by atrous convolution.
Args:
- net: An `Output` of size [batch, height, width, channels].
+ net: A `Tensor` of size [batch, height, width, channels].
blocks: A list of length equal to the number of ResNet `Blocks`. Each
element is a ResNet `Block` object describing the units in the `Block`.
output_stride: If `None`, then the output will be computed at the nominal
diff --git a/tensorflow/contrib/slim/python/slim/nets/resnet_v1_test.py b/tensorflow/contrib/slim/python/slim/nets/resnet_v1_test.py
index bc6b3d6347..a53694ac13 100644
--- a/tensorflow/contrib/slim/python/slim/nets/resnet_v1_test.py
+++ b/tensorflow/contrib/slim/python/slim/nets/resnet_v1_test.py
@@ -37,9 +37,9 @@ def create_test_input(batch_size, height, width, channels):
channels: The number of channels per image or `None` if unknown.
Returns:
- Either a placeholder `Output` of dimension
+ Either a placeholder `Tensor` of dimension
[batch_size, height, width, channels] if any of the inputs are `None` or a
- constant `Output` with the mesh grid values along the spatial dimensions.
+ constant `Tensor` with the mesh grid values along the spatial dimensions.
"""
if None in [batch_size, height, width, channels]:
return tf.placeholder(tf.float32, (batch_size, height, width, channels))
diff --git a/tensorflow/contrib/slim/python/slim/nets/resnet_v2_test.py b/tensorflow/contrib/slim/python/slim/nets/resnet_v2_test.py
index 54cf328f00..2c23471633 100644
--- a/tensorflow/contrib/slim/python/slim/nets/resnet_v2_test.py
+++ b/tensorflow/contrib/slim/python/slim/nets/resnet_v2_test.py
@@ -37,9 +37,9 @@ def create_test_input(batch_size, height, width, channels):
channels: The number of channels per image or `None` if unknown.
Returns:
- Either a placeholder `Output` of dimension
+ Either a placeholder `Tensor` of dimension
[batch_size, height, width, channels] if any of the inputs are `None` or a
- constant `Output` with the mesh grid values along the spatial dimensions.
+ constant `Tensor` with the mesh grid values along the spatial dimensions.
"""
if None in [batch_size, height, width, channels]:
return tf.placeholder(tf.float32, (batch_size, height, width, channels))
diff --git a/tensorflow/contrib/solvers/python/ops/lanczos.py b/tensorflow/contrib/solvers/python/ops/lanczos.py
index 0a6c17eea2..2dfc9763a3 100644
--- a/tensorflow/contrib/solvers/python/ops/lanczos.py
+++ b/tensorflow/contrib/solvers/python/ops/lanczos.py
@@ -46,7 +46,7 @@ def lanczos_bidiag(operator,
Args:
operator: An object representing a linear operator with attributes:
- - shape: Either a list of integers or a 1-D `Output` of type `int32` of
+ - shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
length 2. `shape[0]` is the dimension on the domain of the operator,
`shape[1]` is the dimension of the co-domain of the operator. In other
words, if operator represents an M x N matrix A, `shape` must contain
@@ -65,20 +65,20 @@ def lanczos_bidiag(operator,
may terminate before `k` steps have been run.
orthogonalize: If `True`, perform full orthogonalization. If `False` no
orthogonalization is performed.
- starting_vector: If not null, must be an `Output` of shape `[n]`.
+ starting_vector: If not null, must be a `Tensor` of shape `[n]`.
name: A name scope for the operation.
Returns:
output: A namedtuple representing a Lanczos bidiagonalization of
`operator` with attributes:
- u: A rank-2 `Output` of type `operator.dtype` and shape
+ u: A rank-2 `Tensor` of type `operator.dtype` and shape
`[operator.shape[0], k_actual+1]`, where `k_actual` is the number of
steps run.
- v: A rank-2 `Output` of type `operator.dtype` and shape
+ v: A rank-2 `Tensor` of type `operator.dtype` and shape
`[operator.shape[1], k_actual]`, where `k_actual` is the number of steps
run.
- alpha: A rank-1 `Output` of type `operator.dtype` and shape `[k]`.
- beta: A rank-1 `Output` of type `operator.dtype` and shape `[k]`.
+ alpha: A rank-1 `Tensor` of type `operator.dtype` and shape `[k]`.
+ beta: A rank-1 `Tensor` of type `operator.dtype` and shape `[k]`.
"""
def tarray(size, dtype, name):
@@ -209,9 +209,9 @@ def bidiag_matmul(matrix, alpha, beta, adjoint_b=False, name="bidiag_matmul"):
A * diag(alpha) + [zeros(m,1), A[:, :-1] * diag(beta[:-1])]
Args:
- matrix: A rank-2 `Output` representing matrix A.
- alpha: A rank-1 `Output` representing the diagonal of B.
- beta: A rank-1 `Output` representing the lower subdiagonal diagonal of B.
+ matrix: A rank-2 `Tensor` representing matrix A.
+ alpha: A rank-1 `Tensor` representing the diagonal of B.
+    beta: A rank-1 `Tensor` representing the lower subdiagonal of B.
adjoint_b: `bool` determining what to compute.
name: A name scope for the operation.
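A dense reference for the `adjoint_b=False` formula above, with small assumed values; this is a sketch of what the op computes, not its banded implementation:

```python
import tensorflow as tf

matrix = tf.constant([[1., 2.], [3., 4.], [5., 6.]])  # m x k, here 3 x 2
alpha = tf.constant([10., 20.])                       # diagonal of B
beta = tf.constant([0.5, 0.7])                        # off-diagonal of B

# Dense B: alpha on the diagonal, beta[:-1] shifted one column right,
# matching A * diag(alpha) + [zeros(m,1), A[:, :-1] * diag(beta[:-1])].
b_dense = tf.matrix_diag(alpha) + tf.pad(tf.matrix_diag(beta[:-1]),
                                         [[0, 1], [1, 0]])
product = tf.matmul(matrix, b_dense)
```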
diff --git a/tensorflow/contrib/solvers/python/ops/least_squares.py b/tensorflow/contrib/solvers/python/ops/least_squares.py
index f80910ffe7..9a2d3b24dd 100644
--- a/tensorflow/contrib/solvers/python/ops/least_squares.py
+++ b/tensorflow/contrib/solvers/python/ops/least_squares.py
@@ -40,7 +40,7 @@ def cgls(operator, rhs, tol=1e-6, max_iter=20, name="cgls"):
Args:
operator: An object representing a linear operator with attributes:
- - shape: Either a list of integers or a 1-D `Output` of type `int32` of
+ - shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
length 2. `shape[0]` is the dimension on the domain of the operator,
`shape[1]` is the dimension of the co-domain of the operator. In other
words, if operator represents an M x N matrix A, `shape` must contain
@@ -55,7 +55,7 @@ def cgls(operator, rhs, tol=1e-6, max_iter=20, name="cgls"):
to `x`, i.e. if `operator` represents matrix `A`, `apply_adjoint` should
return `conj(transpose(A)) * x`.
- rhs: A rank-1 `Output` of shape `[M]` containing the right-hand size vector.
+    rhs: A rank-1 `Tensor` of shape `[M]` containing the right-hand side vector.
tol: A float scalar convergence tolerance.
max_iter: An integer giving the maximum number of iterations.
name: A name scope for the operation.
@@ -63,10 +63,10 @@ def cgls(operator, rhs, tol=1e-6, max_iter=20, name="cgls"):
Returns:
output: A namedtuple representing the final state with fields:
- - i: A scalar `int32` `Output`. Number of iterations executed.
- - x: A rank-1 `Output` of shape `[N]` containing the computed solution.
- - r: A rank-1 `Output` of shape `[M]` containing the residual vector.
- - p: A rank-1 `Output` of shape `[N]`. The next descent direction.
+ - i: A scalar `int32` `Tensor`. Number of iterations executed.
+ - x: A rank-1 `Tensor` of shape `[N]` containing the computed solution.
+ - r: A rank-1 `Tensor` of shape `[M]` containing the residual vector.
+ - p: A rank-1 `Tensor` of shape `[N]`. The next descent direction.
- gamma: \\(||A^* r||_2^2\\)
"""
# ephemeral class holding CGLS state.
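A rough calling sketch, assuming the `util.create_operator` helper from this package wraps a dense matrix with the required `shape`/`apply`/`apply_adjoint` attributes:

```python
import tensorflow as tf
from tensorflow.contrib.solvers.python.ops import least_squares, util

a = tf.constant([[1., 2.], [3., 4.], [5., 6.]])  # M x N with M > N
rhs = tf.constant([1., 1., 1.])

operator = util.create_operator(a)
state = least_squares.cgls(operator, rhs, tol=1e-6, max_iter=20)

with tf.Session() as sess:
  iters, x = sess.run([state.i, state.x])  # iterations used, solution estimate
```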
diff --git a/tensorflow/contrib/solvers/python/ops/linear_equations.py b/tensorflow/contrib/solvers/python/ops/linear_equations.py
index 38c94addd0..41fd6e466b 100644
--- a/tensorflow/contrib/solvers/python/ops/linear_equations.py
+++ b/tensorflow/contrib/solvers/python/ops/linear_equations.py
@@ -41,7 +41,7 @@ def conjugate_gradient(operator,
Args:
operator: An object representing a linear operator with attributes:
- - shape: Either a list of integers or a 1-D `Output` of type `int32` of
+ - shape: Either a list of integers or a 1-D `Tensor` of type `int32` of
length 2. `shape[0]` is the dimension on the domain of the operator,
`shape[1]` is the dimension of the co-domain of the operator. In other
words, if operator represents an N x N matrix A, `shape` must contain
@@ -50,17 +50,17 @@ def conjugate_gradient(operator,
- apply: Callable object taking a vector `x` as input and returning a
vector with the result of applying the operator to `x`, i.e. if
`operator` represents matrix `A`, `apply` should return `A * x`.
- rhs: A rank-1 `Output` of shape `[N]` containing the right-hand size vector.
+    rhs: A rank-1 `Tensor` of shape `[N]` containing the right-hand side vector.
tol: A float scalar convergence tolerance.
max_iter: An integer giving the maximum number of iterations.
name: A name scope for the operation.
Returns:
output: A namedtuple representing the final state with fields:
- - i: A scalar `int32` `Output`. Number of iterations executed.
- - x: A rank-1 `Output` of shape `[N]` containing the computed solution.
- - r: A rank-1 `Output` of shape `[M]` containing the residual vector.
- - p: A rank-1 `Output` of shape `[N]`. `A`-conjugate basis vector.
+ - i: A scalar `int32` `Tensor`. Number of iterations executed.
+ - x: A rank-1 `Tensor` of shape `[N]` containing the computed solution.
+ - r: A rank-1 `Tensor` of shape `[M]` containing the residual vector.
+ - p: A rank-1 `Tensor` of shape `[N]`. `A`-conjugate basis vector.
- gamma: \\(||r||_2^2\\)
"""
# ephemeral class holding CG state.
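Analogous to `cgls` above, a sketch for a small symmetric positive-definite system, again assuming the package's `util.create_operator` helper:

```python
import tensorflow as tf
from tensorflow.contrib.solvers.python.ops import linear_equations, util

a = tf.constant([[4., 1.], [1., 3.]])  # symmetric positive-definite
rhs = tf.constant([1., 2.])

operator = util.create_operator(a)
state = linear_equations.conjugate_gradient(operator, rhs, tol=1e-6, max_iter=20)

with tf.Session() as sess:
  iters, x = sess.run([state.i, state.x])  # x approximates solve(a, rhs)
```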
diff --git a/tensorflow/contrib/tensor_forest/data/data_ops.py b/tensorflow/contrib/tensor_forest/data/data_ops.py
index af1cc95b74..8bcccaea34 100644
--- a/tensorflow/contrib/tensor_forest/data/data_ops.py
+++ b/tensorflow/contrib/tensor_forest/data/data_ops.py
@@ -64,7 +64,7 @@ def ParseDataTensorOrDict(data):
columns, which we turn into a single 2-D tensor.
Args:
- data: `Output` or `dict` of `Output` objects.
+ data: `Tensor` or `dict` of `Tensor` objects.
Returns:
A 2-D tensor for input to tensor_forest, a keys tensor for the
@@ -112,7 +112,7 @@ def ParseLabelTensorOrDict(labels):
Converts sparse tensors to dense ones.
Args:
- labels: `Output` or `dict` of `Output` objects.
+ labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A 2-D tensor for labels/outputs.
diff --git a/tensorflow/contrib/training/python/training/bucket_ops.py b/tensorflow/contrib/training/python/training/bucket_ops.py
index 8323830cfd..3f397d2401 100644
--- a/tensorflow/contrib/training/python/training/bucket_ops.py
+++ b/tensorflow/contrib/training/python/training/bucket_ops.py
@@ -273,7 +273,7 @@ def bucket_by_sequence_length(input_length,
`which_bucket` for details of the other arguments.
Args:
- input_length: `int32` scalar `Output`, the sequence length of tensors.
+ input_length: `int32` scalar `Tensor`, the sequence length of tensors.
tensors: The list or dictionary of tensors, representing a single element,
to bucket. Nested lists are not supported.
batch_size: The new batch size pulled from the queue
@@ -303,7 +303,7 @@ def bucket_by_sequence_length(input_length,
Returns:
A tuple `(sequence_length, outputs)` where `sequence_length` is
- a 1-D `Output` of size `batch_size` and `outputs` is a list or dictionary
+ a 1-D `Tensor` of size `batch_size` and `outputs` is a list or dictionary
of batched, bucketed, outputs corresponding to elements of `tensors`.
Raises:
diff --git a/tensorflow/contrib/training/python/training/device_setter.py b/tensorflow/contrib/training/python/training/device_setter.py
index cd4b6580e0..ae6ffb8f28 100644
--- a/tensorflow/contrib/training/python/training/device_setter.py
+++ b/tensorflow/contrib/training/python/training/device_setter.py
@@ -77,7 +77,7 @@ def byte_size_load_fn(op):
"""Load function that computes the byte size of a single-output `Operation`.
This is intended to be used with `"Variable"` ops, which have a single
- `Output` output with the contents of the variable. However, it can also be
+ `Tensor` output with the contents of the variable. However, it can also be
used for calculating the size of any op that has a single output.
Intended to be used with `GreedyLoadBalancingStrategy`.
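A placement sketch: paired with `tf.train.replica_device_setter`, this load function spreads variables across parameter servers by byte size (cluster sizes here are assumed):

```python
import tensorflow as tf
from tensorflow.contrib.training.python.training import device_setter

strategy = device_setter.GreedyLoadBalancingStrategy(
    num_tasks=3, load_fn=device_setter.byte_size_load_fn)

with tf.device(tf.train.replica_device_setter(ps_tasks=3,
                                              ps_strategy=strategy)):
  weights = tf.get_variable("weights", shape=[1024, 1024], dtype=tf.float32)
```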
@@ -86,7 +86,7 @@ def byte_size_load_fn(op):
op: An `Operation` with a single output, typically a "Variable" op.
Returns:
- The number of bytes in the output `Output`.
+ The number of bytes in the output `Tensor`.
Raises:
ValueError: if `op` does not have a single output, or if the shape of the
diff --git a/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py b/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
index 487d6b42df..4baece2e5d 100644
--- a/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
+++ b/tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
@@ -134,15 +134,15 @@ def _check_rank(value, expected_rank):
Args:
value: A Tensor, possibly with shape associated shape information.
- expected_rank: int32 scalar (optionally an `Output`).
+ expected_rank: int32 scalar (optionally a `Tensor`).
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
- assertions on its rank. If expected_rank is not an `Output`, then
+ assertions on its rank. If expected_rank is not a `Tensor`, then
new_value's shape's rank has been set.
Raises:
- ValueError: if `expected_rank` is not an `Output` and the rank of `value`
+ ValueError: if `expected_rank` is not a `Tensor` and the rank of `value`
is known and is not equal to `expected_rank`.
"""
assert isinstance(value, ops.Tensor)
@@ -173,15 +173,15 @@ def _check_shape(value, expected_shape):
Args:
value: A Tensor, possibly with shape associated shape information.
- expected_shape: a `TensorShape`, list of `int32`, or a vector `Output`.
+ expected_shape: a `TensorShape`, list of `int32`, or a vector `Tensor`.
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
- assertions on its shape. If expected_shape is not an `Output`, then
+ assertions on its shape. If expected_shape is not a `Tensor`, then
new_value's shape has been set.
Raises:
- ValueError: if `expected_shape` is not an `Output` and the shape of `value`
+ ValueError: if `expected_shape` is not a `Tensor` and the shape of `value`
is known and is not equal to `expected_shape`.
"""
assert isinstance(value, ops.Tensor)
@@ -221,27 +221,27 @@ def _check_dimensions(value, dimensions, expected_sizes, debug_prefix):
value: A Tensor, with optional / partial shape associated shape information.
dimensions: An int list, the dimensions to check.
expected_sizes: list of mixed ints and int32 scalar tensors.
- Optionally also a vector `Output`.
+ Optionally also a vector `Tensor`.
debug_prefix: A string, used for naming ops and printing debugging messages.
Returns:
new_value: A Tensor matching `value`. Accessing this tensor tests
- assertions on its shape. If expected_sizes is not an `Output`, then
+ assertions on its shape. If expected_sizes is not a `Tensor`, then
new_value's shape has been set for all `dimensions[i]` where
- `expected_sizes[i]` is not an `Output`.
+ `expected_sizes[i]` is not a `Tensor`.
Raises:
TypeError: if any of the input contains invalid types:
- if `value` is not an `Output`.
+ if `value` is not a `Tensor`.
if `dimensions` is not a `list` or `tuple`.
ValueError: if input has incorrect sizes or inferred shapes do not match:
if `dimensions` contains repeated dimensions.
- if `expected_sizes` is not an `Output` and its length does not match that
+ if `expected_sizes` is not a `Tensor` and its length does not match that
`dimensions`.
if `value`'s shape has a well-defined rank, and one of the values in
`dimensions` is equal to or above this rank.
if `value`'s shape is well defined for some `dimensions[i]`, and
- `expected_sizes[i]` is not an `Output`, and these two values do
+ `expected_sizes[i]` is not a `Tensor`, and these two values do
not match.
"""
@@ -301,7 +301,7 @@ def _prepare_sequence_inputs(inputs, states):
Raises:
ValueError: if the shapes of inputs.context.values(), states.values(),
or inputs.sequences.values() are not fully defined (with the exception
- of the dimension of any `Output` in inputs.sequences.values()).
+ of the dimension of any `Tensor` in inputs.sequences.values()).
TypeError: if the dtype of length is not int32.
"""
# Convert state initial values to tensors
@@ -461,7 +461,7 @@ class NextQueuedSequenceBatch(object):
are assigned to each split.
Returns:
- An int32 vector `Output`.
+ An int32 vector `Tensor`.
"""
return self._state_saver._received_sequence
@@ -473,7 +473,7 @@ class NextQueuedSequenceBatch(object):
`padded_length / num_unroll`. This is the sequence_count.
Returns:
- An int32 vector `Output`.
+ An int32 vector `Tensor`.
"""
return self._state_saver._received_sequence_count
@@ -526,7 +526,7 @@ class NextQueuedSequenceBatch(object):
state_name: string, matches a key provided in `initial_states`.
Returns:
- An `Output`: a batched set of states, either initial states (if this is
+ A `Tensor`: a batched set of states, either initial states (if this is
the first run of the given example), or a value as stored during
a previous iteration via `save_state` control flow.
Its type is the same as `initial_states["state_name"].dtype`.
@@ -553,7 +553,7 @@ class NextQueuedSequenceBatch(object):
Args:
state_name: string, matches a key provided in `initial_states`.
- value: An `Output`.
+ value: A `Tensor`.
Its type must match that of `initial_states[state_name].dtype`.
If we had at input:
@@ -728,24 +728,24 @@ class SequenceQueueingStateSaver(object):
"""Creates the SequenceQueueingStateSaver.
Args:
- batch_size: int or int32 scalar `Output`, how large minibatches should
+ batch_size: int or int32 scalar `Tensor`, how large minibatches should
be when accessing the `state()` method and `context`, `sequences`, etc,
properties.
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length `k` are then split into `k / num_unroll`
many segments.
- input_length: An int32 scalar `Output`, the length of the sequence prior
+ input_length: An int32 scalar `Tensor`, the length of the sequence prior
to padding. This value may be at most `padded_length` for any given
input (see below for the definition of `padded_length`).
Batched and total lengths of the current iteration are made accessible
via the `length` and `total_length` properties. The shape of
input_length (scalar) must be fully specified.
- input_key: A string scalar `Output`, the **unique** key for the given
+ input_key: A string scalar `Tensor`, the **unique** key for the given
input. This is used to keep track of the split minibatch elements
of this input. Batched keys of the current iteration are made
accessible via the `key` property. The shape of `input_key` (scalar)
must be fully specified.
- input_sequences: A dict mapping string names to `Output` values. The
+ input_sequences: A dict mapping string names to `Tensor` values. The
values must all have matching first dimension, called `padded_length`.
The `SequenceQueueingStateSaver` will split these tensors along
this first dimension into minibatch elements of dimension
@@ -755,7 +755,7 @@ class SequenceQueueingStateSaver(object):
**Note**: `padded_length` may be dynamic, and may vary from input
to input, but must always be a multiple of `num_unroll`. The remainder
of the shape (other than the first dimension) must be fully specified.
- input_context: A dict mapping string names to `Output` values. The values
+ input_context: A dict mapping string names to `Tensor` values. The values
are treated as "global" across all time splits of the given input,
and will be copied across for all minibatch elements accordingly.
Batched and copied context of the current iteration are made
@@ -963,9 +963,9 @@ class SequenceQueueingStateSaver(object):
These dictionaries are used to keep track of indices into the barrier.
Args:
- sequences: `OrderedDict` of string, `Output` pairs.
- context: `OrderedDict` of string, `Output` pairs.
- states: `OrderedDict` of string, `Output` pairs.
+ sequences: `OrderedDict` of string, `Tensor` pairs.
+ context: `OrderedDict` of string, `Tensor` pairs.
+ states: `OrderedDict` of string, `Tensor` pairs.
"""
assert isinstance(sequences, dict)
assert isinstance(context, dict)
@@ -1305,12 +1305,12 @@ def batch_sequences_with_states(input_key, input_sequences, input_context,
```
Args:
- input_key: A string scalar `Output`, the **unique** key for the given
+ input_key: A string scalar `Tensor`, the **unique** key for the given
input example. This is used to keep track of the split minibatch elements
of this input. Batched keys of the current iteration are made
accessible via the `key` property. The shape of `input_key` (scalar) must
be fully specified.
- input_sequences: A dict mapping string names to `Output` values. The values
+ input_sequences: A dict mapping string names to `Tensor` values. The values
must all have matching first dimension, called `value_length`. They may
vary from input to input. The remainder of the shape (other than the first
dimension) must be fully specified.
@@ -1321,14 +1321,14 @@ def batch_sequences_with_states(input_key, input_sequences, input_context,
**Note**: if `pad=False`, then `value_length` must always be a multiple
of `num_unroll`.
- input_context: A dict mapping string names to `Output` values. The values
+ input_context: A dict mapping string names to `Tensor` values. The values
are treated as "global" across all time splits of the given input example,
and will be copied across for all minibatch elements accordingly.
Batched and copied context of the current iteration are made
accessible via the `context` property.
**Note**: All input_context values must have fully defined shapes.
- input_length: None or an int32 scalar `Output`, the length of the sequence
+ input_length: None or an int32 scalar `Tensor`, the length of the sequence
prior to padding. If `input_length=None` and `pad=True` then the length
will be inferred and will be equal to `value_length`. If `pad=False` then
`input_length` cannot be `None`: `input_length` must be specified. Its
@@ -1345,7 +1345,7 @@ def batch_sequences_with_states(input_key, input_sequences, input_context,
num_unroll: Python integer, how many time steps to unroll at a time.
The input sequences of length k are then split into k / num_unroll many
segments.
- batch_size: int or int32 scalar `Output`, how large minibatches should
+ batch_size: int or int32 scalar `Tensor`, how large minibatches should
be when accessing the `state()` method and `context`, `sequences`, etc,
properties.
num_threads: The int number of threads enqueuing input examples into a
@@ -1426,15 +1426,15 @@ def _padding(sequences, num_unroll):
"""For a dictionary of sequences, pads tensors to a multiple of `num_unroll`.
Args:
- sequences: dictionary with `Output` values.
+ sequences: dictionary with `Tensor` values.
num_unroll: int specifying to what multiple to pad sequences to.
Returns:
- length: Scalar `Output` of dimension 0 of all the values in sequences.
+ length: Scalar `Tensor` of dimension 0 of all the values in sequences.
padded_sequence: Dictionary of sequences that are padded to a multiple of
`num_unroll`.
Raises:
ValueError: If `num_unroll` not an int or sequences not a dictionary from
- string to `Output`.
+ string to `Tensor`.
"""
if not isinstance(num_unroll, numbers.Integral):
raise ValueError("Unsupported num_unroll expected int, got: %s" %
diff --git a/tensorflow/contrib/training/python/training/training.py b/tensorflow/contrib/training/python/training/training.py
index 9aaa79d920..e65ef6ba11 100644
--- a/tensorflow/contrib/training/python/training/training.py
+++ b/tensorflow/contrib/training/python/training/training.py
@@ -146,9 +146,9 @@ def create_train_op(total_loss,
"""Creates an `Operation` that evaluates the gradients and returns the loss.
Args:
- total_loss: An `Output` representing the total loss.
+ total_loss: A `Tensor` representing the total loss.
optimizer: A tf.Optimizer to use for computing the gradients.
- global_step: An `Output` representing the global step variable. If left as
+ global_step: A `Tensor` representing the global step variable. If left as
`None`, then slim.variables.global_step() is used.
update_ops: An optional list of updates to execute. If `update_ops` is
`None`, then the update ops are set to the contents of the
@@ -169,8 +169,8 @@ def create_train_op(total_loss,
with the ops that generated them.
Returns:
- An `Output` that when evaluated, computes the gradients and returns the
- total loss value.
+ A `Tensor` that, when evaluated, computes the gradients and returns the total
+ loss value.
"""
if global_step is None:
global_step = variables.get_or_create_global_step()
@@ -244,7 +244,7 @@ def train(
"""Runs the training loop.
Args:
- train_op: An `Output` that, when executed, will apply the gradients and
+ train_op: A `Tensor` that, when executed, will apply the gradients and
return the loss value.
logdir: The directory where the graph and checkpoints are saved.
master: The URL of the master.
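For context, a minimal sketch of how these two functions fit together, assuming `create_train_op` and `train` are exported via `tf.contrib.training` as this file suggests; the toy model is illustrative only:

```python
import tensorflow as tf

# Toy linear model; any graph producing a scalar loss works the same way.
x = tf.random_normal([32, 1])
w = tf.Variable([[0.0]])
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - 1.0))

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
# `train_op` is a Tensor: running it applies gradients and yields the loss.
train_op = tf.contrib.training.create_train_op(loss, optimizer)
# tf.contrib.training.train(train_op, logdir="/tmp/train_logs")  # runs the loop
```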
diff --git a/tensorflow/examples/learn/multiple_gpu.py b/tensorflow/examples/learn/multiple_gpu.py
index 8b27859e39..6647ec3d42 100644
--- a/tensorflow/examples/learn/multiple_gpu.py
+++ b/tensorflow/examples/learn/multiple_gpu.py
@@ -35,8 +35,8 @@ def my_model(features, target):
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
Args:
- features: `Output` of input features.
- target: `Output` of targets.
+ features: `Tensor` of input features.
+ target: `Tensor` of targets.
Returns:
Tuple of predictions, loss and training op.
diff --git a/tensorflow/python/client/session.py b/tensorflow/python/client/session.py
index dd55154a3a..71c931037e 100644
--- a/tensorflow/python/client/session.py
+++ b/tensorflow/python/client/session.py
@@ -605,7 +605,7 @@ class BaseSession(SessionInterface):
Use with the `with` keyword to specify that calls to
[`Operation.run()`](../../api_docs/python/framework.md#Operation.run) or
- [`Output.eval()`](../../api_docs/python/framework.md#Output.eval) should be
+ [`Tensor.eval()`](../../api_docs/python/framework.md#Tensor.eval) should be
executed in this session.
```python
@@ -657,7 +657,7 @@ class BaseSession(SessionInterface):
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
- and evaluate every `Output` in `fetches`, substituting the values in
+ and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a single graph element, or an arbitrarily
@@ -666,7 +666,7 @@ class BaseSession(SessionInterface):
* An [`Operation`](../../api_docs/python/framework.md#Operation).
The corresponding fetched value will be `None`.
- * A [`Output`](../../api_docs/python/framework.md#Output).
+ * A [`Tensor`](../../api_docs/python/framework.md#Tensor).
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
* A [`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor).
@@ -707,7 +707,7 @@ class BaseSession(SessionInterface):
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
- * If the key is a [`Output`](../../api_docs/python/framework.md#Output), the
+ * If the key is a [`Tensor`](../../api_docs/python/framework.md#Tensor), the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
@@ -717,7 +717,7 @@ class BaseSession(SessionInterface):
[`SparseTensor`](../../api_docs/python/sparse_ops.md#SparseTensor),
the value should be a
[`SparseTensorValue`](../../api_docs/python/sparse_ops.md#SparseTensorValue).
- * If the key is a nested tuple of `Output`s or `SparseTensor`s, the value
+ * If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
@@ -752,7 +752,7 @@ class BaseSession(SessionInterface):
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
- `Output` that doesn't exist.
+ `Tensor` that doesn't exist.
"""
run_metadata_ptr = tf_session.TF_NewBuffer()
if options:
@@ -1102,7 +1102,7 @@ class Session(BaseSession):
"""A class for running TensorFlow operations.
A `Session` object encapsulates the environment in which `Operation`
- objects are executed, and `Output` objects are evaluated. For
+ objects are executed, and `Tensor` objects are evaluated. For
example:
```python
@@ -1252,7 +1252,7 @@ class InteractiveSession(BaseSession):
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
- The methods [`Output.eval()`](../../api_docs/python/framework.md#Output.eval)
+ The methods [`Tensor.eval()`](../../api_docs/python/framework.md#Tensor.eval)
and [`Operation.run()`](../../api_docs/python/framework.md#Operation.run)
will use that session to run ops.
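To make the renamed terminology concrete, a small graph-mode session sketch; nothing here is specific to this change:

```python
import tensorflow as tf

a = tf.constant(2.0)
b = tf.constant(3.0)
c = a * b

with tf.Session() as sess:
    print(sess.run(c))           # 6.0: fetch a single Tensor
    print(sess.run([c, a + b]))  # [6.0, 5.0]: fetch a list of Tensors
    print(c.eval())              # Tensor.eval() uses the default session
```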
diff --git a/tensorflow/python/framework/dtypes.py b/tensorflow/python/framework/dtypes.py
index 7ea0c17f91..a91223749d 100644
--- a/tensorflow/python/framework/dtypes.py
+++ b/tensorflow/python/framework/dtypes.py
@@ -24,7 +24,7 @@ from tensorflow.core.framework import types_pb2
class DType(object):
- """Represents the type of the elements in an `Output`.
+ """Represents the type of the elements in a `Tensor`.
The following `DType` objects are defined:
diff --git a/tensorflow/python/framework/function.py b/tensorflow/python/framework/function.py
index 92a85d261e..3faf79859c 100644
--- a/tensorflow/python/framework/function.py
+++ b/tensorflow/python/framework/function.py
@@ -745,7 +745,7 @@ class Defun(object):
function.
The decorated function must add ops to the default graph and return zero or
- more `Output` objects. Call the decorator with named arguments, one for each
+ more `Tensor` objects. Call the decorator with named arguments, one for each
argument of the function to decorate, with the expected type of the argument
as value.
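A minimal sketch of the decorator in use, assuming the positional-dtype form of `Defun`; `my_mul_add` is a hypothetical name:

```python
import tensorflow as tf
from tensorflow.python.framework import function

@function.Defun(tf.float32, tf.float32)
def my_mul_add(x, y):
    # Ops added here become the body of a single graph function.
    return x * y + x

z = my_mul_add(tf.constant(2.0), tf.constant(3.0))  # a float32 Tensor
```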
diff --git a/tensorflow/python/framework/gen_docs_combined.py b/tensorflow/python/framework/gen_docs_combined.py
index e68a43eb7b..241b15a1ab 100644
--- a/tensorflow/python/framework/gen_docs_combined.py
+++ b/tensorflow/python/framework/gen_docs_combined.py
@@ -39,7 +39,7 @@ FLAGS = tf.flags.FLAGS
PREFIX_TEXT = """
-Note: Functions taking `Output` arguments can also take anything accepted by
+Note: Functions taking `Tensor` arguments can also take anything accepted by
[`tf.convert_to_tensor`](framework.md#convert_to_tensor).
"""
diff --git a/tensorflow/python/framework/importer.py b/tensorflow/python/framework/importer.py
index 01f1f7add3..5459a19584 100644
--- a/tensorflow/python/framework/importer.py
+++ b/tensorflow/python/framework/importer.py
@@ -156,7 +156,7 @@ def import_graph_def(graph_def, input_map=None, return_elements=None,
This function provides a way to import a serialized TensorFlow
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and extract individual objects in the `GraphDef` as
- [`Output`](#Output) and [`Operation`](#Operation) objects. See
+ [`Tensor`](#Tensor) and [`Operation`](#Operation) objects. See
[`Graph.as_graph_def()`](#Graph.as_graph_def) for a way to create a
`GraphDef` proto.
@@ -164,11 +164,11 @@ def import_graph_def(graph_def, input_map=None, return_elements=None,
graph_def: A `GraphDef` proto containing operations to be imported into
the default graph.
input_map: A dictionary mapping input names (as strings) in `graph_def`
- to `Output` objects. The values of the named input tensors in the
- imported graph will be re-mapped to the respective `Output` values.
+ to `Tensor` objects. The values of the named input tensors in the
+ imported graph will be re-mapped to the respective `Tensor` values.
return_elements: A list of strings containing operation names in
`graph_def` that will be returned as `Operation` objects; and/or
- tensor names in `graph_def` that will be returned as `Output` objects.
+ tensor names in `graph_def` that will be returned as `Tensor` objects.
name: (Optional.) A prefix that will be prepended to the names in
`graph_def`. Defaults to `"import"`.
op_dict: (Optional.) A dictionary mapping op type names to `OpDef` protos.
@@ -182,12 +182,12 @@ def import_graph_def(graph_def, input_map=None, return_elements=None,
earlier binaries.
Returns:
- A list of `Operation` and/or `Output` objects from the imported graph,
+ A list of `Operation` and/or `Tensor` objects from the imported graph,
corresponding to the names in `return_elements`.
Raises:
TypeError: If `graph_def` is not a `GraphDef` proto,
- `input_map` is not a dictionary mapping strings to `Output` objects,
+ `input_map` is not a dictionary mapping strings to `Tensor` objects,
or `return_elements` is not a list of strings.
ValueError: If `input_map`, or `return_elements` contains names that
do not appear in `graph_def`, or `graph_def` is not well-formed (e.g.
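A short sketch of `input_map` and `return_elements` in action; the op names are illustrative:

```python
import tensorflow as tf

g1 = tf.Graph()
with g1.as_default():
    x = tf.placeholder(tf.float32, name="x")
    tf.square(x, name="y")
graph_def = g1.as_graph_def()

with tf.Graph().as_default():
    new_x = tf.constant(3.0)
    # Re-map the imported "x" to `new_x`; fetch the imported "y" as a Tensor.
    y, = tf.import_graph_def(graph_def, input_map={"x:0": new_x},
                             return_elements=["y:0"])
```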
diff --git a/tensorflow/python/framework/meta_graph.py b/tensorflow/python/framework/meta_graph.py
index 6fd93f44bc..172cb8946c 100644
--- a/tensorflow/python/framework/meta_graph.py
+++ b/tensorflow/python/framework/meta_graph.py
@@ -454,8 +454,8 @@ def import_scoped_meta_graph(meta_graph_or_file,
import_scope: Optional `string`. Name scope into which to import the
subgraph. If `None`, the graph is imported to the root name scope.
input_map: A dictionary mapping input names (as strings) in `graph_def` to
- `Output` objects. The values of the named input tensors in the imported
- graph will be re-mapped to the respective `Output` values.
+ `Tensor` objects. The values of the named input tensors in the imported
+ graph will be re-mapped to the respective `Tensor` values.
unbound_inputs_col_name: Collection name for looking up unbound inputs.
Returns:
diff --git a/tensorflow/python/framework/op_def_library.py b/tensorflow/python/framework/op_def_library.py
index cd1553ace3..2c27cef3a9 100644
--- a/tensorflow/python/framework/op_def_library.py
+++ b/tensorflow/python/framework/op_def_library.py
@@ -245,7 +245,7 @@ def _MaybeColocateWith(inputs):
"""A context manager for (maybe) colocating with a list of input tensors.
Args:
- inputs: A list of `Output` or `Operation` objects.
+ inputs: A list of `Tensor` or `Operation` objects.
Returns:
A context manager.
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index e8dcdf4e97..ed7389e2f3 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -597,10 +597,10 @@ def convert_to_tensor(value,
name=None,
as_ref=False,
preferred_dtype=None):
- """Converts the given `value` to an `Output`.
+ """Converts the given `value` to a `Tensor`.
- This function converts Python objects of various types to `Output`
- objects. It accepts `Output` objects, numpy arrays, Python lists,
+ This function converts Python objects of various types to `Tensor`
+ objects. It accepts `Tensor` objects, numpy arrays, Python lists,
and Python scalars. For example:
```python
@@ -620,15 +620,15 @@ def convert_to_tensor(value,
(such as `my_func` in the example above). All standard Python op
constructors apply this function to each of their Tensor-valued
inputs, which allows those ops to accept numpy arrays, Python lists,
- and scalars in addition to `Output` objects.
+ and scalars in addition to `Tensor` objects.
Args:
- value: An object whose type has a registered `Output` conversion function.
+ value: An object whose type has a registered `Tensor` conversion function.
dtype: Optional element type for the returned tensor. If missing, the
type is inferred from the type of `value`.
- name: Optional name to use if a new `Output` is created.
+ name: Optional name to use if a new `Tensor` is created.
as_ref: True if we want the result as a ref tensor. Only used if a new
- `Output` is created.
+ `Tensor` is created.
preferred_dtype: Optional element type for the returned tensor,
used when dtype is None. In some cases, a caller may not have a
dtype in mind when converting to a tensor, so preferred_dtype
@@ -636,7 +636,7 @@ def convert_to_tensor(value,
`preferred_dtype` is not possible, this argument has no effect.
Returns:
- An `Output` based on `value`.
+ A `Tensor` based on `value`.
Raises:
TypeError: If no conversion function is registered for `value`.
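For illustration, the conversions this docstring describes, in a minimal sketch:

```python
import numpy as np
import tensorflow as tf

t1 = tf.convert_to_tensor(4.0)               # Python scalar
t2 = tf.convert_to_tensor([[1, 2], [3, 4]])  # Python list
t3 = tf.convert_to_tensor(np.zeros((2, 2)))  # numpy ndarray
t4 = tf.convert_to_tensor(t1)                # already a Tensor: returned as-is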
@@ -695,12 +695,12 @@ def convert_n_to_tensor(values,
name=None,
as_ref=False,
preferred_dtype=None):
- """Converts `values` to a list of `Output` objects.
+ """Converts `values` to a list of `Tensor` objects.
Args:
values: A list of objects that can be consumed by `tf.convert_to_tensor()`.
- dtype: (Optional.) The required `DType` of the returned `Output` objects.
- name: (Optional.) A name prefix to used when a new `Output` is
+ dtype: (Optional.) The required `DType` of the returned `Tensor` objects.
+ name: (Optional.) A name prefix to use when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
@@ -711,7 +711,7 @@ def convert_n_to_tensor(values,
`preferred_dtype` is not possible, this argument has no effect.
Returns:
- A list of `Output` and/or `IndexedSlices` objects.
+ A list of `Tensor` and/or `IndexedSlices` objects.
Raises:
TypeError: If no conversion function is registered for an element in
@@ -736,22 +736,22 @@ def convert_n_to_tensor(values,
def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None,
as_ref=False):
- """Converts the given object to an `Output` or an `IndexedSlices`.
+ """Converts the given object to a `Tensor` or an `IndexedSlices`.
If `value` is an `IndexedSlices` or `SparseTensor` it is returned
- unmodified. Otherwise, it is converted to an `Output` using
+ unmodified. Otherwise, it is converted to a `Tensor` using
`convert_to_tensor()`.
Args:
value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
by `convert_to_tensor()`.
- dtype: (Optional.) The required `DType` of the returned `Output` or
+ dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
- name: (Optional.) A name to use if a new `Output` is created.
+ name: (Optional.) A name to use if a new `Tensor` is created.
as_ref: True if the caller wants the results as ref tensors.
Returns:
- An `Output`, `IndexedSlices`, or `SparseTensor` based on `value`.
+ A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.
Raises:
ValueError: If `dtype` does not match the element type of `value`.
@@ -768,7 +768,7 @@ def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None,
def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None,
as_ref=False):
- """Converts `values` to a list of `Output` or `IndexedSlices` objects.
+ """Converts `values` to a list of `Tensor` or `IndexedSlices` objects.
Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
unmodified.
@@ -776,15 +776,15 @@ def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None,
Args:
values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
can be consumed by `convert_to_tensor()`.
- dtype: (Optional.) The required `DType` of the returned `Output`
+ dtype: (Optional.) The required `DType` of the returned `Tensor` or
`IndexedSlices`.
- name: (Optional.) A name prefix to used when a new `Output` is
+ name: (Optional.) A name prefix to use when a new `Tensor` is
created, in which case element `i` will be given the name `name
+ '_' + i`.
as_ref: True if the caller wants the results as ref tensors.
Returns:
- A list of `Output`, `IndexedSlices`, and/or `SparseTensor` objects.
+ A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.
Raises:
TypeError: If no conversion function is registered for an element in
@@ -808,7 +808,7 @@ def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None,
def register_tensor_conversion_function(base_type, conversion_func,
priority=100):
- """Registers a function for converting objects of `base_type` to `Output`.
+ """Registers a function for converting objects of `base_type` to `Tensor`.
The conversion function must have the following signature:
@@ -817,15 +817,15 @@ def register_tensor_conversion_function(base_type, conversion_func,
# ...
```
- It must return an `Output` with the given `dtype` if specified. If the
- conversion function creates a new `Output`, it should use the given
+ It must return a `Tensor` with the given `dtype` if specified. If the
+ conversion function creates a new `Tensor`, it should use the given
`name` if specified. All exceptions will be propagated to the caller.
The conversion function may return `NotImplemented` for some
inputs. In this case, the conversion process will continue to try
subsequent conversion functions.
- If `as_ref` is true, the function must return an `Output` reference,
+ If `as_ref` is true, the function must return a `Tensor` reference,
such as a `Variable`.
NOTE: The conversion functions will execute in order of priority,
@@ -837,7 +837,7 @@ def register_tensor_conversion_function(base_type, conversion_func,
base_type: The base type or tuple of base types for all objects that
`conversion_func` accepts.
conversion_func: A function that converts instances of `base_type` to
- `Output`.
+ `Tensor`.
priority: Optional integer that indicates the priority for applying this
conversion function. Conversion functions with smaller priority values
run earlier than conversion functions with larger priority values.
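A minimal sketch of registering a converter for a custom wrapper type; `MyValue` and `_my_value_to_tensor` are hypothetical names:

```python
import tensorflow as tf

class MyValue(object):
    def __init__(self, data):
        self.data = data

def _my_value_to_tensor(value, dtype=None, name=None, as_ref=False):
    # Returning NotImplemented here would defer to lower-priority converters.
    return tf.convert_to_tensor(value.data, dtype=dtype, name=name)

tf.register_tensor_conversion_function(MyValue, _my_value_to_tensor)
doubled = tf.convert_to_tensor(MyValue([1.0, 2.0])) * 2.0
```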
@@ -865,10 +865,10 @@ def register_tensor_conversion_function(base_type, conversion_func,
class IndexedSlices(_TensorLike):
"""A sparse representation of a set of tensor slices at given indices.
- This class is a simple wrapper for a pair of `Output` objects:
+ This class is a simple wrapper for a pair of `Tensor` objects:
- * `values`: An `Output` of any dtype with shape `[D0, D1, ..., Dn]`.
- * `indices`: A 1-D integer `Output` with shape `[D0]`.
+ * `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
+ * `indices`: A 1-D integer `Tensor` with shape `[D0]`.
An `IndexedSlices` is typically used to represent a subset of a larger
tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.
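`IndexedSlices` most often shows up as the gradient of a `tf.gather`; a minimal sketch:

```python
import tensorflow as tf

params = tf.Variable(tf.ones([10, 3]))
rows = tf.gather(params, [0, 2])   # read two rows of `params`
loss = tf.reduce_sum(rows)
grad, = tf.gradients(loss, [params])
# `grad` is an IndexedSlices: grad.indices == [0, 2], and grad.values
# has shape [2, 3], so only the gathered rows carry gradient entries.
```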
@@ -910,17 +910,17 @@ class IndexedSlices(_TensorLike):
@property
def values(self):
- """An `Output` containing the values of the slices."""
+ """A `Tensor` containing the values of the slices."""
return self._values
@property
def indices(self):
- """A 1-D `Output` containing the indices of the slices."""
+ """A 1-D `Tensor` containing the indices of the slices."""
return self._indices
@property
def dense_shape(self):
- """A 1-D `Output` containing the shape of the corresponding dense tensor."""
+ """A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
return self._dense_shape
@property
@@ -1008,7 +1008,7 @@ class Operation(object):
"""Represents a graph node that performs computation on tensors.
An `Operation` is a node in a TensorFlow `Graph` that takes zero or
- more `Output` objects as input, and produces zero or more `Output`
+ more `Tensor` objects as input, and produces zero or more `Tensor`
objects as output. Objects of type `Operation` are created by
calling a Python op constructor (such as
[`tf.matmul()`](../../api_docs/python/math_ops.md#matmul))
@@ -1054,9 +1054,9 @@ class Operation(object):
`op`, and `device`. The `input` attribute is irrelevant here
as it will be computed when generating the model.
g: `Graph`. The parent graph.
- inputs: list of `Output` objects. The inputs to this `Operation`.
+ inputs: list of `Tensor` objects. The inputs to this `Operation`.
output_types: list of `DType` objects. List of the types of the
- `Output`s computed by this operation. The length of this list indicates
+ `Tensor`s computed by this operation. The length of this list indicates
the number of output endpoints of the `Operation`.
control_inputs: list of operations or tensors from which to have a
control dependency.
@@ -1313,7 +1313,7 @@ class Operation(object):
@property
def outputs(self):
- """The list of `Output` objects representing the outputs of this op."""
+ """The list of `Tensor` objects representing the outputs of this op."""
return self._outputs
# pylint: disable=protected-access
@@ -1341,7 +1341,7 @@ class Operation(object):
@property
def inputs(self):
- """The list of `Output` objects representing the data inputs of this op."""
+ """The list of `Tensor` objects representing the data inputs of this op."""
return Operation._InputList(self)
@property
@@ -1443,7 +1443,7 @@ class Operation(object):
available, or `session` must be specified explicitly.
Args:
- feed_dict: A dictionary that maps `Output` objects to feed values.
+ feed_dict: A dictionary that maps `Tensor` objects to feed values.
See [`Session.run()`](../../api_docs/python/client.md#Session.run)
for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run this operation. If
@@ -1460,9 +1460,9 @@ class RegisterGradient(object):
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
- that takes the original `Operation` and `n` `Output` objects
+ that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
- and returns `m` `Output` objects (representing the partial gradients
+ and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
@@ -1783,7 +1783,7 @@ class Graph(object):
A `Graph` contains a set of
[`Operation`](../../api_docs/python/framework.md#Operation) objects,
which represent units of computation; and
- [`Output`](../../api_docs/python/framework.md#Output) objects, which represent
+ [`Tensor`](../../api_docs/python/framework.md#Tensor) objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
@@ -2171,7 +2171,7 @@ class Graph(object):
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
- inputs: A list of `Output` objects that will be inputs to the `Operation`.
+ inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: A list of `DType` objects that will be the types of the tensors
that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
@@ -2192,7 +2192,7 @@ class Graph(object):
to compute the device property of the Operation.
Raises:
- TypeError: if any of the inputs is not an `Output`.
+ TypeError: if any of the inputs is not a `Tensor`.
ValueError: if colocation conflicts with existing device assignment.
Returns:
@@ -2286,7 +2286,7 @@ class Graph(object):
return ret
def as_graph_element(self, obj, allow_tensor=True, allow_operation=True):
- """Returns the object referred to by `obj`, as an `Operation` or `Output`.
+ """Returns the object referred to by `obj`, as an `Operation` or `Tensor`.
This function validates that `obj` represents an element of this
graph, and gives an informative error message if it is not.
@@ -2298,14 +2298,14 @@ class Graph(object):
This method may be called concurrently from multiple threads.
Args:
- obj: An `Output`, an `Operation`, or the name of a tensor or operation.
+ obj: A `Tensor`, an `Operation`, or the name of a tensor or operation.
Can also be any object with an `_as_graph_element()` method that returns
a value of one of these types.
- allow_tensor: If true, `obj` may refer to an `Output`.
+ allow_tensor: If true, `obj` may refer to a `Tensor`.
allow_operation: If true, `obj` may refer to an `Operation`.
Returns:
- The `Output` or `Operation` in the Graph corresponding to `obj`.
+ The `Tensor` or `Operation` in the Graph corresponding to `obj`.
Raises:
TypeError: If `obj` is not a type we support attempting to convert
@@ -2449,15 +2449,15 @@ class Graph(object):
return self.as_graph_element(name, allow_tensor=False, allow_operation=True)
def get_tensor_by_name(self, name):
- """Returns the `Output` with the given `name`.
+ """Returns the `Tensor` with the given `name`.
This method may be called concurrently from multiple threads.
Args:
- name: The name of the `Output` to return.
+ name: The name of the `Tensor` to return.
Returns:
- The `Output` with the given `name`.
+ The `Tensor` with the given `name`.
Raises:
TypeError: If `name` is not a string.
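For reference, the name-based lookups side by side; the `":0"` suffix selects the op's first output:

```python
import tensorflow as tf

g = tf.Graph()
with g.as_default():
    a = tf.constant(1.0, name="a")

op = g.get_operation_by_name("a")  # the Operation
t = g.get_tensor_by_name("a:0")    # its first output Tensor
assert t is a
```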
@@ -3196,7 +3196,7 @@ class Graph(object):
```
Args:
- control_inputs: A list of `Operation` or `Output` objects which
+ control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
@@ -3207,7 +3207,7 @@ class Graph(object):
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
- `Output` objects.
+ `Tensor` objects.
"""
if control_inputs is None:
return self._ControlDependenciesController(self, None)
@@ -3478,7 +3478,7 @@ def control_dependencies(control_inputs):
for more details.
Args:
- control_inputs: A list of `Operation` or `Output` objects which
+ control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
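A minimal sketch of the context manager, using only what the docstring states:

```python
import tensorflow as tf

counter = tf.Variable(0.0)
increment = tf.assign_add(counter, 1.0)
with tf.control_dependencies([increment]):
    # `value` is only computed after `increment` has run.
    value = tf.identity(counter)
```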
@@ -3707,7 +3707,7 @@ def reset_default_graph():
NOTE: The default graph is a property of the current thread. This
function applies only to the current thread. Calling this function while
a `tf.Session` or `tf.InteractiveSession` is active will result in undefined
- behavior. Using any previously created `tf.Operation` or `tf.Output` objects
+ behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
after calling this function will result in undefined behavior.
"""
_default_graph_stack.reset()
@@ -3763,7 +3763,7 @@ def _get_graph_from_inputs(op_input_list, graph=None):
"op_input_list", we attempt to use the default graph.
Args:
- op_input_list: A list of inputs to an operation, which may include `Output`,
+ op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
@@ -3841,7 +3841,7 @@ class GraphKeys(object):
be trained by an optimizer. See
[`tf.trainable_variables()`](../../api_docs/python/state_ops.md#trainable_variables)
for more details.
- * `SUMMARIES`: the summary `Output` objects that have been created in the
+ * `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
[`tf.merge_all_summaries()`](../../api_docs/python/train.md#merge_all_summaries)
for more details.
@@ -4036,7 +4036,7 @@ def name_scope(name, default_name=None, values=None):
Args:
name: The name argument that is passed to the op function.
default_name: The default name to use if the `name` argument is `None`.
- values: The list of `Output` arguments that are passed to the op function.
+ values: The list of `Tensor` arguments that are passed to the op function.
Returns:
A context manager for use in defining Python ops. Yields the name scope.
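The documented three-argument form in a typical op wrapper; `scaled_add` is a hypothetical op name:

```python
import tensorflow as tf

def scaled_add(a, b, scale, name=None):
    with tf.name_scope(name, "ScaledAdd", [a, b]) as scope:
        a = tf.convert_to_tensor(a, name="a")
        b = tf.convert_to_tensor(b, name="b")
        return tf.identity(scale * (a + b), name=scope)
```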
diff --git a/tensorflow/python/framework/sparse_tensor.py b/tensorflow/python/framework/sparse_tensor.py
index 2d69644dc1..4ce92c4225 100644
--- a/tensorflow/python/framework/sparse_tensor.py
+++ b/tensorflow/python/framework/sparse_tensor.py
@@ -210,7 +210,7 @@ class SparseTensor(_TensorLike):
available, or `session` must be specified explicitly.
Args:
- feed_dict: A dictionary that maps `Output` objects to feed values.
+ feed_dict: A dictionary that maps `Tensor` objects to feed values.
See [`Session.run()`](../../api_docs/python/client.md#Session.run) for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this sparse
diff --git a/tensorflow/python/framework/subscribe.py b/tensorflow/python/framework/subscribe.py
index 0df118f43a..1f3bca71b2 100644
--- a/tensorflow/python/framework/subscribe.py
+++ b/tensorflow/python/framework/subscribe.py
@@ -27,13 +27,13 @@ def _recursive_apply(tensors, apply_fn):
"""Helper method to recursively apply a function to structure of tensors.
The structure of the tensors should take the form similar to fetches in
- `tf.Session` and includes single `Output`, `list`, nested `list`, `tuple`,
+ `tf.Session` and includes single `Tensor`, `list`, nested `list`, `tuple`,
`namedtuple`, or `dict`.
Args:
- tensors: Single `Output`, `list`, nested `list, `tuple`,
+ tensors: Single `Tensor`, `list`, nested `list`, `tuple`,
`namedtuple`, or `dict`.
- apply_fn: Function to apply to each `Output` and should return an `Output`.
+ apply_fn: Function to apply to each `Tensor` and should return a `Tensor`.
Returns:
Returns the modified tensors with the same structure.
Raises:
@@ -99,7 +99,7 @@ def _subscribe(tensor, side_effects, control_cache):
"""Helper method that subscribes a single tensor to a list of side_effects.
Args:
- tensor: `tf.Output`
+ tensor: `tf.Tensor`
side_effects: List of side_effect functions see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
@@ -138,7 +138,7 @@ def subscribe(tensors, side_effects):
This method will attach side effect graphs to a given set
of tensors. Set of tensors follows from session.run and supports
- single `Output`, `list`, nested `list`, `tuple`, `namedtuple`, or `dict`. It
+ single `Tensor`, `list`, nested `list`, `tuple`, `namedtuple`, or `dict`. It
returns the tensors in the same structure they were passed in, but as clones with
side effects applied. The supplied side effect graphs are specified
as a constructor function which takes the target tensor and
@@ -152,10 +152,10 @@ def subscribe(tensors, side_effects):
your tensorflow code.
Args:
- tensors: `Output` or set of tensors to subscribe to. Set of tensors format
- follows from `Session.run` and supports single `Output`, `list`, nested
+ tensors: `Tensor` or set of tensors to subscribe to. Set of tensors format
+ follows from `Session.run` and supports single `Tensor`, `list`, nested
`list`, `tuple`, `namedtuple`, or `dict`.
- side_effects: Function(s) that takes an `Output`, construct a subgraph, and
+ side_effects: Function(s) that takes a `Tensor`, constructs a subgraph, and
returns a nonempty list of control dependencies. This can be a single
function or list of functions.
Returns:
diff --git a/tensorflow/python/framework/tensor_shape.py b/tensorflow/python/framework/tensor_shape.py
index abcf384042..b2c015c0b6 100644
--- a/tensorflow/python/framework/tensor_shape.py
+++ b/tensorflow/python/framework/tensor_shape.py
@@ -379,10 +379,10 @@ def as_dimension(value):
class TensorShape(object):
- """Represents the shape of an `Output`.
+ """Represents the shape of a `Tensor`.
A `TensorShape` represents a possibly-partial shape specification for a
- `Output`. It may be one of the following:
+ `Tensor`. It may be one of the following:
* *Fully-known shape:* has a known number of dimensions and a known size
for each dimension.
@@ -397,7 +397,7 @@ class TensorShape(object):
C++`](../../how_tos/adding_an_op/index.md#shape-functions-in-c) for
details of shape functions and how to register them. Alternatively,
the shape may be set explicitly using
- [`Output.set_shape()`](../../api_docs/python/framework.md#Output.set_shape).
+ [`Tensor.set_shape()`](../../api_docs/python/framework.md#Tensor.set_shape).
@@merge_with
@@concatenate
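To illustrate the shape states the class describes, a minimal sketch:

```python
import tensorflow as tf

x = tf.placeholder(tf.float32)       # unknown shape: TensorShape(None)
x.set_shape([None, 28, 28, 1])       # partially known: batch size still free
print(x.get_shape())                 # (?, 28, 28, 1)
y = tf.placeholder(tf.float32, [4])  # fully known: TensorShape([4])
```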
diff --git a/tensorflow/python/kernel_tests/cwise_ops_test.py b/tensorflow/python/kernel_tests/cwise_ops_test.py
index 3322d22b9f..5b1c567f37 100644
--- a/tensorflow/python/kernel_tests/cwise_ops_test.py
+++ b/tensorflow/python/kernel_tests/cwise_ops_test.py
@@ -1270,8 +1270,8 @@ class LogicalOpTest(tf.test.TestCase):
f(x, y)
def testUsingAsPythonValueFails(self):
- # Ensure that we raise an error when the user attempts to treat an
- # `Output` as a Python `bool`.
+ # Ensure that we raise an error when the user attempts to treat a
+ # `Tensor` as a Python `bool`.
b = tf.constant(False)
with self.assertRaises(TypeError):
if b:
diff --git a/tensorflow/python/ops/array_ops.py b/tensorflow/python/ops/array_ops.py
index 9df0ffce1a..cefd164a74 100644
--- a/tensorflow/python/ops/array_ops.py
+++ b/tensorflow/python/ops/array_ops.py
@@ -172,13 +172,13 @@ def shape(input, name=None, out_type=dtypes.int32):
```
Args:
- input: An `Output` or `SparseTensor`.
+ input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to `tf.int32`.
Returns:
- An `Output` of type `out_type`.
+ A `Tensor` of type `out_type`.
"""
return shape_internal(input, name, optimize=True, out_type=out_type)
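The static/dynamic distinction in one sketch:

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 3])
static = x.get_shape()  # TensorShape([None, 3]), known when building the graph
dynamic = tf.shape(x)   # int32 Tensor, evaluated against the fed value
```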
@@ -188,14 +188,14 @@ def shape_internal(input, name=None, optimize=True, out_type=dtypes.int32):
"""Returns the shape of a tensor.
Args:
- input: An `Output` or `SparseTensor`.
+ input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the shape as a constant when possible.
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
- An `Output` of type `out_type`.
+ A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Shape", [input]) as name:
@@ -225,13 +225,13 @@ def size(input, name=None, out_type=dtypes.int32):
```
Args:
- input: An `Output` or `SparseTensor`.
+ input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
- An `Output` of type `out_type`. Defaults to tf.int32.
+ A `Tensor` of type `out_type`.
"""
return size_internal(input, name, optimize=True, out_type=out_type)
@@ -241,14 +241,14 @@ def size_internal(input, name=None, optimize=True, out_type=dtypes.int32):
"""Returns the size of a tensor.
Args:
- input: An `Output` or `SparseTensor`.
+ input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the size as a constant when possible.
out_type: (Optional) The specified output type of the operation
(`int32` or `int64`). Defaults to tf.int32.
Returns:
- An `Output` of type `out_type`.
+ A `Tensor` of type `out_type`.
"""
with ops.name_scope(name, "Size", [input]) as name:
if isinstance(
@@ -282,11 +282,11 @@ def rank(input, name=None):
element of the tensor. Rank is also known as "order", "degree", or "ndims."
Args:
- input: An `Output` or `SparseTensor`.
+ input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
- An `Output` of type `int32`.
+ A `Tensor` of type `int32`.
@compatibility(numpy)
Equivalent to np.ndim
@@ -300,12 +300,12 @@ def rank_internal(input, name=None, optimize=True):
"""Returns the rank of a tensor.
Args:
- input: An `Output` or `SparseTensor`.
+ input: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
optimize: if true, encode the rank as a constant when possible.
Returns:
- An `Output` of type `int32`.
+ A `Tensor` of type `int32`.
"""
with ops.name_scope(name, "Rank", [input]) as name:
if isinstance(
@@ -320,7 +320,7 @@ def rank_internal(input, name=None, optimize=True):
def _SliceHelper(tensor, slice_spec, var=None):
- """Overload for Output.__getitem__.
+ """Overload for Tensor.__getitem__.
This operation extracts the specified region from the tensor.
The notation is similar to NumPy with the restriction that
@@ -357,7 +357,7 @@ def _SliceHelper(tensor, slice_spec, var=None):
Args:
tensor: An ops.Tensor object.
- slice_spec: The arguments to Output.__getitem__.
+ slice_spec: The arguments to Tensor.__getitem__.
var: In the case of variable slice assignment, the Variable
object to slice (i.e. tensor is the read-only view of this
variable).
@@ -472,13 +472,13 @@ def slice(input_, begin, size, name=None):
```
Args:
- input_: An `Output`.
- begin: An `int32` or `int64` `Output`.
- size: An `int32` or `int64` `Output`.
+ input_: A `Tensor`.
+ begin: An `int32` or `int64` `Tensor`.
+ size: An `int32` or `int64` `Tensor`.
name: A name for the operation (optional).
Returns:
- An `Output` the same type as `input`.
+ A `Tensor` the same type as `input`.
"""
return gen_array_ops._slice(input_, begin, size, name=name)
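A concrete slice, a minimal sketch matching the begin/size semantics above:

```python
import tensorflow as tf

t = tf.constant([[1, 2, 3], [4, 5, 6]])
s = tf.slice(t, begin=[0, 1], size=[2, 2])  # -> [[2, 3], [5, 6]]
```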
@@ -561,10 +561,10 @@ def strided_slice(input_,
```
Args:
- input_: An `Output`.
- begin: An `int32` or `int64` `Output`.
- end: An `int32` or `int64` `Output`.
- strides: An `int32` or `int64` `Output`.
+ input_: A `Tensor`.
+ begin: An `int32` or `int64` `Tensor`.
+ end: An `int32` or `int64` `Tensor`.
+ strides: An `int32` or `int64` `Tensor`.
begin_mask: An `int32` mask.
end_mask: An `int32` mask.
ellipsis_mask: An `int32` mask.
@@ -574,7 +574,7 @@ def strided_slice(input_,
name: A name for the operation (optional).
Returns:
- An `Output` the same type as `input`.
+ A `Tensor` the same type as `input`.
"""
op = gen_array_ops.strided_slice(
input=input_,
@@ -617,7 +617,7 @@ def _SliceHelperVar(var, slice_spec):
This allows creating a sub-tensor from part of the current contents
of a variable.
See
- [`Output.__getitem__`](../../api_docs/python/framework.md#Output.__getitem__)
+ [`Tensor.__getitem__`](../../api_docs/python/framework.md#Tensor.__getitem__)
for detailed examples of slicing.
This function in addition also allows assignment to a sliced range.
@@ -642,7 +642,7 @@ def _SliceHelperVar(var, slice_spec):
Args:
var: An `ops.Variable` object.
- slice_spec: The arguments to `Output.__getitem__`.
+ slice_spec: The arguments to `Tensor.__getitem__`.
Returns:
The appropriate slice of "tensor", based on "slice_spec".
@@ -686,13 +686,13 @@ def stack(values, axis=0, name="stack"):
tf.stack([x, y, z]) = np.asarray([x, y, z])
Args:
- values: A list of `Output` objects with the same shape and type.
+ values: A list of `Tensor` objects with the same shape and type.
axis: An `int`. The axis to stack along. Defaults to the first dimension.
Supports negative indexes.
name: A name for this operation (optional).
Returns:
- output: A stacked `Output` with the same type as `values`.
+ output: A stacked `Tensor` with the same type as `values`.
Raises:
ValueError: If `axis` is out of the range [-(R+1), R+1).
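`stack` and its inverse in one sketch:

```python
import tensorflow as tf

x = tf.constant([1, 4])
y = tf.constant([2, 5])
z = tf.constant([3, 6])
stacked = tf.stack([x, y, z])       # shape [3, 2]
cols = tf.unstack(stacked, axis=1)  # two Tensors of shape [3]
```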
@@ -740,13 +740,13 @@ def pack(values, axis=0, name="pack"):
tf.pack([x, y, z]) = np.asarray([x, y, z])
Args:
- values: A list of `Output` objects with the same shape and type.
+ values: A list of `Tensor` objects with the same shape and type.
axis: An `int`. The axis to pack along. Defaults to the first dimension.
Supports negative indexes.
name: A name for this operation (optional).
Returns:
- output: A packed `Output` with the same type as `values`.
+ output: A packed `Tensor` with the same type as `values`.
Raises:
ValueError: If `axis` is out of the range [-(R+1), R+1).
@@ -764,7 +764,7 @@ def _autopacking_helper(list_or_tuple, dtype, name):
name: A name for the returned tensor.
Returns:
- A `tf.Output` with value equivalent to `list_or_tuple`.
+ A `tf.Tensor` with value equivalent to `list_or_tuple`.
"""
must_pack = False
converted_elems = []
@@ -805,7 +805,7 @@ def _get_dtype_from_nested_lists(list_or_tuple):
Args:
list_or_tuple: A list or tuple representing an object that can be
- converted to a `tf.Output`.
+ converted to a `tf.Tensor`.
Returns:
The dtype of any tensor-like object in `list_or_tuple`, or `None` if no
@@ -864,7 +864,7 @@ def unstack(value, num=None, axis=0, name="unstack"):
tf.unstack(x, n) = list(x)
Args:
- value: A rank `R > 0` `Output` to be unstacked.
+ value: A rank `R > 0` `Tensor` to be unstacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred
if `None` (the default).
axis: An `int`. The axis to unstack along. Defaults to the first
@@ -872,7 +872,7 @@ def unstack(value, num=None, axis=0, name="unstack"):
name: A name for the operation (optional).
Returns:
- The list of `Output` objects unstacked from `value`.
+ The list of `Tensor` objects unstacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
@@ -915,7 +915,7 @@ def unpack(value, num=None, axis=0, name="unpack"):
tf.unpack(x, n) = list(x)
Args:
- value: A rank `R > 0` `Output` to be unpacked.
+ value: A rank `R > 0` `Tensor` to be unpacked.
num: An `int`. The length of the dimension `axis`. Automatically inferred
if `None` (the default).
axis: An `int`. The axis to unpack along. Defaults to the first
@@ -923,7 +923,7 @@ def unpack(value, num=None, axis=0, name="unpack"):
name: A name for the operation (optional).
Returns:
- The list of `Output` objects unpacked from `value`.
+ The list of `Tensor` objects unpacked from `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
@@ -979,12 +979,12 @@ def concat(concat_dim, values, name="concat"):
```
Args:
- concat_dim: 0-D `int32` `Output`. Dimension along which to concatenate.
- values: A list of `Output` objects or a single `Output`.
+ concat_dim: 0-D `int32` `Tensor`. Dimension along which to concatenate.
+ values: A list of `Tensor` objects or a single `Tensor`.
name: A name for the operation (optional).
Returns:
- An `Output` resulting from concatenation of the input tensors.
+ A `Tensor` resulting from concatenation of the input tensors.
"""
if not isinstance(values, (list, tuple)):
values = [values]
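A minimal sketch; note the pre-1.0 argument order used by this signature, with `concat_dim` first:

```python
import tensorflow as tf

t1 = tf.constant([[1, 2], [3, 4]])
t2 = tf.constant([[5, 6], [7, 8]])
rows = tf.concat(0, [t1, t2])  # shape [4, 2]
cols = tf.concat(1, [t1, t2])  # shape [2, 4]
```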
@@ -1144,14 +1144,14 @@ def split(split_dim, num_split, value, name="split"):
```
Args:
- split_dim: A 0-D `int32` `Output`. The dimension along which to split.
+ split_dim: A 0-D `int32` `Tensor`. The dimension along which to split.
Must be in the range `[0, rank(value))`.
num_split: A Python integer. The number of ways to split.
- value: The `Output` to split.
+ value: The `Tensor` to split.
name: A name for the operation (optional).
Returns:
- `num_split` `Output` objects resulting from splitting `value`.
+ `num_split` `Tensor` objects resulting from splitting `value`.
"""
return gen_array_ops._split(split_dim=split_dim,
num_split=num_split,
@@ -1185,20 +1185,20 @@ def split_v(value, size_splits, split_dim=0, num=None, name="split_v"):
```
Args:
- value: The `Output` to split.
+ value: The `Tensor` to split.
size_splits: Either an integer indicating the number of splits along
split_dim or a 1-D Tensor containing the sizes of each output tensor
along split_dim. If an integer then it must evenly divide
value.shape[split_dim]; otherwise the sum of sizes along the split
dimension must match that of the input.
- split_dim: A 0-D `int32` `Output`. The dimension along which to split.
+ split_dim: A 0-D `int32` `Tensor`. The dimension along which to split.
Must be in the range `[0, rank(value))`. Defaults to 0.
num: Optional, used to specify the number of outputs when it cannot be
inferred from the shape of size_splits.
name: A name for the operation (optional).
Returns:
- `len(size_splits)` `Output` objects resulting from splitting `value`.
+ `len(size_splits)` `Tensor` objects resulting from splitting `value`.
Raises:
ValueError: If `num` is unspecified and cannot be inferred.
@@ -1259,12 +1259,12 @@ def transpose(a, perm=None, name="transpose"):
```
Args:
- a: An `Output`.
+ a: A `Tensor`.
perm: A permutation of the dimensions of `a`.
name: A name for the operation (optional).
Returns:
- A transposed `Output`.
+ A transposed `Tensor`.
"""
with ops.name_scope(name, "transpose", [a]) as name:
if perm is None:
@@ -1301,11 +1301,11 @@ def matrix_transpose(a, name="matrix_transpose"):
```
Args:
- a: An `Output` with `rank >= 2`.
+ a: A `Tensor` with `rank >= 2`.
name: A name for the operation (optional).
Returns:
- A transposed batch matrix `Output`.
+ A transposed batch matrix `Tensor`.
Raises:
ValueError: If `a` is determined statically to have `rank < 2`.
@@ -1347,12 +1347,12 @@ def zeros(shape, dtype=dtypes.float32, name=None):
```
Args:
- shape: Either a list of integers, or a 1-D `Output` of type `int32`.
- dtype: The type of an element in the resulting `Output`.
+ shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
+ dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
- An `Output` with all elements set to zero.
+ A `Tensor` with all elements set to zero.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "zeros", [shape]) as name:
@@ -1382,15 +1382,15 @@ def zeros_like(tensor, dtype=None, name=None, optimize=True):
```
Args:
- tensor: An `Output`.
- dtype: A type for the returned `Output`. Must be `float32`, `float64`,
+ tensor: A `Tensor`.
+ dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, or `complex128`.
name: A name for the operation (optional).
optimize: if true, attempt to statically determine the shape of 'tensor'
and encode it as a constant.
Returns:
- An `Output` with all elements set to zero.
+ A `Tensor` with all elements set to zero.
"""
with ops.name_scope(name, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
@@ -1417,8 +1417,8 @@ def ones_like(tensor, dtype=None, name=None, optimize=True):
```
Args:
- tensor: An `Output`.
- dtype: A type for the returned `Output`. Must be `float32`, `float64`,
+ tensor: A `Tensor`.
+ dtype: A type for the returned `Tensor`. Must be `float32`, `float64`,
`int8`, `int16`, `int32`, `int64`, `uint8`, `complex64`, `complex128` or
`bool`.
name: A name for the operation (optional).
@@ -1426,7 +1426,7 @@ def ones_like(tensor, dtype=None, name=None, optimize=True):
and encode it as a constant.
Returns:
- An `Output` with all elements set to 1.
+ A `Tensor` with all elements set to 1.
"""
with ops.name_scope(name, "ones_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
@@ -1451,12 +1451,12 @@ def ones(shape, dtype=dtypes.float32, name=None):
```
Args:
- shape: Either a list of integers, or a 1-D `Output` of type `int32`.
- dtype: The type of an element in the resulting `Output`.
+ shape: Either a list of integers, or a 1-D `Tensor` of type `int32`.
+ dtype: The type of an element in the resulting `Tensor`.
name: A name for the operation (optional).
Returns:
- An `Output` with all elements set to 1.
+ A `Tensor` with all elements set to 1.
"""
dtype = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "ones", [shape]) as name:
@@ -1476,7 +1476,7 @@ def placeholder(dtype, shape=None, name=None):
**Important**: This tensor will produce an error if evaluated. Its value must
be fed using the `feed_dict` optional argument to `Session.run()`,
- `Output.eval()`, or `Operation.run()`.
+ `Tensor.eval()`, or `Operation.run()`.
For example:
@@ -1498,7 +1498,7 @@ def placeholder(dtype, shape=None, name=None):
name: A name for the operation (optional).
Returns:
- An `Output` that may be used as a handle for feeding a value, but not
+ A `Tensor` that may be used as a handle for feeding a value, but not
evaluated directly.
"""
shape = tensor_shape.as_shape(shape)
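Feeding a placeholder, a minimal sketch:

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 2])
y = tf.reduce_sum(x)
with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [[1.0, 2.0]]}))  # 3.0
    # Evaluating y without feeding x would raise, as the docstring warns.
```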
@@ -1516,7 +1516,7 @@ def placeholder(dtype, shape=None, name=None):
# pylint: disable=redefined-outer-name
def _normalize_sparse_shape(shape, name):
- """Takes numpy array or Tensor or None and returns either None or Output."""
+ """Takes numpy array or Tensor or None and returns either None or Tensor."""
if shape is None: return None
if not isinstance(shape, ops.Tensor):
for el in shape:
@@ -1530,7 +1530,7 @@ def sparse_placeholder(dtype, shape=None, name=None):
**Important**: This sparse tensor will produce an error if evaluated.
Its value must be fed using the `feed_dict` optional argument to
- `Session.run()`, `Output.eval()`, or `Operation.run()`.
+ `Session.run()`, `Tensor.eval()`, or `Operation.run()`.
For example:
@@ -1620,13 +1620,13 @@ def pad(tensor, paddings, mode="CONSTANT", name=None): # pylint: disable=invali
```
Args:
- tensor: An `Output`.
- paddings: An `Output` of type `int32`.
+ tensor: A `Tensor`.
+ paddings: A `Tensor` of type `int32`.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC" (case-insensitive)
name: A name for the operation (optional).
Returns:
- An `Output`. Has the same type as `tensor`.
+ A `Tensor`. Has the same type as `tensor`.
Raises:
ValueError: When mode is not one of "CONSTANT", "REFLECT", or "SYMMETRIC".
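A concrete padding; `paddings` holds one `[before, after]` pair per dimension:

```python
import tensorflow as tf

t = tf.constant([[1, 2], [3, 4]])
p = tf.pad(t, paddings=[[1, 1], [2, 2]], mode="CONSTANT")
# p has shape [4, 6]: one zero row above/below, two zero columns each side.
```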
@@ -1683,12 +1683,12 @@ def meshgrid(*args, **kwargs):
```
Args:
- *args: `Output`s with rank 1
+ *args: `Tensor`s with rank 1
indexing: Either 'xy' or 'ij' (optional, default: 'xy')
name: A name for the operation (optional).
Returns:
- outputs: A list of N `Output`s with rank N
+ outputs: A list of N `Tensor`s with rank N
"""
indexing = kwargs.pop("indexing", "xy")
@@ -1841,7 +1841,7 @@ def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
name: A name for the operation (optional).
Returns:
- A dense `Output` with rank `R - 1`, where R is the rank of the
+ A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
@@ -2103,7 +2103,7 @@ def one_hot(indices, depth, on_value=None, off_value=None,
```
Args:
- indices: An `Output` of indices.
+ indices: A `Tensor` of indices.
depth: A scalar defining the depth of the one hot dimension.
on_value: A scalar defining the value to fill in output when `indices[j]
= i`. (default: 1)
@@ -2234,7 +2234,7 @@ def squeeze(input, axis=None, name=None, squeeze_dims=None):
```
Args:
- input: An `Output`. The `input` to squeeze.
+ input: A `Tensor`. The `input` to squeeze.
axis: An optional list of `ints`. Defaults to `[]`.
If specified, only squeezes the dimensions listed. The dimension
index starts at 0. It is an error to squeeze a dimension that is not 1.
@@ -2242,7 +2242,7 @@ def squeeze(input, axis=None, name=None, squeeze_dims=None):
squeeze_dims: Deprecated keyword argument that is now axis.
Returns:
- An `Output`. Has the same type as `input`.
+ A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
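Squeezing all versus selected dimensions, a minimal sketch:

```python
import tensorflow as tf

t = tf.zeros([1, 2, 1, 3, 1])
s_all = tf.squeeze(t)            # shape [2, 3]
s_one = tf.squeeze(t, axis=[0])  # shape [2, 1, 3, 1]
```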
@@ -2285,7 +2285,7 @@ def where(condition, x=None, y=None, name=None):
`x` and `y`.
Args:
- condition: An `Output` of type `bool`
+ condition: A `Tensor` of type `bool`
x: A Tensor which may have the same shape as `condition`. If `condition` is
rank 1, `x` may have higher rank, but its first dimension must match the
size of `condition`.
@@ -2293,8 +2293,8 @@ def where(condition, x=None, y=None, name=None):
name: A name of the operation (optional)
Returns:
- An `Output` with the same type and shape as `x`, `y` if they are non-None.
- An `Output` with shape `(num_true, dim_size(condition))`.
+ A `Tensor` with the same type and shape as `x`, `y` if they are non-None.
+ Otherwise, a `Tensor` with shape `(num_true, dim_size(condition))`.
Raises:
ValueError: When exactly one of `x` or `y` is non-None.
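Both calling conventions from the Returns section, in a minimal sketch:

```python
import tensorflow as tf

cond = tf.constant([True, False, True])
x = tf.constant([1, 2, 3])
y = tf.constant([10, 20, 30])
picked = tf.where(cond, x, y)  # -> [1, 20, 3]
indices = tf.where(cond)       # -> [[0], [2]], locations of True entries
```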
diff --git a/tensorflow/python/ops/candidate_sampling_ops.py b/tensorflow/python/ops/candidate_sampling_ops.py
index 29594a2227..285c199b10 100644
--- a/tensorflow/python/ops/candidate_sampling_ops.py
+++ b/tensorflow/python/ops/candidate_sampling_ops.py
@@ -50,7 +50,7 @@ def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique,
compute them approximately.
Args:
- true_classes: An `Output` of type `int64` and shape `[batch_size,
+ true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of classes to randomly sample per batch.
@@ -108,7 +108,7 @@ def log_uniform_candidate_sampler(true_classes, num_true, num_sampled, unique,
compute them approximately.
Args:
- true_classes: An `Output` of type `int64` and shape `[batch_size,
+ true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of classes to randomly sample per batch.
@@ -163,7 +163,7 @@ def learned_unigram_candidate_sampler(true_classes, num_true, num_sampled,
compute them approximately.
Args:
- true_classes: An `Output` of type `int64` and shape `[batch_size,
+ true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of classes to randomly sample per batch.
@@ -227,7 +227,7 @@ def fixed_unigram_candidate_sampler(true_classes,
compute them approximately.
Args:
- true_classes: An `Output` of type `int64` and shape `[batch_size,
+ true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of classes to randomly sample per batch.
@@ -290,7 +290,7 @@ def all_candidate_sampler(true_classes, num_true, num_sampled, unique,
well use full softmax or full logistic regression.
Args:
- true_classes: An `Output` of type `int64` and shape `[batch_size,
+ true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_true: An `int`. The number of target classes per training example.
num_sampled: An `int`. The number of possible classes.
@@ -341,7 +341,7 @@ def compute_accidental_hits(true_classes, sampled_candidates, num_true,
target classes as noise classes for the same example.
Args:
- true_classes: An `Output` of type `int64` and shape `[batch_size,
+ true_classes: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
sampled_candidates: A tensor of type `int64` and shape `[num_sampled]`.
The sampled_candidates output of CandidateSampler.
@@ -350,11 +350,11 @@ def compute_accidental_hits(true_classes, sampled_candidates, num_true,
name: A name for the operation (optional).
Returns:
- indices: An `Output` of type `int32` and shape `[num_accidental_hits]`.
+ indices: A `Tensor` of type `int32` and shape `[num_accidental_hits]`.
Values indicate rows in `true_classes`.
- ids: An `Output` of type `int64` and shape `[num_accidental_hits]`.
+ ids: A `Tensor` of type `int64` and shape `[num_accidental_hits]`.
Values indicate positions in `sampled_candidates`.
- weights: An `Output` of type `float` and shape `[num_accidental_hits]`.
+ weights: A `Tensor` of type `float` and shape `[num_accidental_hits]`.
Each value is `-FLOAT_MAX`.
"""
diff --git a/tensorflow/python/ops/check_ops.py b/tensorflow/python/ops/check_ops.py
index 0cffdd3899..54bd79fefd 100644
--- a/tensorflow/python/ops/check_ops.py
+++ b/tensorflow/python/ops/check_ops.py
@@ -78,15 +78,15 @@ __all__ = [
def assert_proper_iterable(values):
"""Static assert that values is a "proper" iterable.
- `Ops` that expect iterables of `Output` can call this to validate input.
- Useful since `Output`, `ndarray`, byte/text type are all iterables themselves.
+ `Ops` that expect iterables of `Tensor` can call this to validate input.
+ Useful since `Tensor`, `ndarray`, and byte/text types are all iterables themselves.
Args:
values: Object to be checked.
Raises:
TypeError: If `values` is not iterable or is one of
- `Output`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`.
+ `Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`.
"""
unintentional_iterables = (
(ops.Tensor, sparse_tensor.SparseTensor, np.ndarray)
@@ -122,7 +122,7 @@ def assert_negative(x, data=None, summarize=None, message=None, name=None):
If `x` is empty this is trivially satisfied.
Args:
- x: Numeric `Output`.
+ x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
@@ -162,7 +162,7 @@ def assert_positive(x, data=None, summarize=None, message=None, name=None):
If `x` is empty this is trivially satisfied.
Args:
- x: Numeric `Output`.
+ x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
@@ -202,7 +202,7 @@ def assert_non_negative(x, data=None, summarize=None, message=None, name=None):
If `x` is empty this is trivially satisfied.
Args:
- x: Numeric `Output`.
+ x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
@@ -244,7 +244,7 @@ def assert_non_positive(x, data=None, summarize=None, message=None, name=None):
If `x` is empty this is trivially satisfied.
Args:
- x: Numeric `Output`.
+ x: Numeric `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
@@ -287,8 +287,8 @@ def assert_equal(x, y, data=None, summarize=None, message=None, name=None):
If both `x` and `y` are empty, this is trivially satisfied.
Args:
- x: Numeric `Output`.
- y: Numeric `Output`, same dtype as and broadcastable to `x`.
+ x: Numeric `Tensor`.
+ y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
@@ -333,8 +333,8 @@ def assert_less(x, y, data=None, summarize=None, message=None, name=None):
If both `x` and `y` are empty, this is trivially satisfied.
Args:
- x: Numeric `Output`.
- y: Numeric `Output`, same dtype as and broadcastable to `x`.
+ x: Numeric `Tensor`.
+ y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
@@ -379,8 +379,8 @@ def assert_less_equal(x, y, data=None, summarize=None, message=None, name=None):
If both `x` and `y` are empty, this is trivially satisfied.
Args:
- x: Numeric `Output`.
- y: Numeric `Output`, same dtype as and broadcastable to `x`.
+ x: Numeric `Tensor`.
+ y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
@@ -425,8 +425,8 @@ def assert_greater(x, y, data=None, summarize=None, message=None, name=None):
If both `x` and `y` are empty, this is trivially satisfied.
Args:
- x: Numeric `Output`.
- y: Numeric `Output`, same dtype as and broadcastable to `x`.
+ x: Numeric `Tensor`.
+ y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
@@ -472,8 +472,8 @@ def assert_greater_equal(x, y, data=None, summarize=None, message=None,
If both `x` and `y` are empty, this is trivially satisfied.
Args:
- x: Numeric `Output`.
- y: Numeric `Output`, same dtype as and broadcastable to `x`.
+ x: Numeric `Tensor`.
+ y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`, `y`.
summarize: Print this many entries of each tensor.
@@ -503,8 +503,8 @@ def _assert_rank_condition(
"""Assert `x` has a rank that satisfies a given condition.
Args:
- x: Numeric `Output`.
- rank: Scalar `Output`.
+ x: Numeric `Tensor`.
+ rank: Scalar `Tensor`.
static_condition: A python function that takes `[actual_rank, given_rank]`
and returns `True` if the condition is satisfied, `False` otherwise.
dynamic_condition: An `op` that takes [actual_rank, given_rank]
@@ -564,8 +564,8 @@ def assert_rank(x, rank, data=None, summarize=None, message=None, name=None):
```
Args:
- x: Numeric `Output`.
- rank: Scalar integer `Output`.
+ x: Numeric `Tensor`.
+ rank: Scalar integer `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
@@ -627,8 +627,8 @@ def assert_rank_at_least(
```
Args:
- x: Numeric `Output`.
- rank: Scalar `Output`.
+ x: Numeric `Tensor`.
+ rank: Scalar `Tensor`.
data: The tensors to print out if the condition is False. Defaults to
error message and first few entries of `x`.
summarize: Print this many entries of each tensor.
@@ -689,7 +689,7 @@ def assert_integer(x, message=None, name=None):
```
Args:
- x: `Output` whose basetype is integer and is not quantized.
+ x: `Tensor` whose basetype is integer and is not quantized.
message: A string to prefix to the default message.
name: A name for this operation (optional). Defaults to "assert_integer".
@@ -712,10 +712,10 @@ def assert_integer(x, message=None, name=None):
def assert_type(tensor, tf_type, message=None, name=None):
- """Statically asserts that the given `Output` is of the specified type.
+ """Statically asserts that the given `Tensor` is of the specified type.
Args:
- tensor: A tensorflow `Output`.
+ tensor: A tensorflow `Tensor`.
tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,
etc).
message: A string to prefix to the default message.
@@ -767,11 +767,11 @@ def is_non_decreasing(x, name=None):
See also: `is_strictly_increasing`
Args:
- x: Numeric `Output`.
+ x: Numeric `Tensor`.
name: A name for this operation (optional). Defaults to "is_non_decreasing"
Returns:
- Boolean `Output`, equal to `True` iff `x` is non-decreasing.
+ Boolean `Tensor`, equal to `True` iff `x` is non-decreasing.
Raises:
TypeError: if `x` is not a numeric tensor.
@@ -793,12 +793,12 @@ def is_strictly_increasing(x, name=None):
See also: `is_non_decreasing`
Args:
- x: Numeric `Output`.
+ x: Numeric `Tensor`.
name: A name for this operation (optional).
Defaults to "is_strictly_increasing"
Returns:
- Boolean `Output`, equal to `True` iff `x` is strictly increasing.
+ Boolean `Tensor`, equal to `True` iff `x` is strictly increasing.
Raises:
TypeError: if `x` is not a numeric tensor.
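Since these assert ops run only when something depends on them, the usual pattern wires them in with `tf.control_dependencies`; a sketch assuming the `check_ops` symbols are exported under `tf.` and the pre-1.0 `reduction_indices` keyword:
```
import tensorflow as tf

x = tf.placeholder(tf.float32)
# The asserts execute only because the first op that uses `x` depends on them.
with tf.control_dependencies([tf.assert_rank(x, 2),
                              tf.assert_non_negative(x)]):
    row_sums = tf.reduce_sum(x, reduction_indices=[1])
```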
diff --git a/tensorflow/python/ops/clip_ops.py b/tensorflow/python/ops/clip_ops.py
index 85bbe38272..65e05626f0 100644
--- a/tensorflow/python/ops/clip_ops.py
+++ b/tensorflow/python/ops/clip_ops.py
@@ -40,13 +40,13 @@ def clip_by_value(t, clip_value_min, clip_value_max,
greater than `clip_value_max` are set to `clip_value_max`.
Args:
- t: An `Output`.
- clip_value_min: A 0-D (scalar) `Output`. The minimum value to clip by.
- clip_value_max: A 0-D (scalar) `Output`. The maximum value to clip by.
+ t: A `Tensor`.
+ clip_value_min: A 0-D (scalar) `Tensor`. The minimum value to clip by.
+ clip_value_max: A 0-D (scalar) `Tensor`. The maximum value to clip by.
name: A name for the operation (optional).
Returns:
- A clipped `Output`.
+ A clipped `Tensor`.
"""
with ops.name_scope(name, "clip_by_value",
[t, clip_value_min, clip_value_max]) as name:
@@ -82,15 +82,15 @@ def clip_by_norm(t, clip_norm, axes=None, name=None):
an optimizer.
Args:
- t: An `Output`.
- clip_norm: A 0-D (scalar) `Output` > 0. A maximum clipping value.
- axes: A 1-D (vector) `Output` of type int32 containing the dimensions
+ t: A `Tensor`.
+ clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
+ axes: A 1-D (vector) `Tensor` of type int32 containing the dimensions
to use for computing the L2-norm. If `None` (the default), uses all
dimensions.
name: A name for the operation (optional).
Returns:
- A clipped `Output`.
+ A clipped `Tensor`.
"""
with ops.name_scope(name, "clip_by_norm", [t, clip_norm]) as name:
t = ops.convert_to_tensor(t, name="t")
@@ -116,11 +116,11 @@ def global_norm(t_list, name=None):
Any entries in `t_list` that are of type None are ignored.
Args:
- t_list: A tuple or list of mixed `Output`s, `IndexedSlices`, or None.
+ t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
name: A name for the operation (optional).
Returns:
- A 0-D (scalar) `Output` of type `float`.
+ A 0-D (scalar) `Tensor` of type `float`.
Raises:
TypeError: If `t_list` is not a sequence.
@@ -181,15 +181,15 @@ def clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None):
ready before the clipping operation can be performed.
Args:
- t_list: A tuple or list of mixed `Output`s, `IndexedSlices`, or None.
- clip_norm: A 0-D (scalar) `Output` > 0. The clipping ratio.
- use_norm: A 0-D (scalar) `Output` of type `float` (optional). The global
+ t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
+ clip_norm: A 0-D (scalar) `Tensor` > 0. The clipping ratio.
+ use_norm: A 0-D (scalar) `Tensor` of type `float` (optional). The global
norm to use. If not provided, `global_norm()` is used to compute the norm.
name: A name for the operation (optional).
Returns:
- list_clipped: A list of `Output`s of the same type as `list_t`.
- global_norm: A 0-D (scalar) `Output` representing the global norm.
+ list_clipped: A list of `Tensors` of the same type as `t_list`.
+ global_norm: A 0-D (scalar) `Tensor` representing the global norm.
Raises:
TypeError: If `t_list` is not a sequence.
@@ -251,12 +251,12 @@ def clip_by_average_norm(t, clip_norm, name=None):
an optimizer.
Args:
- t: An `Output`.
- clip_norm: A 0-D (scalar) `Output` > 0. A maximum clipping value.
+ t: A `Tensor`.
+ clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
name: A name for the operation (optional).
Returns:
- A clipped `Output`.
+ A clipped `Tensor`.
"""
with ops.name_scope(name, "clip_by_average_norm", [t, clip_norm]) as name:
t = ops.convert_to_tensor(t, name="t")
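A typical use of `clip_by_global_norm` is gradient clipping before an optimizer update; a sketch in which `loss` and `params` are assumed to exist:
```
import tensorflow as tf

opt = tf.train.GradientDescentOptimizer(0.1)
grads = tf.gradients(loss, params)            # `loss`, `params` assumed
# Rescales all gradients jointly so their global norm is at most 5.0.
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=5.0)
train_op = opt.apply_gradients(zip(clipped, params))
```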
diff --git a/tensorflow/python/ops/control_flow_ops.py b/tensorflow/python/ops/control_flow_ops.py
index 1a00004a8b..25a6c44869 100644
--- a/tensorflow/python/ops/control_flow_ops.py
+++ b/tensorflow/python/ops/control_flow_ops.py
@@ -297,7 +297,7 @@ def switch(data, pred, dtype=None, name=None):
If `pred` is true, the `data` input is forwarded to the first output.
Otherwise, the data goes to the second output.
- This op handles `Output`s and `IndexedSlices`.
+ This op handles `Tensor`s and `IndexedSlices`.
Args:
data: The tensor to be forwarded to the appropriate output.
@@ -345,7 +345,7 @@ def _SwitchRefOrTensor(data, pred, name="Switch"):
If `pred` is true, the `data` input is forwarded to the first output.
Otherwise, the data goes to the second output.
- This op handles `Output`s and `IndexedSlices`.
+ This op handles `Tensor`s and `IndexedSlices`.
Args:
data: The tensor to be forwarded to the appropriate output.
@@ -393,8 +393,8 @@ def merge(inputs, name=None):
It is an error if more than one tensor in `inputs` is available. If no tensor
in `inputs` is available, the returned tensor and index are not set.
- This op handles both `Output`s and `IndexedSlices`. If inputs has a mix of
- `Output`s and `IndexedSlices`, all inputs are converted to IndexedSlices
+ This op handles both `Tensor`s and `IndexedSlices`. If `inputs` has a mix of
+ `Tensor`s and `IndexedSlices`, all inputs are converted to `IndexedSlices`
before merging.
Args:
@@ -2534,7 +2534,7 @@ def while_loop(cond, body, loop_vars, shape_invariants=None,
`loop_vars` is the same in every iteration. The `shape_invariants` argument
allows the caller to specify a less specific shape invariant for each loop
variable, which is needed if the shape varies between iterations. The
- [`Output.set_shape()`](../../api_docs/python/framework.md#Output.set_shape)
+ [`Tensor.set_shape()`](../../api_docs/python/framework.md#Tensor.set_shape)
function may also be used in the `body` function to indicate that
the output loop variable has a particular shape. The shape invariant for
SparseTensor and IndexedSlices are treated specially as follows:
@@ -2568,7 +2568,7 @@ def while_loop(cond, body, loop_vars, shape_invariants=None,
cond: A callable that represents the termination condition of the loop.
body: A callable that represents the loop body.
loop_vars: A (possibly nested) tuple, namedtuple or list of numpy array,
- `Output`, and `TensorArray` objects.
+ `Tensor`, and `TensorArray` objects.
shape_invariants: The shape invariants for the loop variables.
parallel_iterations: The number of iterations allowed to run in parallel.
It must be a positive integer.
@@ -2690,14 +2690,14 @@ def with_dependencies(dependencies, output_tensor, name=None):
Args:
dependencies: A list of operations to run before this op finishes.
- output_tensor: An `Output` or `IndexedSlices` that will be returned.
+ output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
name: (Optional) A name for this operation.
Returns:
Same as `output_tensor`.
Raises:
- TypeError: if `output_tensor` is not an `Output` or `IndexedSlices`.
+ TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
"""
with ops.name_scope(name, "control_dependency",
dependencies + [output_tensor]) as name:
@@ -2794,7 +2794,7 @@ def tuple(tensors, name=None, control_inputs=None):
See also `group` and `with_dependencies`.
Args:
- tensors: A list of `Output`s or `IndexedSlices`, some entries can be `None`.
+ tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.
name: (optional) A name to use as a `name_scope` for the operation.
control_inputs: List of additional ops to finish before returning.
@@ -2802,8 +2802,8 @@ def tuple(tensors, name=None, control_inputs=None):
Same as `tensors`.
Raises:
- ValueError: If `tensors` does not contain any `Output` or `IndexedSlices`.
- TypeError: If `control_inputs` is not a list of `Operation` or `Output`
+ ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`.
+ TypeError: If `control_inputs` is not a list of `Operation` or `Tensor`
objects.
"""
diff --git a/tensorflow/python/ops/ctc_ops.py b/tensorflow/python/ops/ctc_ops.py
index d7f13dbfb2..ea664e063c 100644
--- a/tensorflow/python/ops/ctc_ops.py
+++ b/tensorflow/python/ops/ctc_ops.py
@@ -97,10 +97,10 @@ def ctc_loss(inputs, labels, sequence_length,
Untested. Very likely will not learn to output repeated classes.
Args:
- inputs: 3-D `float` `Output`.
- If time_major == False, this will be an `Output` shaped:
+ inputs: 3-D `float` `Tensor`.
+ If time_major == False, this will be a `Tensor` shaped:
`[batch_size x max_time x num_classes]`.
- If time_major == True (default), this will be an `Output` shaped:
+ If time_major == True (default), this will be a `Tensor` shaped:
`[max_time x batch_size x num_classes]`.
The logits.
labels: An `int32` `SparseTensor`.
@@ -114,18 +114,15 @@ def ctc_loss(inputs, labels, sequence_length,
If True, repeated labels are collapsed prior to the CTC calculation.
ctc_merge_repeated: Boolean. Default: True.
time_major: The shape format of the `inputs` Tensors.
- If True, these `Output`s must be shaped
- `[max_time, batch_size, num_classes]`.
- If False, these `Output`s must be shaped
- `[batch_size, max_time, num_classes]`.
- Using `time_major = True` (default) is a bit more efficient because it
- avoids transposes at the beginning of the ctc_loss calculation. However,
- most TensorFlow data is batch-major, so by this function also accepts
- inputs in batch-major form.
+ If True, these `Tensors` must be shaped `[max_time, batch_size, num_classes]`.
+ If False, these `Tensors` must be shaped `[batch_size, max_time, num_classes]`.
+ Using `time_major = True` (default) is a bit more efficient because it avoids
+ transposes at the beginning of the ctc_loss calculation. However, most
+ TensorFlow data is batch-major, so this function also accepts inputs
+ in batch-major form.
Returns:
- A 1-D `float` `Output`, size `[batch]`, containing the negative log
- probabilities.
+ A 1-D `float` `Tensor`, size `[batch]`, containing the negative log probabilities.
Raises:
TypeError: if labels is not a `SparseTensor`.
@@ -185,7 +182,7 @@ def ctc_greedy_decoder(inputs, sequence_length, merge_repeated=True):
* `A B B B B B` if `merge_repeated=False`.
Args:
- inputs: 3-D `float` `Output` sized
+ inputs: 3-D `float` `Tensor` sized
`[max_time x batch_size x num_classes]`. The logits.
sequence_length: 1-D `int32` vector containing sequence lengths,
having size `[batch_size]`.
@@ -228,7 +225,7 @@ def ctc_beam_search_decoder(inputs, sequence_length, beam_width=100,
* `A B B B B` if `merge_repeated = False`.
Args:
- inputs: 3-D `float` `Output`, size
+ inputs: 3-D `float` `Tensor`, size
`[max_time x batch_size x num_classes]`. The logits.
sequence_length: 1-D `int32` vector containing sequence lengths,
having size `[batch_size]`.
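A minimal end-to-end sketch of the CTC ops documented above, assuming `tf.sparse_placeholder` is available in this version and with a hypothetical class count:
```
import tensorflow as tf

num_classes = 28                  # hypothetical: 26 letters + space + blank
logits = tf.placeholder(tf.float32, [None, None, num_classes])  # time-major
labels = tf.sparse_placeholder(tf.int32)       # SparseTensor of label ids
seq_len = tf.placeholder(tf.int32, [None])

loss = tf.nn.ctc_loss(logits, labels, seq_len)   # shape [batch]
cost = tf.reduce_mean(loss)
decoded, log_probs = tf.nn.ctc_beam_search_decoder(logits, seq_len)
```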
diff --git a/tensorflow/python/ops/data_flow_ops.py b/tensorflow/python/ops/data_flow_ops.py
index e1139244ba..718355422b 100644
--- a/tensorflow/python/ops/data_flow_ops.py
+++ b/tensorflow/python/ops/data_flow_ops.py
@@ -236,7 +236,7 @@ class QueueBase(object):
return self._names
def _check_enqueue_dtypes(self, vals):
- """Validate and convert `vals` to a list of `Output`s.
+ """Validate and convert `vals` to a list of `Tensor`s.
The `vals` argument can be a Tensor, a list or tuple of tensors, or a
dictionary with tensor values.
@@ -250,7 +250,7 @@ class QueueBase(object):
vals: A tensor, a list or tuple of tensors, or a dictionary.
Returns:
- A list of `Output` objects.
+ A list of `Tensor` objects.
Raises:
ValueError: If `vals` is invalid.
@@ -445,7 +445,7 @@ class QueueBase(object):
`tf.errors.CancelledError` will be raised.
Args:
- n: A scalar `Output` containing the number of elements to dequeue.
+ n: A scalar `Tensor` containing the number of elements to dequeue.
name: A name for the operation (optional).
Returns:
@@ -486,7 +486,7 @@ class QueueBase(object):
Otherwise the behavior is identical to `dequeue_many`.
Args:
- n: A scalar `Output` containing the number of elements to dequeue.
+ n: A scalar `Tensor` containing the number of elements to dequeue.
name: A name for the operation (optional).
Returns:
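For the `dequeue_many` docstrings above, a small sketch of the queue API with hypothetical shapes (the filler data is illustrative only):
```
import tensorflow as tf

q = tf.FIFOQueue(capacity=32, dtypes=[tf.float32], shapes=[[128]])
enqueue_op = q.enqueue_many(tf.zeros([32, 128]))   # hypothetical filler data
batch = q.dequeue_many(8)  # a [8, 128] Tensor; blocks until 8 are available
```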
diff --git a/tensorflow/python/ops/embedding_ops.py b/tensorflow/python/ops/embedding_ops.py
index ee345cbd08..a15fa61069 100644
--- a/tensorflow/python/ops/embedding_ops.py
+++ b/tensorflow/python/ops/embedding_ops.py
@@ -67,8 +67,8 @@ def embedding_lookup(params, ids, partition_strategy="mod", name=None,
along dimension 0. Alternatively, a `PartitionedVariable`, created by
partitioning along dimension 0. Each element must be appropriately sized
for the given `partition_strategy`.
- ids: An `Output` with type `int32` or `int64` containing the ids to be
- looked up in `params`.
+ ids: A `Tensor` with type `int32` or `int64` containing the ids to be looked
+ up in `params`.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`.
@@ -78,7 +78,7 @@ def embedding_lookup(params, ids, partition_strategy="mod", name=None,
max_norm.
Returns:
- An `Output` with the same type as the tensors in `params`.
+ A `Tensor` with the same type as the tensors in `params`.
Raises:
ValueError: If `params` is empty.
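A quick sketch of `embedding_lookup` with hypothetical sizes:
```
import tensorflow as tf

vocab_size, dim = 1000, 64                         # hypothetical sizes
params = tf.Variable(tf.random_uniform([vocab_size, dim], -1.0, 1.0))
ids = tf.constant([7, 42, 7], dtype=tf.int64)
embedded = tf.nn.embedding_lookup(params, ids)     # shape [3, dim]
```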
diff --git a/tensorflow/python/ops/gradients_impl.py b/tensorflow/python/ops/gradients_impl.py
index 2e02f545e1..4899eadccb 100644
--- a/tensorflow/python/ops/gradients_impl.py
+++ b/tensorflow/python/ops/gradients_impl.py
@@ -326,13 +326,13 @@ def gradients(ys,
aggregation_method=None):
"""Constructs symbolic partial derivatives of sum of `ys` w.r.t. x in `xs`.
- `ys` and `xs` are each an `Output` or a list of tensors. `grad_ys`
- is a list of `Output`, holding the gradients received by the
+ `ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
+ is a list of `Tensor`, holding the gradients received by the
`ys`. The list must be the same length as `ys`.
`gradients()` adds ops to the graph to output the partial
derivatives of `ys` with respect to `xs`. It returns a list of
- `Output` of length `len(xs)` where each tensor is the `sum(dy/dx)`
+ `Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
for y in `ys`.
`grad_ys` is a list of tensors of the same length as `ys` that holds
@@ -344,9 +344,9 @@ def gradients(ys,
each y).
Args:
- ys: An `Output` or list of tensors to be differentiated.
- xs: An `Output` or list of tensors to be used for differentiation.
- grad_ys: Optional. An `Output` or list of tensors the same size as
+ ys: A `Tensor` or list of tensors to be differentiated.
+ xs: A `Tensor` or list of tensors to be used for differentiation.
+ grad_ys: Optional. A `Tensor` or list of tensors the same size as
`ys` and holding the gradients computed for each y in `ys`.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'gradients'.
@@ -836,7 +836,7 @@ def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False,
"""Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.
`hessians()` adds ops to the graph to output the Hessian matrix of `ys`
- with respect to `xs`. It returns a list of `Output` of length `len(xs)`
+ with respect to `xs`. It returns a list of `Tensor` of length `len(xs)`
where each tensor is the Hessian of `sum(ys)`. This function currently
only supports evaluating the Hessian with respect to (a list of) one-
dimensional tensors.
@@ -845,8 +845,8 @@ def hessians(ys, xs, name="hessians", colocate_gradients_with_ops=False,
tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).
Args:
- ys: An `Output` or list of tensors to be differentiated.
- xs: An `Output` or list of tensors to be used for differentiation.
+ ys: A `Tensor` or list of tensors to be differentiated.
+ xs: A `Tensor` or list of tensors to be used for differentiation.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'hessians'.
colocate_gradients_with_ops: See `gradients()` documentation for details.
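The `gradients()` contract above in miniature:
```
import tensorflow as tf

x = tf.Variable(3.0)
y = x * x + 2.0 * x
# One Tensor per entry of xs: here d(y)/dx = 2x + 2.
dy_dx = tf.gradients(ys=y, xs=[x])[0]
```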
diff --git a/tensorflow/python/ops/histogram_ops.py b/tensorflow/python/ops/histogram_ops.py
index 8936ec8c8c..5e1f322b2c 100644
--- a/tensorflow/python/ops/histogram_ops.py
+++ b/tensorflow/python/ops/histogram_ops.py
@@ -41,8 +41,8 @@ def histogram_fixed_width(values,
equal width and determined by the arguments `value_range` and `nbins`.
Args:
- values: Numeric `Output`.
- value_range: Shape [2] `Output`. new_values <= value_range[0] will be
+ values: Numeric `Tensor`.
+ value_range: Shape [2] `Tensor`. new_values <= value_range[0] will be
mapped to hist[0], values >= value_range[1] will be mapped to hist[-1].
Must be the same dtype as `new_values`.
nbins: Scalar `int32 Tensor`. Number of histogram bins.
@@ -50,7 +50,7 @@ def histogram_fixed_width(values,
name: A name for this operation (defaults to 'histogram_fixed_width').
Returns:
- A 1-D `Output` holding histogram of values.
+ A 1-D `Tensor` holding histogram of values.
Examples:
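The hunk truncates the docstring's example; a sketch of the binning behavior consistent with the argument descriptions above:
```
import tensorflow as tf

values = tf.constant([-1.0, 0.0, 1.5, 2.0, 5.0, 15.0])
# Five equal-width bins over [0, 5); values below/above the range land in
# the first/last bin, so this evaluates to [2, 1, 1, 0, 2].
hist = tf.histogram_fixed_width(values, value_range=[0.0, 5.0], nbins=5)
```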
diff --git a/tensorflow/python/ops/image_ops_impl.py b/tensorflow/python/ops/image_ops_impl.py
index 3090327d95..85b91b3bfe 100644
--- a/tensorflow/python/ops/image_ops_impl.py
+++ b/tensorflow/python/ops/image_ops_impl.py
@@ -83,7 +83,7 @@ def _is_tensor(x):
x: A python object to check.
Returns:
- `True` if `x` is a `tf.Output` or `tf.Variable`, otherwise `False`.
+ `True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`.
"""
return isinstance(x, (ops.Tensor, variables.Variable))
diff --git a/tensorflow/python/ops/init_ops.py b/tensorflow/python/ops/init_ops.py
index bbe9977d4b..849e88bf28 100644
--- a/tensorflow/python/ops/init_ops.py
+++ b/tensorflow/python/ops/init_ops.py
@@ -20,14 +20,14 @@ following signature:
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
Args:
- shape: List of `int` representing the shape of the output `Output`. Some
- initializers may also be able to accept an `Output`.
- dtype: (Optional) Type of the output `Output`.
+ shape: List of `int` representing the shape of the output `Tensor`. Some
+ initializers may also be able to accept a `Tensor`.
+ dtype: (Optional) Type of the output `Tensor`.
partition_info: (Optional) variable_scope._PartitionInfo object holding
additional information about how the variable is partitioned. May be
`None` if the variable is not partitioned.
Returns:
- An `Output` of type `dtype` and `shape`.
+ A `Tensor` of type `dtype` and `shape`.
"""
from __future__ import absolute_import
from __future__ import division
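A toy initializer that matches the `_initializer` signature described above (the helper name is ours, not part of the diff):
```
import tensorflow as tf

def ones_initializer(shape, dtype=tf.float32, partition_info=None):
    # Matches the documented signature; ignores partition_info.
    return tf.constant(1, dtype=dtype, shape=shape)

v = tf.get_variable("v", shape=[3, 4], initializer=ones_initializer)
```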
diff --git a/tensorflow/python/ops/linalg_ops.py b/tensorflow/python/ops/linalg_ops.py
index caaa5f8098..f894cb3766 100644
--- a/tensorflow/python/ops/linalg_ops.py
+++ b/tensorflow/python/ops/linalg_ops.py
@@ -52,12 +52,12 @@ def cholesky_solve(chol, rhs, name=None):
```
Args:
- chol: An `Output`. Must be `float32` or `float64`, shape is `[..., M, M]`.
+ chol: A `Tensor`. Must be `float32` or `float64`, shape is `[..., M, M]`.
Cholesky factorization of `A`, e.g. `chol = tf.cholesky(A)`.
For that reason, only the lower triangular parts (including the diagonal)
of the last two dimensions of `chol` are used. The strictly upper part is
assumed to be zero and not accessed.
- rhs: An `Output`, same type as `chol`, shape is `[..., M, K]`.
+ rhs: A `Tensor`, same type as `chol`, shape is `[..., M, K]`.
name: A name to give this `Op`. Defaults to `cholesky_solve`.
Returns:
@@ -99,17 +99,17 @@ def eye(
```
Args:
- num_rows: Non-negative `int32` scalar `Output` giving the number of rows
+ num_rows: Non-negative `int32` scalar `Tensor` giving the number of rows
in each batch matrix.
- num_columns: Optional non-negative `int32` scalar `Output` giving the number
+ num_columns: Optional non-negative `int32` scalar `Tensor` giving the number
of columns in each batch matrix. Defaults to `num_rows`.
- batch_shape: `int32` `Output`. If provided, returned `Output` will have
+ batch_shape: `int32` `Tensor`. If provided, returned `Tensor` will have
leading batch dimensions of this shape.
- dtype: The type of an element in the resulting `Output`
+ dtype: The type of an element in the resulting `Tensor`
name: A name for this `Op`. Defaults to "eye".
Returns:
- An `Output` of shape `batch_shape + [num_rows, num_columns]`
+ A `Tensor` of shape `batch_shape + [num_rows, num_columns]`
"""
with ops.name_scope(
name, default_name="eye", values=[num_rows, num_columns, batch_shape]):
@@ -139,7 +139,7 @@ def matrix_solve_ls(matrix, rhs, l2_regularizer=0.0, fast=True, name=None):
`matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
form `M`-by-`N` matrices. Rhs is a tensor of shape `[..., M, K]` whose
inner-most 2 dimensions form `M`-by-`K` matrices. The computed output is a
- `Output` of shape `[..., N, K]` whose inner-most 2 dimensions form `M`-by-`K`
+ `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form `M`-by-`K`
matrices that solve the equations
`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least squares
sense.
@@ -172,14 +172,14 @@ def matrix_solve_ls(matrix, rhs, l2_regularizer=0.0, fast=True, name=None):
`l2_regularizer` is ignored.
Args:
- matrix: `Output` of shape `[..., M, N]`.
- rhs: `Output` of shape `[..., M, K]`.
- l2_regularizer: 0-D `double` `Output`. Ignored if `fast=False`.
+ matrix: `Tensor` of shape `[..., M, N]`.
+ rhs: `Tensor` of shape `[..., M, K]`.
+ l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`.
fast: bool. Defaults to `True`.
name: string, optional name of the operation.
Returns:
- output: `Output` of shape `[..., N, K]` whose inner-most 2 dimensions form
+ output: `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form
`M`-by-`K` matrices that solve the equations
`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least
squares sense.
@@ -197,7 +197,7 @@ def self_adjoint_eig(tensor, name=None):
`tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i]`, for i=0...N-1.
Args:
- tensor: `Output` of shape `[..., N, N]`. Only the lower triangular part of
+ tensor: `Tensor` of shape `[..., N, N]`. Only the lower triangular part of
each inner matrix is referenced.
name: string, optional name of the operation.
@@ -215,7 +215,7 @@ def self_adjoint_eigvals(tensor, name=None):
"""Computes the eigenvalues of one or more self-adjoint matrices.
Args:
- tensor: `Output` of shape `[..., N, N]`.
+ tensor: `Tensor` of shape `[..., N, N]`.
name: string, optional name of the operation.
Returns:
@@ -244,7 +244,7 @@ def svd(tensor, full_matrices=False, compute_uv=True, name=None):
```
Args:
- matrix: `Output` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
+ tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
`N`.
full_matrices: If true, compute full-sized `u` and `v`. If false
(the default), compute only the leading `P` singular vectors.
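A small sketch of `cholesky_solve` on a symmetric positive-definite matrix:
```
import tensorflow as tf

# Solve A X = RHS via the Cholesky factor of A.
A = tf.constant([[4.0, 2.0], [2.0, 3.0]])
RHS = tf.constant([[1.0], [2.0]])
chol = tf.cholesky(A)
X = tf.cholesky_solve(chol, RHS)   # shape [2, 1]
```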
diff --git a/tensorflow/python/ops/logging_ops.py b/tensorflow/python/ops/logging_ops.py
index 93306de163..038bb1e81b 100644
--- a/tensorflow/python/ops/logging_ops.py
+++ b/tensorflow/python/ops/logging_ops.py
@@ -80,15 +80,15 @@ def histogram_summary(tag, values, collections=None, name=None):
This op reports an `InvalidArgument` error if any value is not finite.
Args:
- tag: A `string` `Output`. 0-D. Tag to use for the summary value.
- values: A real numeric `Output`. Any shape. Values to use to
+ tag: A `string` `Tensor`. 0-D. Tag to use for the summary value.
+ values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
- A scalar `Output` of type `string`. The serialized `Summary` protocol
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "HistogramSummary", [tag, values]) as scope:
@@ -121,7 +121,7 @@ def image_summary(tag, tensor, max_images=3, collections=None, name=None):
is at 127. They are then rescaled so that either the smallest value is 0,
or the largest one is 255.
- The `tag` argument is a scalar `Output` of type `string`. It is used to
+ The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:
* If `max_images` is 1, the summary value tag is '*tag*/image'.
@@ -129,9 +129,9 @@ def image_summary(tag, tensor, max_images=3, collections=None, name=None):
generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.
Args:
- tag: A scalar `Output` of type `string`. Used to build the `tag`
+ tag: A scalar `Tensor` of type `string`. Used to build the `tag`
of the summary values.
- tensor: A 4-D `uint8` or `float32` `Output` of shape `[batch_size, height,
+ tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_images: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
@@ -139,7 +139,7 @@ def image_summary(tag, tensor, max_images=3, collections=None, name=None):
name: A name for the operation (optional).
Returns:
- A scalar `Output` of type `string`. The serialized `Summary` protocol
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "ImageSummary", [tag, tensor]) as scope:
@@ -163,7 +163,7 @@ def audio_summary(tag,
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of
`sample_rate`.
- The `tag` argument is a scalar `Output` of type `string`. It is used to
+ The `tag` argument is a scalar `Tensor` of type `string`. It is used to
build the `tag` of the summary values:
* If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
@@ -171,11 +171,11 @@ def audio_summary(tag,
generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.
Args:
- tag: A scalar `Output` of type `string`. Used to build the `tag`
+ tag: A scalar `Tensor` of type `string`. Used to build the `tag`
of the summary values.
- tensor: A 3-D `float32` `Output` of shape `[batch_size, frames, channels]`
- or a 2-D `float32` `Output` of shape `[batch_size, frames]`.
- sample_rate: A Scalar `float32` `Output` indicating the sample rate of the
+ tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
+ or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
+ sample_rate: A Scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
@@ -183,7 +183,7 @@ def audio_summary(tag,
name: A name for the operation (optional).
Returns:
- A scalar `Output` of type `string`. The serialized `Summary` protocol
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "AudioSummary", [tag, tensor]) as scope:
@@ -211,14 +211,14 @@ def merge_summary(inputs, collections=None, name=None):
in the summaries to merge use the same tag.
Args:
- inputs: A list of `string` `Output` objects containing serialized `Summary`
+ inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
- A scalar `Output` of type `string`. The serialized `Summary` protocol
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
"""
with ops.name_scope(name, "MergeSummary", inputs):
@@ -236,7 +236,7 @@ def merge_all_summaries(key=ops.GraphKeys.SUMMARIES):
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
- `Output` of type `string` containing the serialized `Summary` protocol
+ `Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_ops = ops.get_collection(key)
@@ -254,7 +254,7 @@ def get_summary_op():
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
- `Output` of type `string` containing the serialized `Summary` protocol
+ `Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_op = ops.get_collection(ops.GraphKeys.SUMMARY_OP)
@@ -277,14 +277,14 @@ def scalar_summary(tags, values, collections=None, name=None):
summary has a summary value for each tag-value pair in `tags` and `values`.
Args:
- tags: A `string` `Output`. Tags for the summaries.
+ tags: A `string` `Tensor`. Tags for the summaries.
values: A real numeric `Tensor`. Values for the summaries.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
- A scalar `Output` of type `string`. The serialized `Summary` protocol
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
with ops.name_scope(name, "ScalarSummary", [tags, values]) as scope:
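These are the pre-1.0 summary endpoints (later superseded by `tf.summary.*`); a usage sketch in which `loss` and `w` are assumed to exist:
```
import tensorflow as tf

loss_summ = tf.scalar_summary("loss", loss)      # `loss` assumed to exist
hist_summ = tf.histogram_summary("weights", w)   # `w` assumed to exist
summary_op = tf.merge_all_summaries()   # serialized `string` scalar Tensor
```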
diff --git a/tensorflow/python/ops/math_ops.py b/tensorflow/python/ops/math_ops.py
index d9a723a163..c2aab4c945 100644
--- a/tensorflow/python/ops/math_ops.py
+++ b/tensorflow/python/ops/math_ops.py
@@ -280,12 +280,12 @@ def abs(x, name=None):
number.
Args:
- x: An `Output` or `SparseTensor` of type `float32`, `float64`, `int32`, or
+ x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`, or
`int64`.
name: A name for the operation (optional).
Returns:
- An `Output` or `SparseTensor` the same size and type as `x` with absolute
+ A `Tensor` or `SparseTensor` the same size and type as `x` with absolute
values.
"""
with ops.name_scope(name, "Abs", [x]) as name:
@@ -323,12 +323,12 @@ def neg(x, name=None):
I.e., \\(y = -x\\).
Args:
- x: An `Output` or `SparseTensor`. Must be one of the following types:
- `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
+ x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
+ `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
- An `Output` or `SparseTensor`, respectively. Has the same type as `x`.
+ A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Neg", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
@@ -347,12 +347,12 @@ def sign(x, name=None):
For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
Args:
- x: An `Output` or `SparseTensor`. Must be one of the following types:
- `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
+ x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
+ `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
- An `Output` or `SparseTensor`, respectively. Has the same type as `x`.
+ A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Sign", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
@@ -369,12 +369,12 @@ def square(x, name=None):
I.e., \\(y = x * x = x^2\\).
Args:
- x: An `Output` or `SparseTensor`. Must be one of the following types:
- `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
+ x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
+ `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
- An `Output` or `SparseTensor`. Has the same type as `x`.
+ A `Tensor` or `SparseTensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Square", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
@@ -391,12 +391,12 @@ def sqrt(x, name=None):
I.e., \\(y = \sqrt{x} = x^{1/2}\\).
Args:
- x: An `Output` or `SparseTensor`. Must be one of the following types:
- `half`, `float32`, `float64`, `complex64`, `complex128`.
+ x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
+ `float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
- An `Output` or `SparseTensor`, respectively. Has the same type as `x`.
+ A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Sqrt", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
@@ -411,12 +411,12 @@ def erf(x, name=None):
"""Computes the Gauss error function of `x` element-wise.
Args:
- x: An `Output` of `SparseTensor`. Must be one of the following types:
- `half`, `float32`, `float64`.
+ x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
+ `float32`, `float64`.
name: A name for the operation (optional).
Returns:
- An `Output` or `SparseTensor`, respectively. Has the same type as `x`.
+ A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Erf", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
@@ -443,28 +443,28 @@ def complex_abs(x, name=None):
```
Args:
- x: An `Output` of type `complex64` or `complex128`.
+ x: A `Tensor` of type `complex64` or `complex128`.
name: A name for the operation (optional).
Returns:
- An `Output` of type `float32` or `float64`.
+ A `Tensor` of type `float32` or `float64`.
"""
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
def scalar_mul(scalar, x):
- """Multiplies a scalar times an `Output` or `IndexedSlices` object.
+ """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.
Intended for use in gradient code which might deal with `IndexedSlices`
objects, which are easy to multiply by a scalar but more expensive to
multiply with arbitrary tensors.
Args:
- scalar: A 0-D scalar `Output`. Must have known shape.
- x: An `Output` or `IndexedSlices` to be scaled.
+ scalar: A 0-D scalar `Tensor`. Must have known shape.
+ x: A `Tensor` or `IndexedSlices` to be scaled.
Returns:
- `scalar * x` of the same type (`Output` or `IndexedSlices`) as `x`.
+ `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.
Raises:
ValueError: if scalar is not a 0-D `scalar`.
@@ -494,14 +494,14 @@ def pow(x, y, name=None):
```
Args:
- x: An `Output` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
+ x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
or `complex128`.
- y: An `Output` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
+ y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
or `complex128`.
name: A name for the operation (optional).
Returns:
- An `Output`.
+ A `Tensor`.
"""
with ops.name_scope(name, "Pow", [x]) as name:
return gen_math_ops._pow(x, y, name=name)
@@ -526,12 +526,12 @@ def complex(real, imag, name=None):
```
Args:
- real: An `Output`. Must be one of the following types: `float32`, `float64`.
- imag: An `Output`. Must have the same type as `real`.
+ real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
+ imag: A `Tensor`. Must have the same type as `real`.
name: A name for the operation (optional).
Returns:
- An `Output` of type `complex64` or `complex128`.
+ A `Tensor` of type `complex64` or `complex128`.
"""
real = ops.convert_to_tensor(real, name="real")
imag = ops.convert_to_tensor(imag, name="imag")
@@ -566,11 +566,11 @@ def real(input, name=None):
If `input` is already real, it is returned unchanged.
Args:
- input: An `Output`. Must have numeric type.
+ input: A `Tensor`. Must have numeric type.
name: A name for the operation (optional).
Returns:
- An `Output` of type `float32` or `float64`.
+ A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Real", [input]) as name:
real_dtype = input.dtype.real_dtype
@@ -596,12 +596,12 @@ def imag(input, name=None):
```
Args:
- input: An `Output`. Must be one of the following types:
- `complex64`, `complex128`.
+ input: A `Tensor`. Must be one of the following types: `complex64`,
+ `complex128`.
name: A name for the operation (optional).
Returns:
- An `Output` of type `float32` or `float64`.
+ A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Imag", [input]) as name:
return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
@@ -620,11 +620,11 @@ def round(x, name=None):
```
Args:
- x: An `Output` of type `float32` or `float64`.
+ x: A `Tensor` of type `float32` or `float64`.
name: A name for the operation (optional).
Returns:
- An `Output` of same shape and type as `x`.
+ A `Tensor` of same shape and type as `x`.
"""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
@@ -636,7 +636,7 @@ def round(x, name=None):
def cast(x, dtype, name=None):
"""Casts a tensor to a new type.
- The operation casts `x` (in case of `Output`) or `x.values`
+ The operation casts `x` (in case of `Tensor`) or `x.values`
(in case of `SparseTensor`) to `dtype`.
For example:
@@ -647,12 +647,12 @@ def cast(x, dtype, name=None):
```
Args:
- x: An `Output` or `SparseTensor`.
+ x: A `Tensor` or `SparseTensor`.
dtype: The destination type.
name: A name for the operation (optional).
Returns:
- An `Output` or `SparseTensor` with same shape as `x`.
+ A `Tensor` or `SparseTensor` with same shape as `x`.
Raises:
TypeError: If `x` cannot be cast to the `dtype`.
@@ -682,7 +682,7 @@ def saturate_cast(value, dtype, name=None):
applies the appropriate clamping before the cast.
Args:
- value: An `Output`.
+ value: A `Tensor`.
dtype: The desired output `DType`.
name: A name for the operation (optional).
@@ -711,11 +711,11 @@ def to_float(x, name="ToFloat"):
"""Casts a tensor to type `float32`.
Args:
- x: An `Output` or `SparseTensor`.
+ x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
- An `Output` or `SparseTensor` with same shape as `x` with type `float32`.
+ A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.
Raises:
TypeError: If `x` cannot be cast to `float32`.
@@ -727,11 +727,11 @@ def to_double(x, name="ToDouble"):
"""Casts a tensor to type `float64`.
Args:
- x: An `Output` or `SparseTensor`.
+ x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
- An `Output` or `SparseTensor` with same shape as `x` with type `float64`.
+ A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.
Raises:
TypeError: If `x` cannot be cast to `float64`.
@@ -743,11 +743,11 @@ def to_int32(x, name="ToInt32"):
"""Casts a tensor to type `int32`.
Args:
- x: An `Output` or `SparseTensor`.
+ x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
- An `Output` or `SparseTensor` with same shape as `x` with type `int32`.
+ A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.
Raises:
TypeError: If `x` cannot be cast to `int32`.
@@ -759,11 +759,11 @@ def to_int64(x, name="ToInt64"):
"""Casts a tensor to type `int64`.
Args:
- x: An `Output` or `SparseTensor`.
+ x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
- An `Output` or `SparseTensor` with same shape as `x` with type `int64`.
+ A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.
Raises:
TypeError: If `x` cannot be cast to `int64`.
@@ -775,11 +775,11 @@ def to_bfloat16(x, name="ToBFloat16"):
"""Casts a tensor to type `bfloat16`.
Args:
- x: An `Output` or `SparseTensor`.
+ x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
- An `Output` or `SparseTensor` with same shape as `x` with type `bfloat16`.
+ A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.
Raises:
TypeError: If `x` cannot be cast to `bfloat16`.
@@ -804,7 +804,7 @@ def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
Args:
func: the operator
op_name: name of the operator being overridden
- clazz_object: class to override for. Either `Output` or `SparseTensor`.
+ clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
"""
def binary_op_wrapper(x, y):
@@ -903,8 +903,8 @@ def truediv(x, y, name=None):
and `int64` (matching the behavior of Numpy).
Args:
- x: `Output` numerator of numeric type.
- y: `Output` denominator of numeric type.
+ x: `Tensor` numerator of numeric type.
+ y: `Tensor` denominator of numeric type.
name: A name for the operation (optional).
Returns:
@@ -949,8 +949,8 @@ def floordiv(x, y, name=None):
as well.
Args:
- x: `Output` numerator of real numeric type.
- y: `Output` denominator of real numeric type.
+ x: `Tensor` numerator of real numeric type.
+ y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
@@ -1060,19 +1060,19 @@ def range(start, limit=None, delta=1, dtype=None, name="range"):
```
Args:
- start: A 0-D `Output` (scalar). Acts as first entry in the range if
+ start: A 0-D `Tensor` (scalar). Acts as first entry in the range if
`limit` is not None; otherwise, acts as range limit and first entry
defaults to 0.
- limit: A 0-D `Output` (scalar). Upper limit of sequence,
+ limit: A 0-D `Tensor` (scalar). Upper limit of sequence,
exclusive. If None, defaults to the value of `start` while the first
entry of the range defaults to 0.
- delta: A 0-D `Output` (scalar). Number that increments
+ delta: A 0-D `Tensor` (scalar). Number that increments
`start`. Defaults to 1.
dtype: The type of the elements of the resulting tensor.
name: A name for the operation. Defaults to "range".
Returns:
- An 1-D `Output` of type `dtype`.
+ A 1-D `Tensor` of type `dtype`.
@compatibility(numpy)
Equivalent to np.arange
@@ -1649,9 +1649,9 @@ def matmul(a,
```
Args:
- a: `Output` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
+ a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
`complex128` and rank > 1.
- b: `Output` with same type as `a`.
+ b: `Tensor` with same type and rank as `a`.
transpose_a: If `True`, `a` is transposed before multiplication.
transpose_b: If `True`, `b` is transposed before multiplication.
adjoint_a: If `True`, `a` is conjugated and transposed before
@@ -1663,7 +1663,7 @@ def matmul(a,
name: Name for the operation (optional).
Returns:
- An `Output` of the same type as `a` and `b` where each inner-most matrix is
+ A `Tensor` of the same type as `a` and `b` where each inner-most matrix is
the product of the corresponding matrices in `a` and `b`, e.g. if all
transpose or adjoint attributes are `False`:
@@ -1813,11 +1813,11 @@ def add_n(inputs, name=None):
"""Adds all input tensors element-wise.
Args:
- inputs: A list of `Output` objects, each with same shape and type.
+ inputs: A list of `Tensor` objects, each with same shape and type.
name: A name for the operation (optional).
Returns:
- An `Output` of same shape and type as the elements of `inputs`.
+ A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
@@ -1860,13 +1860,13 @@ def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
```
Args:
- inputs: A list of `Output` objects, each with same shape and type.
+ inputs: A list of `Tensor` objects, each with same shape and type.
shape: Shape of elements of `inputs`.
tensor_dtype: The type of `inputs`.
name: A name for the operation (optional).
Returns:
- An `Output` of same shape and type as the elements of `inputs`.
+ A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
@@ -1981,15 +1981,15 @@ def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
```
Args:
- x: An `Output`. Must be one of the following types: `float32`, `float64`,
+ x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
- axis: An `Output` of type `int32` (default: 0).
+ axis: A `Tensor` of type `int32` (default: 0).
reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
- An `Output`. Has the same type as `x`.
+ A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumsum", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
@@ -2027,15 +2027,15 @@ def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
```
Args:
- x: An `Output`. Must be one of the following types: `float32`, `float64`,
+ x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
- axis: An `Output` of type `int32` (default: 0).
+ axis: A `Tensor` of type `int32` (default: 0).
reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
- An `Output`. Has the same type as `x`.
+ A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumprod", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
@@ -2061,11 +2061,11 @@ def conj(x, name=None):
If `x` is real, it is returned unchanged.
Args:
- x: `Output` to conjugate. Must have numeric type.
+ x: `Tensor` to conjugate. Must have numeric type.
name: A name for the operation (optional).
Returns:
- An `Output` that is the conjugate of `x` (with the same type).
+ A `Tensor` that is the conjugate of `x` (with the same type).
Raises:
TypeError: If `x` is not a numeric tensor.
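The `matmul` return contract above, on concrete values:
```
import tensorflow as tf

a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([[5.0, 6.0], [7.0, 8.0]])
c = tf.matmul(a, b)   # [[19., 22.], [43., 50.]]
```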
diff --git a/tensorflow/python/ops/nn.py b/tensorflow/python/ops/nn.py
index 50f8c288ff..d8a431f1c2 100644
--- a/tensorflow/python/ops/nn.py
+++ b/tensorflow/python/ops/nn.py
@@ -351,14 +351,14 @@ def log_poisson_loss(log_input, targets, compute_full_loss=False, name=None):
= exp(c) - z * c [+ z * log(z) - z + 0.5 * log(2 * pi * z)]
Args:
- log_input: An `Output` of type `float32` or `float64`.
- targets: An `Output` of the same type and shape as `log_input`.
+ log_input: A `Tensor` of type `float32` or `float64`.
+ targets: A `Tensor` of the same type and shape as `log_input`.
compute_full_loss: whether to compute the full loss. If false, a constant
term is dropped in favor of more efficient optimization.
name: A name for the operation (optional).
Returns:
- An `Output` of the same shape as `log_input` with the componentwise
+ A `Tensor` of the same shape as `log_input` with the componentwise
logistic losses.
Raises:
@@ -421,12 +421,12 @@ def sigmoid_cross_entropy_with_logits(logits, targets, name=None):
`logits` and `targets` must have the same type and shape.
Args:
- logits: An `Output` of type `float32` or `float64`.
- targets: An `Output` of the same type and shape as `logits`.
+ logits: A `Tensor` of type `float32` or `float64`.
+ targets: A `Tensor` of the same type and shape as `logits`.
name: A name for the operation (optional).
Returns:
- An `Output` of the same shape as `logits` with the componentwise
+ A `Tensor` of the same shape as `logits` with the componentwise
logistic losses.
Raises:
@@ -492,13 +492,13 @@ def weighted_cross_entropy_with_logits(logits, targets, pos_weight, name=None):
`logits` and `targets` must have the same type and shape.
Args:
- logits: An `Output` of type `float32` or `float64`.
- targets: An `Output` of the same type and shape as `logits`.
+ logits: A `Tensor` of type `float32` or `float64`.
+ targets: A `Tensor` of the same type and shape as `logits`.
pos_weight: A coefficient to use on the positive examples.
name: A name for the operation (optional).
Returns:
- An `Output` of the same shape as `logits` with the componentwise
+ A `Tensor` of the same shape as `logits` with the componentwise
weighted logistic losses.
Raises:
@@ -560,7 +560,7 @@ def l2_normalize(x, dim, epsilon=1e-12, name=None):
dimension `dim`.
Args:
- x: An `Output`.
+ x: A `Tensor`.
dim: Dimension along which to normalize. A scalar or a vector of
integers.
epsilon: A lower bound value for the norm. Will use `sqrt(epsilon)` as the
@@ -568,7 +568,7 @@ def l2_normalize(x, dim, epsilon=1e-12, name=None):
name: A name for this operation (optional).
Returns:
- An `Output` with the same shape as `x`.
+ A `Tensor` with the same shape as `x`.
"""
with ops.name_scope(name, "l2_normalize", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
@@ -636,7 +636,7 @@ def depthwise_conv2d(input, filter, strides, padding, name=None):
name: A name for this operation (optional).
Returns:
- A 4-D `Output` of shape
+ A 4-D `Tensor` of shape
`[batch, out_height, out_width, in_channels * channel_multiplier]`.
"""
with ops.name_scope(name, "depthwise", [input, filter]) as name:
@@ -672,11 +672,11 @@ def separable_conv2d(input, depthwise_filter, pointwise_filter, strides,
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
Args:
- input: 4-D `Output` with shape `[batch, in_height, in_width, in_channels]`.
- depthwise_filter: 4-D `Output` with shape
+ input: 4-D `Tensor` with shape `[batch, in_height, in_width, in_channels]`.
+ depthwise_filter: 4-D `Tensor` with shape
`[filter_height, filter_width, in_channels, channel_multiplier]`.
Contains `in_channels` convolutional filters of depth 1.
- pointwise_filter: 4-D `Output` with shape
+ pointwise_filter: 4-D `Tensor` with shape
`[1, 1, channel_multiplier * in_channels, out_channels]`. Pointwise
filter to mix channels after `depthwise_filter` has convolved spatially.
strides: 1-D of size 4. The strides for the depthwise convolution for
@@ -687,7 +687,7 @@ def separable_conv2d(input, depthwise_filter, pointwise_filter, strides,
name: A name for this operation (optional).
Returns:
- A 4-D `Output` of shape `[batch, out_height, out_width, out_channels]`.
+ A 4-D `Tensor` of shape `[batch, out_height, out_width, out_channels]`.
Raises:
ValueError: If channel_multiplier * in_channels > out_channels,
@@ -737,16 +737,16 @@ def sufficient_statistics(x, axes, shift=None, keep_dims=False, name=None):
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Computing_shifted_data
Args:
- x: An `Output`.
+ x: A `Tensor`.
axes: Array of ints. Axes along which to compute mean and variance.
- shift: An `Output` containing the value by which to shift the data for
+ shift: A `Tensor` containing the value by which to shift the data for
numerical stability, or `None` if no shift is to be performed. A shift
close to the true mean provides the most numerically stable results.
keep_dims: produce statistics with the same dimensionality as the input.
name: Name used to scope the operations that compute the sufficient stats.
Returns:
- Four `Output` objects of the same type as `x`:
+ Four `Tensor` objects of the same type as `x`:
* the count (number of elements to average over).
* the (possibly shifted) sum of the elements in the array.
@@ -782,17 +782,17 @@ def normalize_moments(counts, mean_ss, variance_ss, shift, name=None):
"""Calculate the mean and variance of based on the sufficient statistics.
Args:
- counts: An `Output` containing a the total count of the data (one value).
- mean_ss: An `Output` containing the mean sufficient statistics: the
- (possibly shifted) sum of the elements to average over.
- variance_ss: An `Output` containing the variance sufficient statistics: the
+ counts: A `Tensor` containing the total count of the data (one value).
+ mean_ss: A `Tensor` containing the mean sufficient statistics: the (possibly
+ shifted) sum of the elements to average over.
+ variance_ss: A `Tensor` containing the variance sufficient statistics: the
(possibly shifted) squared sum of the data to compute the variance over.
- shift: An `Output` containing the value by which the data is shifted for
+ shift: A `Tensor` containing the value by which the data is shifted for
numerical stability, or `None` if no shift was performed.
name: Name used to scope the operations that compute the moments.
Returns:
- Two `Output` objects: `mean` and `variance`.
+ Two `Tensor` objects: `mean` and `variance`.
"""
with ops.name_scope(name, "normalize", [counts, mean_ss, variance_ss, shift]):
divisor = math_ops.reciprocal(counts, name="divisor")
@@ -823,17 +823,17 @@ def moments(x, axes, shift=None, name=None, keep_dims=False):
* for simple batch normalization pass `axes=[0]` (batch only).
Args:
- x: An `Output`.
+ x: A `Tensor`.
axes: Array of ints. Axes along which to compute mean and
variance.
- shift: An `Output` containing the value by which to shift the data for
+ shift: A `Tensor` containing the value by which to shift the data for
numerical stability, or `None` if no shift is to be performed. A shift
close to the true mean provides the most numerically stable results.
name: Name used to scope the operations that compute the moments.
keep_dims: produce moments with the same dimensionality as the input.
Returns:
- Two `Output` objects: `mean` and `variance`.
+ Two `Tensor` objects: `mean` and `variance`.
"""
with ops.name_scope(name, "moments", [x, axes, shift]):
# The dynamic range of fp16 is too limited to support the collection of
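A sketch under assumed shapes of the two returned `Tensor` objects:

    import tensorflow as tf

    x = tf.random_normal([128, 10])
    mean, variance = tf.nn.moments(x, axes=[0])    # per-feature batch statistics
    print(mean.get_shape(), variance.get_shape())  # (10,) (10,)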
@@ -965,12 +965,12 @@ def batch_normalization(x,
thereof during inference.
Args:
- x: Input `Output` of arbitrary dimensionality.
- mean: A mean `Output`.
- variance: A variance `Output`.
- offset: An offset `Output`, often denoted \\(\beta\\) in equations, or
+ x: Input `Tensor` of arbitrary dimensionality.
+ mean: A mean `Tensor`.
+ variance: A variance `Tensor`.
+ offset: An offset `Tensor`, often denoted \\(\beta\\) in equations, or
None. If present, will be added to the normalized tensor.
- scale: A scale `Output`, often denoted \\(\gamma\\) in equations, or
+ scale: A scale `Tensor`, often denoted \\(\gamma\\) in equations, or
`None`. If present, the scale is applied to the normalized tensor.
variance_epsilon: A small float number to avoid dividing by 0.
name: A name for this operation (optional).
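A sketch combining `moments` with `batch_normalization` for simple batch normalization over axis 0; the shapes and epsilon are assumed:

    import tensorflow as tf

    x = tf.random_normal([128, 10])
    mean, variance = tf.nn.moments(x, axes=[0])
    beta = tf.Variable(tf.zeros([10]))    # offset, added after normalization
    gamma = tf.Variable(tf.ones([10]))    # scale, applied after normalization
    y = tf.nn.batch_normalization(x, mean, variance, beta, gamma,
                                  variance_epsilon=1e-3)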
@@ -998,11 +998,11 @@ def fused_batch_norm(x, scale, offset, # pylint: disable=invalid-name
As described in http://arxiv.org/abs/1502.03167.
Args:
- x: Input `Output` of 4 dimensions.
- scale: An `Output` of 1 dimension for scaling.
- offset: An `Output` of 1 dimension for bias.
- mean: An `Output` of 1 dimension for population mean used for inference.
- variance: An `Output` of 1 dimension for population variance
+ x: Input `Tensor` of 4 dimensions.
+ scale: A `Tensor` of 1 dimension for scaling.
+ offset: A `Tensor` of 1 dimension for bias.
+ mean: A `Tensor` of 1 dimension for population mean used for inference.
+ variance: A `Tensor` of 1 dimension for population variance
used for inference.
epsilon: A small float number added to the variance of x.
data_format: The data format for x. Either "NHWC" (default) or "NCHW".
@@ -1117,13 +1117,13 @@ def _compute_sampled_logits(weights,
sum to 1 per-example.
Args:
- weights: An `Output` of shape `[num_classes, dim]`, or a list of `Output`
+ weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
`[num_classes, dim]`. The (possibly-partitioned) class embeddings.
- biases: An `Output` of shape `[num_classes]`. The class biases.
- inputs: An `Output` of shape `[batch_size, dim]`. The forward
+ biases: A `Tensor` of shape `[num_classes]`. The class biases.
+ inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
- labels: An `Output` of type `int64` and shape `[batch_size,
+ labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes. Note that this format differs from
the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
num_sampled: An `int`. The number of classes to randomly sample per batch.
@@ -1143,7 +1143,7 @@ def _compute_sampled_logits(weights,
Default is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
Returns:
- out_logits, out_labels: `Output` objects each with shape
+ out_logits, out_labels: `Tensor` objects each with shape
`[batch_size, num_true + num_sampled]`, for passing to either
`nn.sigmoid_cross_entropy_with_logits` (NCE) or
`nn.softmax_cross_entropy_with_logits` (sampled softmax).
@@ -1292,13 +1292,13 @@ def nce_loss(weights,
with an otherwise unused class.
Args:
- weights: An `Output` of shape `[num_classes, dim]`, or a list of `Output`
+ weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
[num_classes, dim]. The (possibly-partitioned) class embeddings.
- biases: An `Output` of shape `[num_classes]`. The class biases.
- inputs: An `Output` of shape `[batch_size, dim]`. The forward
+ biases: A `Tensor` of shape `[num_classes]`. The class biases.
+ inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
- labels: An `Output` of type `int64` and shape `[batch_size,
+ labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes.
num_sampled: An `int`. The number of classes to randomly sample per batch.
num_classes: An `int`. The number of possible classes.
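A sketch of the argument shapes described above; the sizes are assumed, and keyword arguments are used to stay independent of positional order:

    import tensorflow as tf

    num_classes, dim, batch_size = 10000, 128, 32
    weights = tf.Variable(tf.truncated_normal([num_classes, dim]))
    biases = tf.Variable(tf.zeros([num_classes]))
    inputs = tf.random_normal([batch_size, dim])
    labels = tf.cast(tf.random_uniform([batch_size, 1], maxval=num_classes,
                                       dtype=tf.int32), tf.int64)
    loss = tf.reduce_mean(tf.nn.nce_loss(
        weights=weights, biases=biases, inputs=inputs, labels=labels,
        num_sampled=64, num_classes=num_classes))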
@@ -1370,13 +1370,13 @@ def sampled_softmax_loss(weights,
([pdf](http://arxiv.org/pdf/1412.2007.pdf)) for the math.
Args:
- weights: An `Output` of shape `[num_classes, dim]`, or a list of `Output`
+ weights: A `Tensor` of shape `[num_classes, dim]`, or a list of `Tensor`
objects whose concatenation along dimension 0 has shape
[num_classes, dim]. The (possibly-sharded) class embeddings.
- biases: An `Output` of shape `[num_classes]`. The class biases.
- inputs: An `Output` of shape `[batch_size, dim]`. The forward
+ biases: A `Tensor` of shape `[num_classes]`. The class biases.
+ inputs: A `Tensor` of shape `[batch_size, dim]`. The forward
activations of the input network.
- labels: An `Output` of type `int64` and shape `[batch_size,
+ labels: A `Tensor` of type `int64` and shape `[batch_size,
num_true]`. The target classes. Note that this format differs from
the `labels` argument of `nn.softmax_cross_entropy_with_logits`.
num_sampled: An `int`. The number of classes to randomly sample per batch.
diff --git a/tensorflow/python/ops/nn_ops.py b/tensorflow/python/ops/nn_ops.py
index d450e91658..31db4e9d56 100644
--- a/tensorflow/python/ops/nn_ops.py
+++ b/tensorflow/python/ops/nn_ops.py
@@ -530,12 +530,12 @@ def convolution(input, filter, # pylint: disable=redefined-builtin
It is required that 1 <= N <= 3.
Args:
- input: An N-D `Output` of type `T`, of shape
+ input: An N-D `Tensor` of type `T`, of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
- filter: An N-D `Output` with the same type as `input` and shape
+ filter: An N-D `Tensor` with the same type as `input` and shape
`spatial_filter_shape + [in_channels, out_channels]`.
padding: A string, either `"VALID"` or `"SAME"`. The padding algorithm.
strides: Optional. Sequence of N ints >= 1. Specifies the output stride.
@@ -558,7 +558,7 @@ def convolution(input, filter, # pylint: disable=redefined-builtin
N=3, the valid value is "NDHWC".
Returns:
- An `Output` with the same type as `input` of shape
+ A `Tensor` with the same type as `input` of shape
`[batch_size] + output_spatial_shape + [out_channels]`
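A sketch with assumed NHWC shapes, matching the documented output shape `[batch_size] + output_spatial_shape + [out_channels]`:

    import tensorflow as tf

    inp = tf.random_normal([4, 64, 64, 3])   # [batch] + spatial + [in_channels]
    filt = tf.random_normal([3, 3, 3, 16])   # spatial_filter_shape + [in, out]
    out = tf.nn.convolution(inp, filt, padding="SAME")
    print(out.get_shape())  # (4, 64, 64, 16)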
@@ -890,9 +890,9 @@ def atrous_conv2d(value, filters, rate, padding, name=None):
inputs are identical.
Args:
- value: A 4-D `Output` of type `float`. It needs to be in the default "NHWC"
+ value: A 4-D `Tensor` of type `float`. It needs to be in the default "NHWC"
format. Its shape is `[batch, in_height, in_width, in_channels]`.
- filters: A 4-D `Output` with the same type as `value` and shape
+ filters: A 4-D `Tensor` with the same type as `value` and shape
`[filter_height, filter_width, in_channels, out_channels]`. `filters`'
`in_channels` dimension must match that of `value`. Atrous convolution is
equivalent to standard convolution with upsampled filters with effective
@@ -909,7 +909,7 @@ def atrous_conv2d(value, filters, rate, padding, name=None):
name: Optional name for the returned tensor.
Returns:
- An `Output` with the same type as `value`.
+ A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filters`' shape, or if
@@ -1019,13 +1019,13 @@ def conv2d_transpose(value,
deconvolution.
Args:
- value: A 4-D `Output` of type `float` and shape
+ value: A 4-D `Tensor` of type `float` and shape
`[batch, height, width, in_channels]` for `NHWC` data format or
`[batch, in_channels, height, width]` for `NCHW` data format.
- filter: A 4-D `Output` with the same type as `value` and shape
+ filter: A 4-D `Tensor` with the same type as `value` and shape
`[height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
- output_shape: A 1-D `Output` representing the output shape of the
+ output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
@@ -1035,7 +1035,7 @@ def conv2d_transpose(value,
name: Optional name for the returned tensor.
Returns:
- An `Output` with the same type as `value`.
+ A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
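A sketch with assumed shapes; note the filter layout `[height, width, output_channels, in_channels]` and that `output_shape` must be supplied explicitly:

    import tensorflow as tf

    value = tf.random_normal([4, 8, 8, 16])   # NHWC
    filt = tf.random_normal([3, 3, 32, 16])   # [h, w, out_channels, in_channels]
    out = tf.nn.conv2d_transpose(value, filt, output_shape=[4, 16, 16, 32],
                                 strides=[1, 2, 2, 1], padding="SAME")
    print(out.get_shape())  # (4, 16, 16, 32)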
@@ -1092,12 +1092,12 @@ def conv3d_transpose(value,
deconvolution.
Args:
- value: A 5-D `Output` of type `float` and shape
+ value: A 5-D `Tensor` of type `float` and shape
`[batch, depth, height, width, in_channels]`.
- filter: A 5-D `Output` with the same type as `value` and shape
+ filter: A 5-D `Tensor` with the same type as `value` and shape
`[depth, height, width, output_channels, in_channels]`. `filter`'s
`in_channels` dimension must match that of `value`.
- output_shape: A 1-D `Output` representing the output shape of the
+ output_shape: A 1-D `Tensor` representing the output shape of the
deconvolution op.
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
@@ -1106,7 +1106,7 @@ def conv3d_transpose(value,
name: Optional name for the returned tensor.
Returns:
- An `Output` with the same type as `value`.
+ A `Tensor` with the same type as `value`.
Raises:
ValueError: If input/output depth does not match `filter`'s shape, or if
@@ -1155,16 +1155,16 @@ def bias_add(value, bias, data_format=None, name=None):
case where both types are quantized.
Args:
- value: An `Output` with type `float`, `double`, `int64`, `int32`, `uint8`,
+ value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
- bias: A 1-D `Output` with size matching the last dimension of `value`.
+ bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: A name for the operation (optional).
Returns:
- An `Output` with the same type as `value`.
+ A `Tensor` with the same type as `value`.
"""
with ops.name_scope(name, "BiasAdd", [value, bias]) as name:
value = ops.convert_to_tensor(value, name="input")
@@ -1184,15 +1184,15 @@ def bias_add_v1(value, bias, name=None):
case where both types are quantized.
Args:
- value: An `Output` with type `float`, `double`, `int64`, `int32`, `uint8`,
+ value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
`int16`, `int8`, `complex64`, or `complex128`.
- bias: A 1-D `Output` with size matching the last dimension of `value`.
+ bias: A 1-D `Tensor` with size matching the last dimension of `value`.
Must be the same type as `value` unless `value` is a quantized type,
in which case a different quantized type may be used.
name: A name for the operation (optional).
Returns:
- An `Output` with the same type as `value`.
+ A `Tensor` with the same type as `value`.
"""
with ops.name_scope(name, "BiasAddV1", [value, bias]) as name:
value = ops.convert_to_tensor(value, name="input")
@@ -1209,12 +1209,12 @@ def crelu(features, name=None):
Source: https://arxiv.org/abs/1603.05201
Args:
- features: An `Output` with type `float`, `double`, `int32`, `int64`,
- `uint8`, `int16`, or `int8`.
+ features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
+ `int16`, or `int8`.
name: A name for the operation (optional).
Returns:
- An `Output` with the same type as `features`.
+ A `Tensor` with the same type as `features`.
"""
with ops.name_scope(name, "CRelu", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
@@ -1226,12 +1226,12 @@ def relu6(features, name=None):
"""Computes Rectified Linear 6: `min(max(features, 0), 6)`.
Args:
- features: An `Output` with type `float`, `double`, `int32`, `int64`,
- `uint8`, `int16`, or `int8`.
+ features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
+ `int16`, or `int8`.
name: A name for the operation (optional).
Returns:
- An `Output` with the same type as `features`.
+ A `Tensor` with the same type as `features`.
"""
with ops.name_scope(name, "Relu6", [features]) as name:
features = ops.convert_to_tensor(features, name="features")
@@ -1272,7 +1272,7 @@ def _softmax(logits, compute_op, dim=-1, name=None):
transposed and reshaped back.
Args:
- logits: A non-empty `Output`. Must be one of the following types: `half`,
+ logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
compute_op: Either gen_nn_ops._softmax or gen_nn_ops._log_softmax
dim: The dimension softmax would be performed on. The default is -1 which
@@ -1280,7 +1280,7 @@ def _softmax(logits, compute_op, dim=-1, name=None):
name: A name for the operation (optional).
Returns:
- An `Output`. Has the same type as `logits`. Same shape as `logits`.
+ A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `dim` is beyond the last
dimension of `logits`.
@@ -1340,14 +1340,14 @@ def softmax(logits, dim=-1, name=None):
softmax = exp(logits) / reduce_sum(exp(logits), dim)
Args:
- logits: A non-empty `Output`. Must be one of the following types: `half`,
+ logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
dim: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
- An `Output`. Has the same type as `logits`. Same shape as `logits`.
+ A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `dim` is beyond the last
dimension of `logits`.
@@ -1363,14 +1363,14 @@ def log_softmax(logits, dim=-1, name=None):
logsoftmax = logits - log(reduce_sum(exp(logits), dim))
Args:
- logits: A non-empty `Output`. Must be one of the following types: `half`,
+ logits: A non-empty `Tensor`. Must be one of the following types: `half`,
`float32`, `float64`.
dim: The dimension softmax would be performed on. The default is -1 which
indicates the last dimension.
name: A name for the operation (optional).
Returns:
- An `Output`. Has the same type as `logits`. Same shape as `logits`.
+ A `Tensor`. Has the same type as `logits`. Same shape as `logits`.
Raises:
InvalidArgumentError: if `logits` is empty or `dim` is beyond the last
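A small sketch of both ops on an assumed batch of logits:

    import tensorflow as tf

    logits = tf.constant([[2.0, 1.0, 0.1]])
    probs = tf.nn.softmax(logits)          # each row sums to 1
    log_probs = tf.nn.log_softmax(logits)  # numerically stable log of the above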
@@ -1409,7 +1409,7 @@ def softmax_cross_entropy_with_logits(logits, labels, dim=-1, name=None):
name: A name for the operation (optional).
Returns:
- A 1-D `Output` of length `batch_size` of the same type as `logits` with the
+ A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the
softmax cross entropy loss.
"""
# TODO(pcmurray) Raise an error when the labels do not sum to 1. Note: This
@@ -1492,7 +1492,7 @@ def sparse_softmax_cross_entropy_with_logits(logits, labels, name=None):
logits: Unscaled log probabilities of rank `r` and shape
`[d_0, d_1, ..., d_{r-2}, num_classes]` and dtype `float32` or `float64`.
- labels: `Output` of shape `[d_0, d_1, ..., d_{r-2}]` and dtype `int32` or
+ labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-2}]` and dtype `int32` or
`int64`. Each entry in `labels` must be an index in `[0, num_classes)`.
Other values will raise an exception when this op is run on CPU, and
return `NaN` for the corresponding loss and gradient rows
@@ -1500,7 +1500,7 @@ def sparse_softmax_cross_entropy_with_logits(logits, labels, name=None):
name: A name for the operation (optional).
Returns:
- An `Output` of the same shape as `labels` and of the same type as `logits`
+ A `Tensor` of the same shape as `labels` and of the same type as `logits`
with the softmax cross entropy loss.
Raises:
@@ -1564,7 +1564,7 @@ def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
window in `value`.
Args:
- value: A 4-D `Output` of shape `[batch, height, width, channels]` and type
+ value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type
`float32`, `float64`, `qint8`, `quint8`, or `qint32`.
ksize: A list of ints that has length >= 4.
The size of the window for each dimension of the input tensor.
@@ -1577,8 +1577,7 @@ def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
name: Optional name for the operation.
Returns:
- An `Output` with the same type as `value`. The average pooled output
- tensor.
+ A `Tensor` with the same type as `value`. The average pooled output tensor.
"""
with ops.name_scope(name, "AvgPool", [value]) as name:
value = ops.convert_to_tensor(value, name="input")
@@ -1594,7 +1593,7 @@ def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
"""Performs the max pooling on the input.
Args:
- value: A 4-D `Output` with shape `[batch, height, width, channels]` and
+ value: A 4-D `Tensor` with shape `[batch, height, width, channels]` and
type `tf.float32`.
ksize: A list of ints that has length >= 4. The size of the window for
each dimension of the input tensor.
@@ -1606,7 +1605,7 @@ def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
name: Optional name for the operation.
Returns:
- An `Output` with type `tf.float32`. The max pooled output tensor.
+ A `Tensor` with type `tf.float32`. The max pooled output tensor.
"""
with ops.name_scope(name, "MaxPool", [value]) as name:
value = ops.convert_to_tensor(value, name="input")
@@ -1725,9 +1724,9 @@ def dropout(x, keep_prob, noise_shape=None, seed=None, name=None):
Args:
x: A tensor.
- keep_prob: A scalar `Output` with the same type as x. The probability
+ keep_prob: A scalar `Tensor` with the same type as x. The probability
that each element is kept.
- noise_shape: A 1-D `Output` of type `int32`, representing the
+ noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
[`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
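A sketch of the usual train/eval pattern, with an assumed keep probability fed at run time:

    import tensorflow as tf

    x = tf.ones([4, 10])
    keep_prob = tf.placeholder(tf.float32)  # e.g. feed 0.5 to train, 1.0 to eval
    y = tf.nn.dropout(x, keep_prob)  # surviving entries scaled by 1/keep_prob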
@@ -1782,8 +1781,8 @@ def top_k(input, k=1, sorted=True, name=None):
If two elements are equal, the lower-index element appears first.
Args:
- input: 1-D or higher `Output` with last dimension at least `k`.
- k: 0-D `int32` `Output`. Number of top elements to look for along the last
+ input: 1-D or higher `Tensor` with last dimension at least `k`.
+ k: 0-D `int32` `Tensor`. Number of top elements to look for along the last
dimension (along each row for matrices).
sorted: If true the resulting `k` elements will be sorted by the values in
descending order.
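A sketch with assumed values, showing the paired `values`/`indices` outputs along the last dimension:

    import tensorflow as tf

    scores = tf.constant([[0.1, 0.9, 0.4],
                          [0.7, 0.2, 0.5]])
    values, indices = tf.nn.top_k(scores, k=2)
    # values  -> [[0.9, 0.4], [0.7, 0.5]]
    # indices -> [[1, 2],     [0, 2]]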
@@ -1824,8 +1823,8 @@ def conv1d(value, filters, stride, padding,
returned to the caller.
Args:
- value: A 3D `Output`. Must be of type `float32` or `float64`.
- filters: A 3D `Output`. Must have the same type as `input`.
+ value: A 3D `Tensor`. Must be of type `float32` or `float64`.
+ filters: A 3D `Tensor`. Must have the same type as `input`.
stride: An `integer`. The number of entries by which
the filter is moved right at each step.
padding: 'SAME' or 'VALID'
@@ -1837,7 +1836,7 @@ def conv1d(value, filters, stride, padding,
name: A name for the operation (optional).
Returns:
- An `Output`. Has the same type as input.
+ A `Tensor`. Has the same type as input.
Raises:
ValueError: if `data_format` is invalid.
@@ -1901,8 +1900,8 @@ def erosion2d(value, kernel, strides, rates, padding, name=None):
the dilation of `-value` by the reflected `kernel`.
Args:
- value: An `Output`. 4-D with shape `[batch, in_height, in_width, depth]`.
- kernel: An `Output`. Must have the same type as `value`.
+ value: A `Tensor`. 4-D with shape `[batch, in_height, in_width, depth]`.
+ kernel: A `Tensor`. Must have the same type as `value`.
3-D with shape `[kernel_height, kernel_width, depth]`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. The stride of the sliding window for each dimension of
@@ -1916,7 +1915,7 @@ def erosion2d(value, kernel, strides, rates, padding, name=None):
is used.
Returns:
- An `Output`. Has the same type as `value`.
+ A `Tensor`. Has the same type as `value`.
4-D with shape `[batch, out_height, out_width, depth]`.
Raises:
diff --git a/tensorflow/python/ops/parsing_ops.py b/tensorflow/python/ops/parsing_ops.py
index 5d9a3da895..21b957380a 100644
--- a/tensorflow/python/ops/parsing_ops.py
+++ b/tensorflow/python/ops/parsing_ops.py
@@ -160,17 +160,17 @@ def parse_example(serialized, features, name=None, example_names=None):
protos. These may be useful for debugging purposes, but they have no effect on
the output. If not `None`, `example_names` must be the same length as `serialized`.
- This op parses serialized examples into a dictionary mapping keys to `Output`
+ This op parses serialized examples into a dictionary mapping keys to `Tensor`
and `SparseTensor` objects. `features` is a dict from keys to `VarLenFeature`
and `FixedLenFeature` objects. Each `VarLenFeature` is mapped to a
- `SparseTensor`, and each `FixedLenFeature` is mapped to an `Output`.
+ `SparseTensor`, and each `FixedLenFeature` is mapped to a `Tensor`.
Each `VarLenFeature` maps to a `SparseTensor` of the specified type
representing a ragged matrix. Its indices are `[batch, index]` where `batch`
is the batch entry the value is from in `serialized`, and `index` is the
value's index in the list of values associated with that feature and example.
- Each `FixedLenFeature` `df` maps to an `Output` of the specified type (or
+ Each `FixedLenFeature` `df` maps to a `Tensor` of the specified type (or
`tf.float32` if not specified) and shape `(serialized.size(),) + df.shape`.
`FixedLenFeature` entries with a `default_value` are optional. With no default
@@ -291,7 +291,7 @@ def parse_example(serialized, features, name=None, example_names=None):
the serialized protos in the batch.
Returns:
- A `dict` mapping feature keys to `Output` and `SparseTensor` values.
+ A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
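A sketch of the `Tensor`/`SparseTensor` split described above; the feature keys and the placeholder for the serialized batch are assumed:

    import tensorflow as tf

    serialized = tf.placeholder(tf.string, shape=[None])  # Example protos
    features = {
        "age": tf.FixedLenFeature([], tf.int64, default_value=-1),
        "tags": tf.VarLenFeature(tf.string),
    }
    parsed = tf.parse_example(serialized, features)
    # parsed["age"]  -> dense Tensor of shape [batch_size]
    # parsed["tags"] -> SparseTensor (variable number of tags per example)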
@@ -328,11 +328,11 @@ def _parse_example_raw(serialized,
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
dense_keys: A list of string keys in the examples' features.
- The results for these keys will be returned as `Output`s
+ The results for these keys will be returned as `Tensor`s
dense_types: A list of DTypes of the same length as `dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
- dense_defaults: A dict mapping string keys to `Output`s.
+ dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the dense_keys of the feature.
dense_shapes: A list of tuples with the same length as `dense_keys`.
The shape of the data for each dense feature referenced by `dense_keys`.
@@ -341,7 +341,7 @@ def _parse_example_raw(serialized,
name: A name for this operation (optional).
Returns:
- A `dict` mapping keys to `Output`s and `SparseTensor`s.
+ A `dict` mapping keys to `Tensor`s and `SparseTensor`s.
Raises:
ValueError: If sparse and dense key sets intersect, or input lengths do not
@@ -419,7 +419,7 @@ def parse_single_example(serialized, features, name=None, example_names=None):
Similar to `parse_example`, except:
- For dense tensors, the returned `Output` is identical to the output of
+ For dense tensors, the returned `Tensor` is identical to the output of
`parse_example`, except that there is no batch dimension: the output shape is the
same as the shape given in `dense_shape`.
@@ -438,7 +438,7 @@ def parse_single_example(serialized, features, name=None, example_names=None):
See `_parse_single_example_raw` documentation for more details.
Returns:
- A `dict` mapping feature keys to `Output` and `SparseTensor` values.
+ A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
@@ -478,7 +478,7 @@ def _parse_single_example_raw(serialized,
name: A name for this operation (optional).
Returns:
- A `dict` mapping feature keys to `Output` and `SparseTensor` values.
+ A `dict` mapping feature keys to `Tensor` and `SparseTensor` values.
Raises:
ValueError: if any feature is invalid.
@@ -550,7 +550,7 @@ def parse_single_sequence_example(
proto given in `serialized`.
This op parses a serialized sequence example into a tuple of dictionaries
- mapping keys to `Output` and `SparseTensor` objects respectively.
+ mapping keys to `Tensor` and `SparseTensor` objects respectively.
The first dictionary contains mappings for keys appearing in
`context_features`, and the second dictionary contains mappings for keys
appearing in `sequence_features`.
@@ -568,16 +568,16 @@ def parse_single_sequence_example(
`context_features` contains `VarLenFeature` and `FixedLenFeature` objects.
Each `VarLenFeature` is mapped to a `SparseTensor`, and each `FixedLenFeature`
- is mapped to an `Output`, of the specified type, shape, and default value.
+ is mapped to a `Tensor`, of the specified type, shape, and default value.
`sequence_features` contains `VarLenFeature` and `FixedLenSequenceFeature`
objects. Each `VarLenFeature` is mapped to a `SparseTensor`, and each
- `FixedLenSequenceFeature` is mapped to an `Output`, each of the specified type.
+ `FixedLenSequenceFeature` is mapped to a `Tensor`, each of the specified type.
The shape will be `(T,) + df.shape` for `FixedLenSequenceFeature` `df`, where
`T` is the length of the associated `FeatureList` in the `SequenceExample`.
- For instance, `FixedLenSequenceFeature([])` yields a scalar 1-D `Output` of
+ For instance, `FixedLenSequenceFeature([])` yields a scalar 1-D `Tensor` of
static shape `[None]` and dynamic shape `[T]`, while
- `FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 2-D matrix `Output`
+ `FixedLenSequenceFeature([k])` (for `int k >= 1`) yields a 2-D matrix `Tensor`
of static shape `[None, k]` and dynamic shape `[T, k]`.
Each `SparseTensor` corresponding to `sequence_features` represents a ragged
@@ -608,7 +608,7 @@ def parse_single_sequence_example(
name: A name for this operation (optional).
Returns:
- A tuple of two `dict`s, each mapping keys to `Output`s and `SparseTensor`s.
+ A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.
The first dict contains the context key/values.
The second dict contains the feature_list key/values.
@@ -663,11 +663,11 @@ def _parse_single_sequence_example_raw(serialized,
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
context_dense_keys: A list of string keys in the examples' features.
- The results for these keys will be returned as `Output`s
+ The results for these keys will be returned as `Tensor`s
context_dense_types: A list of DTypes, same length as `context_dense_keys`.
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
- context_dense_defaults: A dict mapping string keys to `Output`s.
+ context_dense_defaults: A dict mapping string keys to `Tensor`s.
The keys of the dict must match the context_dense_keys of the feature.
context_dense_shapes: A list of tuples, same length as `context_dense_keys`.
The shape of the data for each context_dense feature referenced by
@@ -680,7 +680,7 @@ def _parse_single_sequence_example_raw(serialized,
Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`),
and `tf.string` (`BytesList`) are supported.
feature_list_dense_keys: A list of string keys in the `SequenceExample`'s
- features_lists. The results for these keys will be returned as `Output`s.
+ feature_lists. The results for these keys will be returned as `Tensor`s.
feature_list_dense_types: A list of `DTypes`, same length as
`feature_list_dense_keys`. Only `tf.float32` (`FloatList`),
`tf.int64` (`Int64List`), and `tf.string` (`BytesList`) are supported.
@@ -696,7 +696,7 @@ def _parse_single_sequence_example_raw(serialized,
name: A name for this operation (optional).
Returns:
- A tuple of two `dict`s, each mapping keys to `Output`s and `SparseTensor`s.
+ A tuple of two `dict`s, each mapping keys to `Tensor`s and `SparseTensor`s.
The first dict contains the context key/values.
The second dict contains the feature_list key/values.
diff --git a/tensorflow/python/ops/partitioned_variables.py b/tensorflow/python/ops/partitioned_variables.py
index 362a5bd30a..edcc0e1d7c 100644
--- a/tensorflow/python/ops/partitioned_variables.py
+++ b/tensorflow/python/ops/partitioned_variables.py
@@ -255,11 +255,11 @@ def create_partitioned_variables(
0 until all slack is absorbed. The adjustment rules may change in the
future, but as you can save/restore these variables with different
slicing specifications this should not be a problem.
- initializer: An `Output` of shape `shape` or a variable initializer
+ initializer: A `Tensor` of shape `shape` or a variable initializer
function. If a function, it will be called once for each slice,
passing the shape and data type of the slice as parameters. The
function must return a tensor with the same shape as the slice.
- dtype: Type of the variables. Ignored if `initializer` is an `Output`.
+ dtype: Type of the variables. Ignored if `initializer` is a `Tensor`.
trainable: If True also add all the variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`.
collections: List of graph collections keys to add the variables to.
diff --git a/tensorflow/python/ops/random_ops.py b/tensorflow/python/ops/random_ops.py
index a50ba9e159..30483b3c7a 100644
--- a/tensorflow/python/ops/random_ops.py
+++ b/tensorflow/python/ops/random_ops.py
@@ -420,8 +420,8 @@ def random_gamma(shape,
name: Optional name for the operation.
Returns:
- samples: an `Output` of shape `tf.concat(shape, tf.shape(alpha + beta))`
- with values of type `dtype`.
+ samples: a `Tensor` of shape `tf.concat(shape, tf.shape(alpha + beta))` with
+ values of type `dtype`.
"""
with ops.name_scope(name, "random_gamma", [shape, alpha, beta]):
shape = ops.convert_to_tensor(shape, name="shape", dtype=dtypes.int32)
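A sketch of the documented sample shape, with assumed `shape` and `alpha`:

    import tensorflow as tf

    samples = tf.random_gamma([10], alpha=[0.5, 1.5])
    print(samples.get_shape())  # (10, 2): `shape` + the alpha/beta batch shape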
diff --git a/tensorflow/python/ops/resource_variable_ops.py b/tensorflow/python/ops/resource_variable_ops.py
index dd7f6c68d8..8962fe7e4a 100644
--- a/tensorflow/python/ops/resource_variable_ops.py
+++ b/tensorflow/python/ops/resource_variable_ops.py
@@ -59,7 +59,7 @@ class ResourceVariable(object):
"""Creates a variable.
Args:
- initial_value: An `Output` or Python object convertible to an `Output`
+ initial_value: A `Tensor` or Python object convertible to a `Tensor`
representing the initial value of this variable.
name: The name of this variable. Automatically uniquified.
trainable: Whether the global read of this variable will be used for
diff --git a/tensorflow/python/ops/rnn.py b/tensorflow/python/ops/rnn.py
index 706d58f0ed..f67f4f35e8 100644
--- a/tensorflow/python/ops/rnn.py
+++ b/tensorflow/python/ops/rnn.py
@@ -101,11 +101,11 @@ def rnn(cell, inputs, initial_state=None, dtype=None,
Args:
cell: An instance of RNNCell.
- inputs: A length T list of inputs, each an `Output` of shape
+ inputs: A length T list of inputs, each a `Tensor` of shape
`[batch_size, input_size]`, or a nested tuple of such elements.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
- an `Output` of appropriate type and shape `[batch_size, cell.state_size]`.
+ a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
@@ -236,7 +236,7 @@ def state_saving_rnn(cell, inputs, state_saver, state_name,
Args:
cell: An instance of `RNNCell`.
- inputs: A length T list of inputs, each an `Output` of shape
+ inputs: A length T list of inputs, each a `Tensor` of shape
`[batch_size, input_size]`.
state_saver: A state saver object with methods `state` and `save_state`.
state_name: Python string or tuple of strings. The name to use with the
@@ -333,15 +333,15 @@ def _rnn_step(
Args:
time: Python int, the current time step
- sequence_length: int32 `Output` vector of size [batch_size]
- min_sequence_length: int32 `Output` scalar, min of sequence_length
- max_sequence_length: int32 `Output` scalar, max of sequence_length
- zero_output: `Output` vector of shape [output_size]
- state: Either a single `Output` matrix of shape `[batch_size, state_size]`,
+ sequence_length: int32 `Tensor` vector of size [batch_size]
+ min_sequence_length: int32 `Tensor` scalar, min of sequence_length
+ max_sequence_length: int32 `Tensor` scalar, max of sequence_length
+ zero_output: `Tensor` vector of shape [output_size]
+ state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
or a list/tuple of such tensors.
call_cell: lambda returning tuple of (new_output, new_state) where
- new_output is an `Output` matrix of shape `[batch_size, output_size]`.
- new_state is an `Output` matrix of shape `[batch_size, state_size]`.
+ new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
+ new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
state_size: The `cell.state_size` associated with the state.
skip_conditionals: Python bool, whether to skip using the conditional
calculations. This is useful for `dynamic_rnn`, where the input tensor
@@ -350,8 +350,8 @@ def _rnn_step(
Returns:
A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
- final_output is an `Output` matrix of shape [batch_size, output_size]
- final_state is either a single `Output` matrix, or a tuple of such
+ final_output is a `Tensor` matrix of shape [batch_size, output_size]
+ final_state is either a single `Tensor` matrix, or a tuple of such
matrices (matching length and shapes of input `state`).
Raises:
@@ -438,7 +438,7 @@ def _reverse_seq(input_seq, lengths):
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
or nested tuples of tensors.
- lengths: An `Output` of dimension batch_size, containing lengths for each
+ lengths: A `Tensor` of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
@@ -602,8 +602,8 @@ def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
- If true, these `Output`s must be shaped `[max_time, batch_size, depth]`.
- If false, these `Output`s must be shaped `[batch_size, max_time, depth]`.
+ If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
+ If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
@@ -615,18 +615,18 @@ def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
Returns:
A tuple (outputs, output_states) where:
outputs: A tuple (output_fw, output_bw) containing the forward and
- the backward rnn output `Output`.
+ the backward rnn output `Tensor`.
If time_major == False (default),
- output_fw will be an `Output` shaped:
+ output_fw will be a `Tensor` shaped:
`[batch_size, max_time, cell_fw.output_size]`
- and output_bw will be an `Output` shaped:
+ and output_bw will be a `Tensor` shaped:
`[batch_size, max_time, cell_bw.output_size]`.
If time_major == True,
- output_fw will be an `Output` shaped:
+ output_fw will be a `Tensor` shaped:
`[max_time, batch_size, cell_fw.output_size]`
- and output_bw will be an `Output` shaped:
+ and output_bw will be a `Tensor` shaped:
`[max_time, batch_size, cell_bw.output_size]`.
- It returns a tuple instead of a single concatenated `Output`, unlike
+ It returns a tuple instead of a single concatenated `Tensor`, unlike
in the `bidirectional_rnn`. If the concatenated one is preferred,
the forward and backward outputs can be concatenated as
`tf.concat(2, outputs)`.
@@ -687,12 +687,12 @@ def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
This function is functionally identical to the function `rnn` above, but
performs fully dynamic unrolling of `inputs`.
- Unlike `rnn`, the input `inputs` is not a Python list of `Output`s, one for
- each frame. Instead, `inputs` may be a single `Output` where
+ Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`, one for
+ each frame. Instead, `inputs` may be a single `Tensor` where
the maximum time is either the first or second dimension (see the parameter
`time_major`). Alternatively, it may be a (possibly nested) tuple of
Tensors, each of them having matching batch and time dimensions.
- The corresponding output is either a single `Output` having the same number
+ The corresponding output is either a single `Tensor` having the same number
of time steps and batch size, or a (possibly nested) tuple of such tensors,
matching the nested structure of `cell.output_size`.
@@ -704,11 +704,11 @@ def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
cell: An instance of RNNCell.
inputs: The RNN inputs.
- If `time_major == False` (default), this must be an `Output` of shape:
+ If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such
elements.
- If `time_major == True`, this must be an `Output` of shape:
+ If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such
elements.
@@ -719,12 +719,12 @@ def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
structure of these tuples, except for the time dimension (from which the
time is taken).
- The input to `cell` at each time step will be an `Output` or (possibly
+ The input to `cell` at each time step will be a `Tensor` or (possibly
nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
- an `Output` of appropriate type and shape `[batch_size, cell.state_size]`.
+ a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
@@ -740,8 +740,8 @@ def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
- If true, these `Output`s must be shaped `[max_time, batch_size, depth]`.
- If false, these `Output`s must be shaped `[batch_size, max_time, depth]`.
+ If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
+ If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
@@ -751,12 +751,12 @@ def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
Returns:
A pair (outputs, state) where:
- outputs: The RNN output `Output`.
+ outputs: The RNN output `Tensor`.
- If time_major == False (default), this will be an `Output` shaped:
+ If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
- If time_major == True, this will be an `Output` shaped:
+ If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
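A batch-major sketch; the cell size, input dimensions, and the `tf.nn.rnn_cell` location of `LSTMCell` are assumptions for illustration:

    import tensorflow as tf

    cell = tf.nn.rnn_cell.LSTMCell(64)
    inputs = tf.placeholder(tf.float32, [None, 20, 8])  # [batch, max_time, depth]
    seq_len = tf.placeholder(tf.int32, [None])
    outputs, state = tf.nn.dynamic_rnn(cell, inputs, sequence_length=seq_len,
                                       dtype=tf.float32)
    # outputs: [batch_size, max_time, 64]; state: the final LSTMStateTuple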
@@ -869,26 +869,26 @@ def _dynamic_rnn_loop(cell,
Args:
cell: An instance of RNNCell.
- inputs: An `Output` of shape [time, batch_size, input_size], or a nested
+ inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
- initial_state: An `Output` of shape `[batch_size, state_size]`, or if
+ initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
- sequence_length: (optional) An `int32` `Output` of shape [batch_size].
+ sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
- An `Output` of shape `[time, batch_size, cell.output_size]`. If
+ A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
objects, then this returns a (possibly nested) tuple of Tensors matching
the corresponding shapes.
final_state:
- An `Output`, or possibly nested tuple of Tensors, matching in length
+ A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
@@ -1040,7 +1040,7 @@ def raw_rnn(cell, loop_fn,
For example, it can be used to implement the dynamic decoder of a seq2seq
model.
- Instead of working with `Output` objects, most operations work with
+ Instead of working with `Tensor` objects, most operations work with
`TensorArray` objects directly.
The operation of `raw_rnn`, in pseudo-code, is basically the following:
@@ -1107,23 +1107,23 @@ def raw_rnn(cell, loop_fn,
`(time, cell_output, cell_state, loop_state)`
and returns the tuple
`(finished, next_input, next_cell_state, emit_output, next_loop_state)`.
- Here `time` is an int32 scalar `Output`, `cell_output` is a
- `Output` or (possibly nested) tuple of tensors as determined by
- `cell.output_size`, and `cell_state` is an `Output`
+ Here `time` is an int32 scalar `Tensor`, `cell_output` is a
+ `Tensor` or (possibly nested) tuple of tensors as determined by
+ `cell.output_size`, and `cell_state` is a `Tensor`
or (possibly nested) tuple of tensors, as determined by the `loop_fn`
on its first call (and should match `cell.state_size`).
- The outputs are: `finished`, a boolean `Output` of
+ The outputs are: `finished`, a boolean `Tensor` of
shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
`next_cell_state`: the next state to feed to `cell`,
and `emit_output`: the output to store for this iteration.
- Note that `emit_output` should be an `Output` or (possibly nested)
+ Note that `emit_output` should be a `Tensor` or (possibly nested)
tuple of tensors with shapes and structure matching `cell.output_size`
and `cell_output` above. The parameter `cell_state` and output
`next_cell_state` may be either a single or (possibly nested) tuple
of tensors. The parameter `loop_state` and
output `next_loop_state` may be either a single or (possibly nested) tuple
- of `Output` and `TensorArray` objects. This last parameter
+ of `Tensor` and `TensorArray` objects. This last parameter
may be ignored by `loop_fn` and the return value may be `None`. If it
is not `None`, then the `loop_state` will be propagated through the RNN
loop, for use purely by `loop_fn` to keep track of its own state.
@@ -1136,8 +1136,8 @@ def raw_rnn(cell, loop_fn,
may be the output of `cell.zero_state()`. It should be a
(possibly nested) tuple structure of tensors.
If `cell.state_size` is an integer, this must be
- an `Output` of appropriate type and shape `[batch_size, cell.state_size]`.
- If `cell.state_size` is a `TensorShape`, this must be an `Output` of
+ a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
+ If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of
appropriate type and shape `[batch_size] + cell.state_size`.
If `cell.state_size` is a (possibly nested) tuple of ints or
`TensorShape`, this will be a tuple having the corresponding shapes.
diff --git a/tensorflow/python/ops/script_ops.py b/tensorflow/python/ops/script_ops.py
index 6e0e246b08..b89b76cf55 100644
--- a/tensorflow/python/ops/script_ops.py
+++ b/tensorflow/python/ops/script_ops.py
@@ -148,10 +148,10 @@ def py_func(func, inp, Tout, stateful=True, name=None):
Args:
func: A Python function, which accepts a list of NumPy `ndarray` objects
- having element types that match the corresponding `tf.Output` objects
+ having element types that match the corresponding `tf.Tensor` objects
in `inp`, and returns a list of `ndarray` objects (or a single `ndarray`)
having element types that match the corresponding values in `Tout`.
- inp: A list of `Output` objects.
+ inp: A list of `Tensor` objects.
Tout: A list or tuple of tensorflow data types or a single tensorflow data
type if there is only one, indicating what `func` returns.
stateful: (Boolean.) If True, the function should be considered stateful.
@@ -162,7 +162,7 @@ def py_func(func, inp, Tout, stateful=True, name=None):
name: A name for the operation (optional).
Returns:
- A list of `Output` or a single `Output` which `func` computes.
+ A list of `Tensor` or a single `Tensor` which `func` computes.
"""
token = _py_funcs.insert(func)
# We tie the registered function's life-time with the current
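A sketch wrapping a NumPy function as an op; the function body is an arbitrary assumed example:

    import numpy as np
    import tensorflow as tf

    def log1p(x):           # receives a NumPy ndarray, returns an ndarray
        return np.log1p(x)

    inp = tf.constant([0.0, 1.0, 2.0])
    out = tf.py_func(log1p, [inp], tf.float32)  # a float32 Tensor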
diff --git a/tensorflow/python/ops/sparse_ops.py b/tensorflow/python/ops/sparse_ops.py
index e29248ed6c..4363be820f 100644
--- a/tensorflow/python/ops/sparse_ops.py
+++ b/tensorflow/python/ops/sparse_ops.py
@@ -238,10 +238,10 @@ def sparse_concat(concat_dim, sp_inputs, name=None, expand_nonconcat_dim=False):
def sparse_add(a, b, thresh=0):
"""Adds two tensors, at least one of each is a `SparseTensor`.
- If one `SparseTensor` and one `Output` are passed in, returns an `Output`. If
+ If one `SparseTensor` and one `Tensor` are passed in, returns a `Tensor`. If
both arguments are `SparseTensor`s, this returns a `SparseTensor`. The order
of arguments does not matter. Use vanilla `tf.add()` for adding two dense
- `Output`s.
+ `Tensor`s.
The indices of any input `SparseTensor` are assumed ordered in standard
lexicographic order. If this is not the case, before this step run
@@ -270,19 +270,19 @@ def sparse_add(a, b, thresh=0):
* `thresh == 0.21`: .1, 0, and -.2 will vanish.
Args:
- a: The first operand; `SparseTensor` or `Output`.
- b: The second operand; `SparseTensor` or `Output`. At least one operand
+ a: The first operand; `SparseTensor` or `Tensor`.
+ b: The second operand; `SparseTensor` or `Tensor`. At least one operand
must be sparse.
- thresh: A 0-D `Output`. The magnitude threshold that determines if an
+ thresh: A 0-D `Tensor`. The magnitude threshold that determines if an
output value/index pair takes space. Its dtype should match that of the
values if they are real; if the latter are complex64/complex128, then the
dtype should be float32/float64, correspondingly.
Returns:
- A `SparseTensor` or an `Output`, representing the sum.
+ A `SparseTensor` or a `Tensor`, representing the sum.
Raises:
- TypeError: If both `a` and `b` are `Output`s. Use `tf.add()` instead.
+ TypeError: If both `a` and `b` are `Tensor`s. Use `tf.add()` instead.
"""
sparse_classes = (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)
if not any(isinstance(inp, sparse_classes) for inp in [a, b]):
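A sketch of the sparse-plus-dense case described above, with assumed values:

    import tensorflow as tf

    sp = tf.SparseTensor([[0, 0], [1, 2]], [1.0, 2.0], [2, 3])
    dense = tf.ones([2, 3])
    result = tf.sparse_add(sp, dense)  # one operand dense -> returns a Tensor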
@@ -407,7 +407,7 @@ def sparse_reshape(sp_input, shape, name=None):
Args:
sp_input: The input `SparseTensor`.
- shape: A 1-D (vector) int64 `Output` specifying the new dense shape of the
+ shape: A 1-D (vector) int64 `Tensor` specifying the new dense shape of the
represented `SparseTensor`.
name: A name prefix for the returned tensors (optional)
@@ -452,7 +452,7 @@ def sparse_split(split_dim, num_split, sp_input, name=None):
[ ]
Args:
- split_dim: A 0-D `int32` `Output`. The dimension along which to split.
+ split_dim: A 0-D `int32` `Tensor`. The dimension along which to split.
num_split: A Python integer. The number of ways to split.
sp_input: The `SparseTensor` to split.
name: A name for the operation (optional).
@@ -509,21 +509,21 @@ def sparse_to_dense(sparse_indices,
are checked during execution.
Args:
- sparse_indices: A 0-D, 1-D, or 2-D `Output` of type `int32` or `int64`.
+ sparse_indices: A 0-D, 1-D, or 2-D `Tensor` of type `int32` or `int64`.
`sparse_indices[i]` contains the complete index where `sparse_values[i]`
will be placed.
- output_shape: A 1-D `Output` of the same type as `sparse_indices`. Shape
+ output_shape: A 1-D `Tensor` of the same type as `sparse_indices`. Shape
of the dense output tensor.
- sparse_values: A 0-D or 1-D `Output`. Values corresponding to each row of
+ sparse_values: A 0-D or 1-D `Tensor`. Values corresponding to each row of
`sparse_indices`, or a scalar value to be used for all sparse indices.
- default_value: A 0-D `Output` of the same type as `sparse_values`. Value
+ default_value: A 0-D `Tensor` of the same type as `sparse_values`. Value
to set for indices not specified in `sparse_indices`. Defaults to zero.
validate_indices: A boolean value. If True, indices are checked to make
sure they are sorted in lexicographic order and that there are no repeats.
name: A name for the operation (optional).
Returns:
- Dense `Output` of shape `output_shape`. Has the same type as
+ Dense `Tensor` of shape `output_shape`. Has the same type as
`sparse_values`.
"""
return gen_sparse_ops._sparse_to_dense(
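A sketch with assumed indices and values, showing how `default_value` fills the unspecified entries:

    import tensorflow as tf

    dense = tf.sparse_to_dense(sparse_indices=[[0, 0], [1, 2]],
                               output_shape=[3, 4],
                               sparse_values=[5.0, 6.0],
                               default_value=0.0)
    # [[5., 0., 0., 0.],
    #  [0., 0., 6., 0.],
    #  [0., 0., 0., 0.]]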
@@ -540,7 +540,7 @@ def sparse_reduce_sum(sp_input, axis=None, keep_dims=False,
"""Computes the sum of elements across dimensions of a SparseTensor.
This Op takes a SparseTensor and is the sparse counterpart to
- `tf.reduce_sum()`. In particular, this Op also returns a dense `Output`
+ `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor`
instead of a sparse one.
Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless
@@ -1043,14 +1043,14 @@ def sparse_fill_empty_rows(sp_input, default_value, name=None):
def serialize_sparse(sp_input, name=None):
- """Serialize a `SparseTensor` into a string 3-vector (1-D `Output`) object.
+ """Serialize a `SparseTensor` into a string 3-vector (1-D `Tensor`) object.
Args:
sp_input: The input `SparseTensor`.
name: A name prefix for the returned tensors (optional).
Returns:
- A string 3-vector (1D `Output`), with each column representing the
+ A string 3-vector (1D `Tensor`), with each column representing the
serialized `SparseTensor`'s indices, values, and shape (respectively).
Raises:
@@ -1063,12 +1063,12 @@ def serialize_sparse(sp_input, name=None):
def serialize_many_sparse(sp_input, name=None):
- """Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Output`.
+ """Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` string `Tensor`.
The `SparseTensor` must have rank `R` greater than 1, and the first dimension
is treated as the minibatch dimension. Elements of the `SparseTensor`
must be sorted in increasing order of this first dimension. The serialized
- `SparseTensor` objects going into each row of the output `Output` will have
+ `SparseTensor` objects going into each row of the output `Tensor` will have
rank `R-1`.
The minibatch size `N` is extracted from `sparse_shape[0]`.
@@ -1078,7 +1078,7 @@ def serialize_many_sparse(sp_input, name=None):
name: A name prefix for the returned tensors (optional).
Returns:
- A string matrix (2-D `Output`) with `N` rows and `3` columns.
+ A string matrix (2-D `Tensor`) with `N` rows and `3` columns.
Each column represents serialized `SparseTensor`'s indices, values, and
shape (respectively).
@@ -1137,7 +1137,7 @@ def deserialize_many_sparse(serialized_sparse, dtype, rank=None, name=None):
shape = [2 50]
Args:
- serialized_sparse: 2-D `Output` of type `string` of shape `[N, 3]`.
+ serialized_sparse: 2-D `Tensor` of type `string` of shape `[N, 3]`.
The serialized and packed `SparseTensor` objects.
dtype: The `dtype` of the serialized `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
@@ -1526,7 +1526,7 @@ def _add_sparse_to_tensors_map(sp_input, container=None,
name: A name prefix for the returned tensors (optional).
Returns:
- A string 1-vector (1D `Output`), with the single element representing the
+ A string 1-vector (1D `Tensor`), with the single element representing
a unique handle to a `SparseTensor` stored by the `SparseTensorMap`
underlying this op.
@@ -1547,7 +1547,7 @@ def _add_many_sparse_to_tensors_map(sp_input, container=None,
The `SparseTensor` must have rank `R` greater than 1, and the first dimension
is treated as the minibatch dimension. Elements of the `SparseTensor`
must be sorted in increasing order of this first dimension. The serialized
- `SparseTensor` objects going into each row of the output `Output` will have
+ `SparseTensor` objects going into each row of the output `Tensor` will have
rank `R-1`.
The minibatch size `N` is extracted from `sparse_shape[0]`.
@@ -1560,7 +1560,7 @@ def _add_many_sparse_to_tensors_map(sp_input, container=None,
name: A name prefix for the returned tensors (optional).
Returns:
- A string matrix (2-D `Output`) with `N` rows and `1` column.
+ A string matrix (2-D `Tensor`) with `N` rows and `1` column.
Each row represents a unique handle to a `SparseTensor` stored by
the `SparseTensorMap` underlying this op.
@@ -1623,7 +1623,7 @@ def _take_many_sparse_from_tensors_map(
Args:
sparse_map_op: The `Operation` that created the original handles.
Usually this is, e.g., `add_sparse_to_tensors_map(...).op`.
- sparse_handles: 2-D `Output` of type `string` of shape `[N, 1]`.
+ sparse_handles: 2-D `Tensor` of type `string` of shape `[N, 1]`.
The serialized and packed `SparseTensor` objects.
rank: (optional) Python int, the rank of the `SparseTensor` objects.
name: A name prefix for the returned tensors (optional)
diff --git a/tensorflow/python/ops/special_math_ops.py b/tensorflow/python/ops/special_math_ops.py
index ea00f7c8b1..85ef4111d3 100644
--- a/tensorflow/python/ops/special_math_ops.py
+++ b/tensorflow/python/ops/special_math_ops.py
@@ -52,7 +52,7 @@ def lbeta(x, name='lbeta'):
bivariate beta function.
Args:
- x: A rank `n + 1` `Output` with type `float`, or `double`.
+ x: A rank `n + 1` `Tensor` with type `float`, or `double`.
name: A name for the operation (optional).
Returns:
@@ -145,11 +145,11 @@ def einsum(equation, *inputs):
Args:
equation: a `str` describing the contraction, in the same format as
`numpy.einsum`.
- inputs: the inputs to contract (each one an `Output`), whose shapes should
+ inputs: the inputs to contract (each one a `Tensor`), whose shapes should
be consistent with `equation`.
Returns:
- The contracted `Output`, with shape determined by `equation`.
+ The contracted `Tensor`, with shape determined by `equation`.
Raises:
ValueError: If
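A sketch of a plain matrix product written as a contraction, assuming the function is exported as `tf.einsum`:

    import tensorflow as tf

    a = tf.random_normal([2, 3])
    b = tf.random_normal([3, 4])
    c = tf.einsum('ij,jk->ik', a, b)  # same contraction as tf.matmul(a, b)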
@@ -230,16 +230,16 @@ def _einsum_reduction(t0, t0_axis_labels, t1, t1_axis_labels, axes_to_sum):
"""Helper for einsum() that computes the result of a two-argument einsum().
Args:
- t0: an `Output`
+ t0: a `Tensor`
t0_axis_labels: a string of axis labels. This string's length must equal
the rank of t0.
- t1: an `Output`
+ t1: a `Tensor`
t1_axis_labels: a string of axis labels. This string's length must equal
the rank of t1.
axes_to_sum: set of labels of axes to be summed over
Returns:
- An `Output` whose elements are obtained by summing, over all axes in
+ A `Tensor` whose elements are obtained by summing, over all axes in
`axes_to_sum`, the corresponding elements of `t0` and `t1`.
For example, if t0_axis_labels == 'abijk', t1_axis_labels == 'acjkl', and
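For a concrete contraction, here is a matrix product written as an einsum, assuming the op is exposed as `tf.einsum` as in TF 1.x:

```python
import tensorflow as tf

a = tf.ones([2, 3])  # axes 'ij'
b = tf.ones([3, 4])  # axes 'jk'

# Sum over the shared 'j' axis: an ordinary matrix multiplication.
c = tf.einsum('ij,jk->ik', a, b)

with tf.Session() as sess:
    print(sess.run(c).shape)  # (2, 4)
```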
diff --git a/tensorflow/python/ops/string_ops.py b/tensorflow/python/ops/string_ops.py
index 0adc8dfc39..c46c24af9a 100644
--- a/tensorflow/python/ops/string_ops.py
+++ b/tensorflow/python/ops/string_ops.py
@@ -85,8 +85,8 @@ def string_split(source, delimiter=" "): # pylint: disable=invalid-name
st.values = ['hello', 'world', 'a', 'b', 'c']
Args:
- source: `1-D` string `Output`, the strings to split.
- delimiter: `0-D` string `Output`, the delimiter character, the string should
+ source: `1-D` string `Tensor`, the strings to split.
+ delimiter: `0-D` string `Tensor`, the delimiter character, the string should
be length 0 or 1.
Returns:
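A short usage sketch of `tf.string_split`, matching the docstring's example above (TF 1.x-era API assumed):

```python
import tensorflow as tf

source = tf.constant(["hello world", "a b c"])
st = tf.string_split(source, delimiter=" ")  # yields a SparseTensor

with tf.Session() as sess:
    print(sess.run(st.values))  # [b'hello' b'world' b'a' b'b' b'c']
```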
diff --git a/tensorflow/python/ops/summary_ops.py b/tensorflow/python/ops/summary_ops.py
index 761d32a09c..4010a020de 100644
--- a/tensorflow/python/ops/summary_ops.py
+++ b/tensorflow/python/ops/summary_ops.py
@@ -56,7 +56,7 @@ def tensor_summary( # pylint: disable=invalid-name
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
Returns:
- A scalar `Output` of type `string`. The serialized `Summary` protocol
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
# pylint: enable=line-too-long
diff --git a/tensorflow/python/ops/tensor_array_grad.py b/tensorflow/python/ops/tensor_array_grad.py
index 4b3bcf9d28..1b1f3926d4 100644
--- a/tensorflow/python/ops/tensor_array_grad.py
+++ b/tensorflow/python/ops/tensor_array_grad.py
@@ -53,7 +53,7 @@ def _GetGradSource(op_or_tensor):
that is used to create the gradient TensorArray.
Args:
- op_or_tensor: `Output` or `Operation` which is an input to a
+ op_or_tensor: `Tensor` or `Operation` which is an input to a
TensorArray*Grad call.
Returns:
@@ -79,10 +79,10 @@ def _TensorArrayReadGrad(op, grad):
Args:
op: Forward TensorArrayRead op.
- grad: Gradient `Output` to TensorArrayRead.
+ grad: Gradient `Tensor` to TensorArrayRead.
Returns:
- A flow `Output`, which can be used in control dependencies to
+ A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
@@ -108,10 +108,10 @@ def _TensorArrayWriteGrad(op, flow):
Args:
op: Forward TensorArrayWrite op.
- flow: Gradient `Output` flow to TensorArrayWrite.
+ flow: Gradient `Tensor` flow to TensorArrayWrite.
Returns:
- A grad `Output`, the gradient created in an upstream ReadGrad or PackGrad.
+ A grad `Tensor`, the gradient created in an upstream ReadGrad or PackGrad.
"""
# handle is the output store_handle of TensorArrayReadGrad or
# the handle output of TensorArrayWriteGrad. we must use this one.
@@ -132,10 +132,10 @@ def _TensorArrayGatherGrad(op, grad):
Args:
op: Forward TensorArrayGather op.
- grad: Gradient `Output` to TensorArrayGather.
+ grad: Gradient `Tensor` to TensorArrayGather.
Returns:
- A flow `Output`, which can be used in control dependencies to
+ A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
@@ -161,10 +161,10 @@ def _TensorArrayScatterGrad(op, flow):
Args:
op: Forward TensorArrayScatter op.
- flow: Gradient `Output` flow to TensorArrayScatter.
+ flow: Gradient `Tensor` flow to TensorArrayScatter.
Returns:
- A grad `Output`, the gradient created in upstream ReadGrads or PackGrad.
+ A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
"""
handle = op.inputs[0]
indices = op.inputs[1]
@@ -183,10 +183,10 @@ def _TensorArrayConcatGrad(op, grad, unused_lengths_grad):
Args:
op: Forward TensorArrayConcat op.
- grad: Gradient `Output` to TensorArrayConcat.
+ grad: Gradient `Tensor` to TensorArrayConcat.
Returns:
- A flow `Output`, which can be used in control dependencies to
+ A flow `Tensor`, which can be used in control dependencies to
force the write of `grad` to the gradient `TensorArray`.
"""
# Note: the forward flow dependency in the call to grad() is necessary for
@@ -213,10 +213,10 @@ def _TensorArraySplitGrad(op, flow):
Args:
op: Forward TensorArraySplit op.
- flow: Gradient `Output` flow to TensorArraySplit.
+ flow: Gradient `Tensor` flow to TensorArraySplit.
Returns:
- A grad `Output`, the gradient created in upstream ReadGrads or PackGrad.
+ A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
"""
handle = op.inputs[0]
dtype = op.get_attr("T")
diff --git a/tensorflow/python/ops/tensor_array_ops.py b/tensorflow/python/ops/tensor_array_ops.py
index f3b7444412..26b71d5920 100644
--- a/tensorflow/python/ops/tensor_array_ops.py
+++ b/tensorflow/python/ops/tensor_array_ops.py
@@ -75,7 +75,7 @@ class TensorArray(object):
Args:
dtype: (required) data type of the TensorArray.
- size: (optional) int32 scalar `Output`: the size of the TensorArray.
+ size: (optional) int32 scalar `Tensor`: the size of the TensorArray.
Required if handle is not provided.
dynamic_size: (optional) Python bool: If true, writes to the TensorArray
can grow the TensorArray past its initial size. Default: False.
@@ -85,9 +85,9 @@ class TensorArray(object):
tensor_array_name: (optional) Python string: the name of the TensorArray.
This is used when creating the TensorArray handle. If this value is
set, handle should be None.
- handle: (optional) An `Output` handle to an existing TensorArray. If this
+ handle: (optional) A `Tensor` handle to an existing TensorArray. If this
is set, tensor_array_name should be None.
- flow: (optional) A float `Output` scalar coming from an existing
+ flow: (optional) A float `Tensor` scalar coming from an existing
`TensorArray.flow`.
infer_shape: (optional, default: True) If True, shape inference
is enabled. In this case, all elements must have the same shape.
@@ -147,7 +147,7 @@ class TensorArray(object):
@property
def flow(self):
- """The flow `Output` forcing ops leading to this TensorArray state."""
+ """The flow `Tensor` forcing ops leading to this TensorArray state."""
return self._flow
@property
@@ -230,7 +230,7 @@ class TensorArray(object):
return ta
def pack(self, name=None):
- """Return the values in the TensorArray as a packed `Output`.
+ """Return the values in the TensorArray as a packed `Tensor`.
All of the values must have been written and their shapes must all match.
@@ -245,13 +245,13 @@ class TensorArray(object):
return self.gather(math_ops.range(0, self.size()), name=name)
def gather(self, indices, name=None):
- """Return selected values in the TensorArray as a packed `Output`.
+ """Return selected values in the TensorArray as a packed `Tensor`.
All of the selected values must have been written and their shapes
must all match.
Args:
- indices: A `1-D` `Output` taking values in `[0, max_value)`. If
+ indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If
the `TensorArray` is not dynamic, `max_value=size()`.
name: A name for the operation (optional).
@@ -275,7 +275,7 @@ class TensorArray(object):
return value
def concat(self, name=None):
- """Return the values in the TensorArray as a concatenated `Output`.
+ """Return the values in the TensorArray as a concatenated `Tensor`.
All of the values must have been written, their ranks must match,
and their shapes must all match for all dimensions except the first.
@@ -303,7 +303,7 @@ class TensorArray(object):
return value
def unpack(self, value, name=None):
- """Pack the values of an `Output` in the TensorArray.
+ """Pack the values of a `Tensor` in the TensorArray.
Args:
value: (N+1)-D. Tensor of type `dtype`. The Tensor to unpack.
@@ -323,10 +323,10 @@ class TensorArray(object):
indices=math_ops.range(0, num_elements), value=value, name=name)
def scatter(self, indices, value, name=None):
- """Scatter the values of an `Output` in specific indices of a `TensorArray`.
+ """Scatter the values of a `Tensor` in specific indices of a `TensorArray`.
Args:
- indices: A `1-D` `Output` taking values in `[0, max_value)`. If
+ indices: A `1-D` `Tensor` taking values in `[0, max_value)`. If
the `TensorArray` is not dynamic, `max_value=size()`.
value: (N+1)-D. Tensor of type `dtype`. The Tensor to scatter.
name: A name for the operation (optional).
@@ -361,7 +361,7 @@ class TensorArray(object):
return ta
def split(self, value, lengths, name=None):
- """Split the values of an `Output` into the TensorArray.
+ """Split the values of a `Tensor` into the TensorArray.
Args:
value: (N+1)-D. Tensor of type `dtype`. The Tensor to split.
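A minimal sketch of the flow-threading behavior documented above, assuming the contemporaneous API (`pack`/`unpack` were later renamed `stack`/`unstack`):

```python
import tensorflow as tf

ta = tf.TensorArray(dtype=tf.float32, size=3)

# Each write returns a new TensorArray whose flow Tensor carries the control
# dependency, so the result must be threaded through subsequent calls.
ta = ta.write(0, 10.0)
ta = ta.write(1, 20.0)
ta = ta.write(2, 30.0)

packed = ta.pack()  # 1-D Tensor: [10., 20., 30.]

with tf.Session() as sess:
    print(sess.run(packed))
```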
diff --git a/tensorflow/python/ops/variable_scope.py b/tensorflow/python/ops/variable_scope.py
index ada96f8d7f..9fceeb2c35 100644
--- a/tensorflow/python/ops/variable_scope.py
+++ b/tensorflow/python/ops/variable_scope.py
@@ -218,7 +218,7 @@ class _VariableStore(object):
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
- Accessing this object as an `Output` returns the shards concatenated along
+ Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
@@ -965,7 +965,7 @@ def get_variable(name,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
- Accessing this object as an `Output` returns the shards concatenated along
+ Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
@@ -1331,7 +1331,7 @@ def variable_scope(name_or_scope,
default_name: The default name to use if the `name_or_scope` argument is
`None`, this name will be uniquified. If name_or_scope is provided it
won't be used and therefore it is not required and can be None.
- values: The list of `Output` arguments that are passed to the op function.
+ values: The list of `Tensor` arguments that are passed to the op function.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
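A sketch of the partitioned-variable behavior described above, assuming the TF 1.x `tf.fixed_size_partitioner` helper:

```python
import tensorflow as tf

with tf.variable_scope("embeddings",
                       partitioner=tf.fixed_size_partitioner(num_shards=2)):
    # Returns a PartitionedVariable backed by two shards.
    weights = tf.get_variable("weights", shape=[10, 4],
                              initializer=tf.constant_initializer(0.0))

# Using the object as a Tensor concatenates the shards along the
# partition axis, recovering the full [10, 4] value.
full = tf.convert_to_tensor(weights)
```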
diff --git a/tensorflow/python/ops/variables.py b/tensorflow/python/ops/variables.py
index 0af053a194..8ca982f939 100644
--- a/tensorflow/python/ops/variables.py
+++ b/tensorflow/python/ops/variables.py
@@ -36,7 +36,7 @@ class Variable(object):
variable to the graph by constructing an instance of the class `Variable`.
The `Variable()` constructor requires an initial value for the variable,
- which can be an `Output` of any type and shape. The initial value defines the
+ which can be a `Tensor` of any type and shape. The initial value defines the
type and shape of the variable. After construction, the type and shape of
the variable are fixed. The value can be changed using one of the assign
methods.
@@ -44,9 +44,9 @@ class Variable(object):
If you want to change the shape of a variable later you have to use an
`assign` Op with `validate_shape=False`.
- Just like any `Output`, variables created with `Variable()` can be used as
+ Just like any `Tensor`, variables created with `Variable()` can be used as
inputs for other Ops in the graph. Additionally, all the operators
- overloaded for the `Output` class are carried over to variables, so you can
+ overloaded for the `Tensor` class are carried over to variables, so you can
also add nodes to the graph by just doing arithmetic on variables.
```python
@@ -168,7 +168,7 @@ class Variable(object):
variable to its initial value.
Args:
- initial_value: An `Output`, or Python object convertible to an `Output`,
+ initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
@@ -238,7 +238,7 @@ class Variable(object):
"""Creates a new variable from arguments.
Args:
- initial_value: An `Output`, or Python object convertible to an `Output`,
+ initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
@@ -409,7 +409,7 @@ class Variable(object):
See [`value()`](#Variable.value).
Returns:
- An `Output` containing the value of the variable.
+ A `Tensor` containing the value of the variable.
"""
return self._snapshot
@@ -431,7 +431,7 @@ class Variable(object):
You usually do not need to call this method as all ops that need the value
of the variable call it automatically through a `convert_to_tensor()` call.
- Returns an `Output` which holds the value of the variable. You can not
+ Returns a `Tensor` which holds the value of the variable. You cannot
assign a new value to this tensor as it is not a reference to the variable.
See [`ref()`](#Variable.ref) if you want to get a reference to the
variable.
@@ -442,7 +442,7 @@ class Variable(object):
is on a different device it will get a copy of the variable.
Returns:
- An `Output` containing the value of the variable.
+ A `Tensor` containing the value of the variable.
"""
return self._snapshot
@@ -463,13 +463,13 @@ class Variable(object):
You usually do not need to call this method as all ops that need a reference
to the variable call it automatically.
- Returns is an `Output` which holds a reference to the variable. You can
+ Returns a `Tensor` which holds a reference to the variable. You can
assign a new value to the variable by passing the tensor to an assign op.
See [`value()`](#Variable.value) if you want to get the value of the
variable.
Returns:
- An `Output` that is a reference to the variable.
+ A `Tensor` that is a reference to the variable.
"""
return self._variable
@@ -530,7 +530,7 @@ class Variable(object):
```
Returns:
- An `Output` holding the value of this variable after its initializer
+ A `Tensor` holding the value of this variable after its initializer
has run.
"""
with ops.control_dependencies(None):
@@ -555,7 +555,7 @@ class Variable(object):
the variable.
Returns:
- An `Output`.
+ A `Tensor`.
"""
return self._initial_value
@@ -565,11 +565,11 @@ class Variable(object):
This is essentially a shortcut for `assign(self, value)`.
Args:
- value: An `Output`. The new value for this variable.
+ value: A `Tensor`. The new value for this variable.
use_locking: If `True`, use locking during the assignment.
Returns:
- An `Output` that will hold the new value of this variable after
+ A `Tensor` that will hold the new value of this variable after
the assignment has completed.
"""
return state_ops.assign(self._variable, value, use_locking=use_locking)
@@ -580,11 +580,11 @@ class Variable(object):
This is essentially a shortcut for `assign_add(self, delta)`.
Args:
- delta: An `Output`. The value to add to this variable.
+ delta: A `Tensor`. The value to add to this variable.
use_locking: If `True`, use locking during the operation.
Returns:
- An `Output` that will hold the new value of this variable after
+ A `Tensor` that will hold the new value of this variable after
the addition has completed.
"""
return state_ops.assign_add(self._variable, delta, use_locking=use_locking)
@@ -595,11 +595,11 @@ class Variable(object):
This is essentially a shortcut for `assign_sub(self, delta)`.
Args:
- delta: An `Output`. The value to subtract from this variable.
+ delta: A `Tensor`. The value to subtract from this variable.
use_locking: If `True`, use locking during the operation.
Returns:
- An `Output` that will hold the new value of this variable after
+ A `Tensor` that will hold the new value of this variable after
the subtraction has completed.
"""
return state_ops.assign_sub(self._variable, delta, use_locking=use_locking)
@@ -615,7 +615,7 @@ class Variable(object):
use_locking: If `True`, use locking during the operation.
Returns:
- An `Output` that will hold the new value of this variable after
+ A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
@@ -645,7 +645,7 @@ class Variable(object):
limit: value at which incrementing the variable raises an error.
Returns:
- An `Output` that will hold the variable value before the increment. If no
+ A `Tensor` that will hold the variable value before the increment. If no
other Op modifies this variable, the values produced will all be
distinct.
"""
@@ -976,14 +976,14 @@ class PartitionedVariable(object):
return [i for i, p in enumerate(self._partitions) if p > 1]
def _concat(self):
- """Returns the overall concatenated value as an `Output`.
+ """Returns the overall concatenated value as a `Tensor`.
This is different from using the partitioned variable directly as a tensor
(through tensor conversion and `as_tensor`) in that it creates a new set of
operations that keeps the control dependencies from its scope.
Returns:
- `Output` containing the concatenated value.
+ `Tensor` containing the concatenated value.
"""
if len(self._variable_list) == 1:
with ops.name_scope(None):
@@ -1004,14 +1004,14 @@ class PartitionedVariable(object):
return array_ops.identity(concatenated, name=self._name)
def as_tensor(self):
- """Returns the overall concatenated value as an `Output`.
+ """Returns the overall concatenated value as a `Tensor`.
The returned tensor will not inherit the control dependencies from the scope
where the value is used, which is similar to getting the value of
`Variable`.
Returns:
- `Output` containing the concatenated value.
+ `Tensor` containing the concatenated value.
"""
with ops.control_dependencies(None):
return self._concat()
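A minimal `tf.Variable` sketch illustrating the points above (the initial value fixes dtype and shape, Tensor operators carry over, and `assign_*` returns the post-update value):

```python
import tensorflow as tf

# The initial value determines the variable's type and shape.
w = tf.Variable(tf.zeros([2, 2]), name="w")

# Variables support the same overloaded operators as Tensors.
doubled = w * 2.0

# assign_add returns a Tensor holding the value after the update completes.
update = w.assign_add(tf.ones([2, 2]))

with tf.Session() as sess:
    sess.run(w.initializer)
    sess.run(update)
    print(sess.run(doubled))  # [[2. 2.] [2. 2.]]
```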
diff --git a/tensorflow/python/platform/benchmark.py b/tensorflow/python/platform/benchmark.py
index a6084dfd41..e98f56cc9c 100644
--- a/tensorflow/python/platform/benchmark.py
+++ b/tensorflow/python/platform/benchmark.py
@@ -200,11 +200,11 @@ class TensorFlowBenchmark(Benchmark):
name=None,
extras=None,
mbs=0):
- """Run an op or output in the given session. Report the results.
+ """Run an op or tensor in the given session. Report the results.
Args:
sess: `Session` object to use for timing.
- op_or_tensor: `Operation` or `Output` to benchmark.
+ op_or_tensor: `Operation` or `Tensor` to benchmark.
feed_dict: A `dict` of values to feed for each op iteration (see the
`feed_dict` parameter of `Session.run`).
burn_iters: Number of burn-in iterations to run.
diff --git a/tensorflow/python/summary/summary.py b/tensorflow/python/summary/summary.py
index daa7d18713..4e29bbd88d 100644
--- a/tensorflow/python/summary/summary.py
+++ b/tensorflow/python/summary/summary.py
@@ -106,7 +106,7 @@ def scalar(name, tensor, collections=None):
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
Returns:
- A scalar `Output` of type `string`. Which contains a `Summary` protobuf.
+ A scalar `Tensor` of type `string`, which contains a `Summary` protobuf.
Raises:
ValueError: If tensor has the wrong shape or type.
@@ -153,14 +153,14 @@ def image(name, tensor, max_outputs=3, collections=None):
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
- tensor: A 4-D `uint8` or `float32` `Output` of shape `[batch_size, height,
+ tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size, height,
width, channels]` where `channels` is 1, 3, or 4.
max_outputs: Max number of batch elements to generate images for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
Returns:
- A scalar `Output` of type `string`. The serialized `Summary` protocol
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
name = _clean_tag(name)
@@ -188,13 +188,13 @@ def histogram(name, values, collections=None):
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
- values: A real numeric `Output`. Any shape. Values to use to
+ values: A real numeric `Tensor`. Any shape. Values to use to
build the histogram.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
Returns:
- A scalar `Output` of type `string`. The serialized `Summary` protocol
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
# pylint: enable=line-too-long
@@ -227,16 +227,16 @@ def audio(name, tensor, sample_rate, max_outputs=3, collections=None):
Args:
name: A name for the generated node. Will also serve as a series name in
TensorBoard.
- tensor: A 3-D `float32` `Output` of shape `[batch_size, frames, channels]`
- or a 2-D `float32` `Output` of shape `[batch_size, frames]`.
- sample_rate: A Scalar `float32` `Output` indicating the sample rate of the
+ tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames, channels]`
+ or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
+ sample_rate: A scalar `float32` `Tensor` indicating the sample rate of the
signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
collections: Optional list of ops.GraphKeys. The collections to add the
summary to. Defaults to [_ops.GraphKeys.SUMMARIES]
Returns:
- A scalar `Output` of type `string`. The serialized `Summary` protocol
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer.
"""
# pylint: enable=line-too-long
@@ -268,14 +268,14 @@ def merge(inputs, collections=None, name=None):
in the summaries to merge use the same tag.
Args:
- inputs: A list of `string` `Output` objects containing serialized `Summary`
+ inputs: A list of `string` `Tensor` objects containing serialized `Summary`
protocol buffers.
collections: Optional list of graph collections keys. The new summary op is
added to these collections. Defaults to `[GraphKeys.SUMMARIES]`.
name: A name for the operation (optional).
Returns:
- A scalar `Output` of type `string`. The serialized `Summary` protocol
+ A scalar `Tensor` of type `string`. The serialized `Summary` protocol
buffer resulting from the merging.
"""
# pylint: enable=line-too-long
@@ -296,7 +296,7 @@ def merge_all(key=_ops.GraphKeys.SUMMARIES):
Returns:
If no summaries were collected, returns None. Otherwise returns a scalar
- `Output` of type `string` containing the serialized `Summary` protocol
+ `Tensor` of type `string` containing the serialized `Summary` protocol
buffer resulting from the merging.
"""
summary_ops = _ops.get_collection(key)
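A sketch tying the summary ops above together; `/tmp/logdir` is a placeholder, and `tf.summary.FileWriter` is the TF 1.x name (the class contemporaneous with this diff was still called `SummaryWriter`):

```python
import tensorflow as tf

loss = tf.constant(0.25)
tf.summary.scalar("loss", loss)      # scalar string Tensor: a Summary proto
merged = tf.summary.merge_all()

with tf.Session() as sess:
    summary_str = sess.run(merged)   # serialized Summary protocol buffer
    writer = tf.summary.FileWriter("/tmp/logdir", sess.graph)
    writer.add_summary(summary_str, global_step=0)
    writer.close()
```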
diff --git a/tensorflow/python/summary/summary_iterator.py b/tensorflow/python/summary/summary_iterator.py
index 91b75c7e63..9c3e8fcf4e 100644
--- a/tensorflow/python/summary/summary_iterator.py
+++ b/tensorflow/python/summary/summary_iterator.py
@@ -137,7 +137,7 @@ class SummaryWriter(object):
You can pass the result of evaluating any summary op, using
[`Session.run()`](client.md#Session.run) or
- [`Output.eval()`](framework.md#Output.eval), to this
+ [`Tensor.eval()`](framework.md#Tensor.eval), to this
function. Alternatively, you can pass a `tf.Summary` protocol
buffer that you populate with your own data. The latter is
commonly done to report evaluation results in event files.
diff --git a/tensorflow/python/summary/writer/writer.py b/tensorflow/python/summary/writer/writer.py
index 2b394d376e..fa1715bbcb 100644
--- a/tensorflow/python/summary/writer/writer.py
+++ b/tensorflow/python/summary/writer/writer.py
@@ -98,7 +98,7 @@ class SummaryToEventTransformer(object):
You can pass the result of evaluating any summary op, using
[`Session.run()`](client.md#Session.run) or
- [`Output.eval()`](framework.md#Output.eval), to this
+ [`Tensor.eval()`](framework.md#Tensor.eval), to this
function. Alternatively, you can pass a `tf.Summary` protocol
buffer that you populate with your own data. The latter is
commonly done to report evaluation results in event files.
diff --git a/tensorflow/python/training/adadelta.py b/tensorflow/python/training/adadelta.py
index c0f1bb184e..50bb28ebcd 100644
--- a/tensorflow/python/training/adadelta.py
+++ b/tensorflow/python/training/adadelta.py
@@ -38,9 +38,9 @@ class AdadeltaOptimizer(optimizer.Optimizer):
"""Construct a new Adadelta optimizer.
Args:
- learning_rate: An `Output` or a floating point value. The learning rate.
- rho: An `Output` or a floating point value. The decay rate.
- epsilon: An `Output` or a floating point value. A constant epsilon used
+ learning_rate: A `Tensor` or a floating point value. The learning rate.
+ rho: A `Tensor` or a floating point value. The decay rate.
+ epsilon: A `Tensor` or a floating point value. A constant epsilon used
to better condition the grad update.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
diff --git a/tensorflow/python/training/adagrad.py b/tensorflow/python/training/adagrad.py
index 382be6a5f4..9646c5c228 100644
--- a/tensorflow/python/training/adagrad.py
+++ b/tensorflow/python/training/adagrad.py
@@ -38,7 +38,7 @@ class AdagradOptimizer(optimizer.Optimizer):
"""Construct a new Adagrad optimizer.
Args:
- learning_rate: An `Output` or a floating point value. The learning rate.
+ learning_rate: A `Tensor` or a floating point value. The learning rate.
initial_accumulator_value: A floating point value.
Starting value for the accumulators, must be positive.
use_locking: If `True` use locks for update operations.
diff --git a/tensorflow/python/training/adagrad_da.py b/tensorflow/python/training/adagrad_da.py
index 4d7601fdd9..4f1b468986 100644
--- a/tensorflow/python/training/adagrad_da.py
+++ b/tensorflow/python/training/adagrad_da.py
@@ -53,8 +53,8 @@ class AdagradDAOptimizer(optimizer.Optimizer):
"""Construct a new AdagradDA optimizer.
Args:
- learning_rate: An `Output` or a floating point value. The learning rate.
- global_step: An `Output` containing the current training step number.
+ learning_rate: A `Tensor` or a floating point value. The learning rate.
+ global_step: A `Tensor` containing the current training step number.
initial_gradient_squared_accumulator_value: A floating point value.
Starting value for the accumulators, must be positive.
l1_regularization_strength: A float value, must be greater than or
diff --git a/tensorflow/python/training/basic_session_run_hooks.py b/tensorflow/python/training/basic_session_run_hooks.py
index e360231c04..92b8f97c88 100644
--- a/tensorflow/python/training/basic_session_run_hooks.py
+++ b/tensorflow/python/training/basic_session_run_hooks.py
@@ -361,7 +361,7 @@ class NanTensorHook(session_run_hook.SessionRunHook):
"""Initializes NanLoss monitor.
Args:
- loss_tensor: `Output`, the loss tensor.
+ loss_tensor: `Tensor`, the loss tensor.
fail_on_nan_loss: `bool`, whether to raise exception when loss is NaN.
"""
self._loss_tensor = loss_tensor
@@ -403,7 +403,7 @@ class SummarySaverHook(session_run_hook.SessionRunHook):
summary_writer: `SummaryWriter`. If `None` and an `output_dir` was passed,
one will be created accordingly.
scaffold: `Scaffold` to get summary_op if it's not provided.
- summary_op: `Output` of type `string`. A serialized `Summary` protocol
+ summary_op: `Tensor` of type `string`. A serialized `Summary` protocol
buffer, as output by TF summary methods like `tf.summary.scalar` or
`tf.summary.merge_all`.
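A hedged construction sketch for the two hooks touched above; in practice the hooks would be passed to a monitored session, and `/tmp/logdir` and the `save_steps` value are placeholders:

```python
import tensorflow as tf

loss = tf.reduce_mean(tf.random_normal([8]))
tf.summary.scalar("loss", loss)

hooks = [
    # Raises an error (or requests a stop) when the loss Tensor becomes NaN.
    tf.train.NanTensorHook(loss, fail_on_nan_loss=True),
    # Evaluates summary_op every save_steps steps and writes to output_dir.
    tf.train.SummarySaverHook(save_steps=100, output_dir="/tmp/logdir",
                              summary_op=tf.summary.merge_all()),
]
```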
diff --git a/tensorflow/python/training/ftrl.py b/tensorflow/python/training/ftrl.py
index c57115f940..2bb4864d4a 100644
--- a/tensorflow/python/training/ftrl.py
+++ b/tensorflow/python/training/ftrl.py
@@ -43,7 +43,7 @@ class FtrlOptimizer(optimizer.Optimizer):
"""Construct a new FTRL optimizer.
Args:
- learning_rate: A float value or a constant float `Output`.
+ learning_rate: A float value or a constant float `Tensor`.
learning_rate_power: A float value, must be less than or equal to zero.
initial_accumulator_value: The starting value for accumulators.
Only positive values are allowed.
diff --git a/tensorflow/python/training/input.py b/tensorflow/python/training/input.py
index a4c680e319..4fdda96860 100644
--- a/tensorflow/python/training/input.py
+++ b/tensorflow/python/training/input.py
@@ -73,7 +73,7 @@ def limit_epochs(tensor, num_epochs=None, name=None):
initialize local variables.
Args:
- tensor: Any `Output`.
+ tensor: Any `Tensor`.
num_epochs: A positive integer (optional). If specified, limits the number
of steps the output tensor may be evaluated.
name: A name for the operations (optional).
@@ -264,13 +264,13 @@ def range_input_producer(limit, num_epochs=None, shuffle=True, seed=None,
def slice_input_producer(tensor_list, num_epochs=None, shuffle=True, seed=None,
capacity=32, shared_name=None, name=None):
- """Produces a slice of each `Output` in `tensor_list`.
+ """Produces a slice of each `Tensor` in `tensor_list`.
Implemented using a Queue -- a `QueueRunner` for the Queue
is added to the current `Graph`'s `QUEUE_RUNNER` collection.
Args:
- tensor_list: A list of `Output` objects. Every `Output` in
+ tensor_list: A list of `Tensor` objects. Every `Tensor` in
`tensor_list` must have the same size in the first dimension.
num_epochs: An integer (optional). If specified, `slice_input_producer`
produces each slice `num_epochs` times before generating
@@ -423,7 +423,7 @@ def _store_sparse_tensors(tensor_list, enqueue_many, shared_map_ops=None):
objects created in the first call.
Args:
- tensor_list: List of `Output` and `SparseTensor` objects.
+ tensor_list: List of `Tensor` and `SparseTensor` objects.
enqueue_many: Python `Boolean`.
shared_map_ops: (optional) List of `Operation` objects from a previous
call to `_store_sparse_tensors`. If not `None`, the op types should be
@@ -432,7 +432,7 @@ def _store_sparse_tensors(tensor_list, enqueue_many, shared_map_ops=None):
Returns:
A tuple `(stored_list, sparse_info_list)` where `stored_list` is a list
- of `Output` objects (same length as `tensor_list`) and `sparse_info_list`
+ of `Tensor` objects (same length as `tensor_list`) and `sparse_info_list`
is a list of `_SparseMetaData` objects of the same length.
"""
maybe_shared_map_ops = shared_map_ops or [None] * len(tensor_list)
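A sketch of the queue-backed input pipeline built on `slice_input_producer`, assuming the TF 1.x `tf.train` queue-runner API:

```python
import tensorflow as tf

images = tf.random_normal([100, 28, 28])
labels = tf.random_uniform([100], maxval=10, dtype=tf.int32)

# Dequeues one (image, label) slice at a time; a QueueRunner is added to the
# graph's queue-runner collection to keep the underlying queue filled.
image, label = tf.train.slice_input_producer([images, labels], shuffle=True)
image_batch, label_batch = tf.train.batch([image, label], batch_size=32)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print(sess.run(label_batch).shape)  # (32,)
    coord.request_stop()
    coord.join(threads)
```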
diff --git a/tensorflow/python/training/learning_rate_decay.py b/tensorflow/python/training/learning_rate_decay.py
index da64195413..1d9dc3f483 100644
--- a/tensorflow/python/training/learning_rate_decay.py
+++ b/tensorflow/python/training/learning_rate_decay.py
@@ -60,20 +60,20 @@ def exponential_decay(learning_rate, global_step, decay_steps, decay_rate,
```
Args:
- learning_rate: A scalar `float32` or `float64` `Output` or a
+ learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
- global_step: A scalar `int32` or `int64` `Output` or a Python number.
+ global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation. Must not be negative.
- decay_steps: A scalar `int32` or `int64` `Output` or a Python number.
+ decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
- decay_rate: A scalar `float32` or `float64` `Output` or a
+ decay_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The decay rate.
staircase: Boolean. If `True`, decay the learning rate at discrete intervals.
name: String. Optional name of the operation. Defaults to
'ExponentialDecay'.
Returns:
- A scalar `Output` of the same type as `learning_rate`. The decayed
+ A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
@@ -111,11 +111,11 @@ def piecewise_constant(x, boundaries, values, name=None):
```
Args:
- x: A 0-D scalar `Output`. Must be one of the following types: `float32`,
+ x: A 0-D scalar `Tensor`. Must be one of the following types: `float32`,
`float64`, `uint8`, `int8`, `int16`, `int32`, `int64`.
- boundaries: A list of `Output`s or `int`s or `float`s with strictly
+ boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
increasing entries, and with all elements having the same type as `x`.
- values: A list of `Output`s or float`s or `int`s that specifies the values
+ values: A list of `Tensor`s or float`s or `int`s that specifies the values
for the intervals defined by `boundaries`. It should have one more element
than `boundaries`, and all elements should have the same type.
name: A string. Optional name of the operation. Defaults to
@@ -216,22 +216,22 @@ def polynomial_decay(learning_rate, global_step, decay_steps,
```
Args:
- learning_rate: A scalar `float32` or `float64` `Output` or a
+ learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
- global_step: A scalar `int32` or `int64` `Output` or a Python number.
+ global_step: A scalar `int32` or `int64` `Tensor` or a Python number.
Global step to use for the decay computation. Must not be negative.
- decay_steps: A scalar `int32` or `int64` `Output` or a Python number.
+ decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
- end_learning_rate: A scalar `float32` or `float64` `Output` or a
+ end_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The minimal end learning rate.
- power: A scalar `float32` or `float64` `Output` or a
+ power: A scalar `float32` or `float64` `Tensor` or a
Python number. The power of the polynomial. Defaults to sqrt, i.e. 0.5.
cycle: A boolean, whether or not it should cycle beyond decay_steps.
name: String. Optional name of the operation. Defaults to
'PolynomialDecay'.
Returns:
- A scalar `Output` of the same type as `learning_rate`. The decayed
+ A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
@@ -295,7 +295,7 @@ def natural_exp_decay(learning_rate, global_step, decay_steps, decay_rate,
```
Args:
- learning_rate: A scalar `float32` or `float64` `Output` or a
+ learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
global_step: A Python number.
Global step to use for the decay computation. Must not be negative.
@@ -307,7 +307,7 @@ def natural_exp_decay(learning_rate, global_step, decay_steps, decay_rate,
'ExponentialTimeDecay'.
Returns:
- A scalar `Output` of the same type as `learning_rate`. The decayed
+ A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
@@ -362,7 +362,7 @@ def inverse_time_decay(learning_rate, global_step, decay_steps, decay_rate,
```
Args:
- learning_rate: A scalar `float32` or `float64` `Output` or a
+ learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
global_step: A Python number.
Global step to use for the decay computation. Must not be negative.
@@ -374,7 +374,7 @@ def inverse_time_decay(learning_rate, global_step, decay_steps, decay_rate,
'InverseTimeDecay'.
Returns:
- A scalar `Output` of the same type as `learning_rate`. The decayed
+ A scalar `Tensor` of the same type as `learning_rate`. The decayed
learning rate.
Raises:
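A short sketch wiring the decayed rate from `exponential_decay` into an optimizer:

```python
import tensorflow as tf

global_step = tf.Variable(0, trainable=False, name="global_step")

# decayed_lr = 0.1 * 0.96 ** (global_step / 1000); staircase=True would
# instead decay at discrete 1000-step intervals.
learning_rate = tf.train.exponential_decay(
    learning_rate=0.1, global_step=global_step,
    decay_steps=1000, decay_rate=0.96, staircase=False)

optimizer = tf.train.GradientDescentOptimizer(learning_rate)
```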
diff --git a/tensorflow/python/training/momentum.py b/tensorflow/python/training/momentum.py
index b203b0b2b3..62f8028ce6 100644
--- a/tensorflow/python/training/momentum.py
+++ b/tensorflow/python/training/momentum.py
@@ -35,8 +35,8 @@ class MomentumOptimizer(optimizer.Optimizer):
"""Construct a new Momentum optimizer.
Args:
- learning_rate: An `Output` or a floating point value. The learning rate.
- momentum: An `Output` or a floating point value. The momentum.
+ learning_rate: A `Tensor` or a floating point value. The learning rate.
+ momentum: A `Tensor` or a floating point value. The momentum.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Momentum".
diff --git a/tensorflow/python/training/moving_averages.py b/tensorflow/python/training/moving_averages.py
index 65ff9865f4..bac38ee689 100644
--- a/tensorflow/python/training/moving_averages.py
+++ b/tensorflow/python/training/moving_averages.py
@@ -89,9 +89,9 @@ def weighted_moving_average(value,
and `weight`.
Args:
- value: A numeric `Output`.
- decay: A float `Output` or float value. The moving average decay.
- weight: `Output` that keeps the current value of a weight.
+ value: A numeric `Tensor`.
+ decay: A float `Tensor` or float value. The moving average decay.
+ weight: `Tensor` that keeps the current value of a weight.
Its shape should be broadcast-compatible with `value`.
truediv: Boolean, if `True`, dividing by `moving_average(weight)` is
floating point division. If `False`, use division implied by dtypes.
@@ -316,11 +316,11 @@ class ExponentialMovingAverage(object):
def apply(self, var_list=None):
"""Maintains moving averages of variables.
- `var_list` must be a list of `Variable` or `Output` objects. This method
+ `var_list` must be a list of `Variable` or `Tensor` objects. This method
creates shadow variables for all elements of `var_list`. Shadow variables
for `Variable` objects are initialized to the variable's initial value.
They will be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
- For `Output` objects, the shadow variables are initialized to 0 and zero
+ For `Tensor` objects, the shadow variables are initialized to 0 and zero
debiased (see docstring in `assign_moving_average` for more details).
Shadow variables are created with `trainable=False` and added to the
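A minimal sketch of the `apply`/`average` pair described above:

```python
import tensorflow as tf

var = tf.Variable(0.0)
ema = tf.train.ExponentialMovingAverage(decay=0.999)

# apply() creates the shadow variable and returns the op that updates it;
# it is typically run alongside (or as a dependency of) the training op.
maintain_averages_op = ema.apply([var])

# average() returns the shadow variable that tracks `var`.
smoothed = ema.average(var)
```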
diff --git a/tensorflow/python/training/optimizer.py b/tensorflow/python/training/optimizer.py
index 271bc75d76..6dbc0d92b6 100644
--- a/tensorflow/python/training/optimizer.py
+++ b/tensorflow/python/training/optimizer.py
@@ -240,7 +240,7 @@ class Optimizer(object):
of using this function.
Args:
- loss: An `Output` containing the value to minimize.
+ loss: A `Tensor` containing the value to minimize.
global_step: Optional `Variable` to increment by one after the
variables have been updated.
var_list: Optional list of `Variable` objects to update to minimize
@@ -253,7 +253,7 @@ class Optimizer(object):
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
name: Optional name for the returned operation.
- grad_loss: Optional. An `Output` holding the gradient computed for `loss`.
+ grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
An Operation that updates the variables in `var_list`. If `global_step`
@@ -287,7 +287,7 @@ class Optimizer(object):
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
- for "variable". Note that "gradient" can be an `Output`, an
+ for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
@@ -302,7 +302,7 @@ class Optimizer(object):
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
- grad_loss: Optional. An `Output` holding the gradient computed for `loss`.
+ grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
@@ -498,7 +498,7 @@ class Optimizer(object):
"""Add ops to apply dense gradients to `var`.
Args:
- grad: An `Output`.
+ grad: A `Tensor`.
var: A `Variable` object.
Return:
@@ -563,7 +563,7 @@ class Optimizer(object):
Args:
var: A `Variable` object.
- val: An `Output`. The initial value of the slot.
+ val: A `Tensor`. The initial value of the slot.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
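A sketch of the two-step form of `minimize()` described above, with an illustrative clipping step in between:

```python
import tensorflow as tf

x = tf.Variable(3.0)
loss = tf.square(x - 1.0)

opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)

# Equivalent to opt.minimize(loss), but split so the (gradient, variable)
# pairs can be inspected or modified before being applied.
grads_and_vars = opt.compute_gradients(loss)
clipped = [(tf.clip_by_value(g, -1.0, 1.0), v) for g, v in grads_and_vars]
train_op = opt.apply_gradients(clipped)
```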
diff --git a/tensorflow/python/training/proximal_adagrad.py b/tensorflow/python/training/proximal_adagrad.py
index 0b5371ba1a..354df153a7 100644
--- a/tensorflow/python/training/proximal_adagrad.py
+++ b/tensorflow/python/training/proximal_adagrad.py
@@ -39,7 +39,7 @@ class ProximalAdagradOptimizer(optimizer.Optimizer):
"""Construct a new ProximalAdagrad optimizer.
Args:
- learning_rate: An `Output` or a floating point value. The learning rate.
+ learning_rate: A `Tensor` or a floating point value. The learning rate.
initial_accumulator_value: A floating point value.
Starting value for the accumulators, must be positive.
l1_regularization_strength: A float value, must be greater than or
diff --git a/tensorflow/python/training/saver.py b/tensorflow/python/training/saver.py
index 714f0d64e0..ea4c163ac9 100644
--- a/tensorflow/python/training/saver.py
+++ b/tensorflow/python/training/saver.py
@@ -1253,7 +1253,7 @@ class Saver(object):
`sharded`, this is the prefix of the sharded checkpoint filename.
global_step: If provided the global step number is appended to
`save_path` to create the checkpoint filename. The optional argument
- can be an `Output`, an `Output` name or an integer.
+ can be a `Tensor`, a `Tensor` name or an integer.
latest_filename: Optional name for the protocol buffer file that will
contain the list of most recent checkpoint filenames. That file,
kept in the same directory as the checkpoint files, is automatically
@@ -1353,7 +1353,7 @@ class Saver(object):
as_text: If `True`, writes the meta_graph as an ASCII proto.
export_scope: Optional `string`. Name scope to remove.
clear_devices: Whether or not to clear the device field for an `Operation`
- or `Output` during export.
+ or `Tensor` during export.
Returns:
A `MetaGraphDef` proto.
@@ -1504,7 +1504,7 @@ def import_meta_graph(meta_graph_or_file, clear_devices=False,
meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
the path) containing a `MetaGraphDef`.
clear_devices: Whether or not to clear the device field for an `Operation`
- or `Output` during import.
+ or `Tensor` during import.
import_scope: Optional `string`. Name scope to add. Only used when
initializing from protocol buffer.
**kwargs: Optional keyed arguments.
@@ -1568,7 +1568,7 @@ def export_meta_graph(filename=None,
for easy import later into new name scopes. If `None`, the whole graph
is exported. graph_def and export_scope cannot both be specified.
clear_devices: Whether or not to clear the device field for an `Operation`
- or `Output` during export.
+ or `Tensor` during export.
**kwargs: Optional keyed arguments.
Returns:
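A save/restore sketch showing the `global_step` suffixing described above; the checkpoint path is a placeholder, and `tf.global_variables_initializer` is the TF 1.x name:

```python
import tensorflow as tf

v = tf.Variable(1.0, name="v")
step = tf.Variable(0, trainable=False, name="global_step")
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Appending the step yields e.g. /tmp/model.ckpt-0.
    path = saver.save(sess, "/tmp/model.ckpt", global_step=step)
    saver.restore(sess, path)
```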
diff --git a/tensorflow/python/training/session_manager.py b/tensorflow/python/training/session_manager.py
index 7cd02596b9..07cf982235 100644
--- a/tensorflow/python/training/session_manager.py
+++ b/tensorflow/python/training/session_manager.py
@@ -204,7 +204,7 @@ class SessionManager(object):
wait_for_checkpoint: Whether to wait for checkpoint to become available.
max_wait_secs: Maximum time to wait for checkpoints to become available.
config: Optional `ConfigProto` proto used to configure the session.
- init_feed_dict: Optional dictionary that maps `Output` objects to feed
+ init_feed_dict: Optional dictionary that maps `Tensor` objects to feed
values. This feed dictionary is passed to the session `run()` call when
running the init op.
init_fn: Optional callable used to initialize the model. Called after the
diff --git a/tensorflow/python/training/session_run_hook.py b/tensorflow/python/training/session_run_hook.py
index db1dabc8b9..0db41b7baf 100644
--- a/tensorflow/python/training/session_run_hook.py
+++ b/tensorflow/python/training/session_run_hook.py
@@ -32,7 +32,7 @@ There are a few pre-defined monitors:
- StopAtStepHook: Request stop based on global_step
- CheckpointSaverHook: saves checkpoint
- LoggingTensorHook: outputs one or more tensor values to log
- - NanTensorHook: Request stop if given `Output` contains Nans.
+ - NanTensorHook: Request stop if the given `Tensor` contains NaNs.
- SummarySaverHook: saves summaries to a summary writer
For more specific needs, you can create custom hooks:
diff --git a/tensorflow/python/training/slot_creator.py b/tensorflow/python/training/slot_creator.py
index b2287e6866..c631d78fdd 100644
--- a/tensorflow/python/training/slot_creator.py
+++ b/tensorflow/python/training/slot_creator.py
@@ -16,7 +16,7 @@
"""Standard functions for creating slots.
A slot is a `Variable` created with the same shape as a primary variable or
-`Output`. A slot is always scoped in the namespace of the primary object and
+`Tensor`. A slot is always scoped in the namespace of the primary object and
typically has the same device and type.
Slots are typically used as accumulators to track values associated with
@@ -70,8 +70,8 @@ def create_slot(primary, val, name, colocate_with_primary=True):
The type of the slot is determined by the given value.
Args:
- primary: The primary `Variable` or `Output`.
- val: An `Output` specifying the initial value of the slot.
+ primary: The primary `Variable` or `Tensor`.
+ val: A `Tensor` specifying the initial value of the slot.
name: Name to use for the slot variable.
colocate_with_primary: Boolean. If True the slot is located
on the same device as `primary`.
@@ -92,7 +92,7 @@ def create_zeros_slot(primary, name, dtype=None, colocate_with_primary=True):
"""Create a slot initialized to 0 with same shape as the primary object.
Args:
- primary: The primary `Variable` or `Output`.
+ primary: The primary `Variable` or `Tensor`.
name: Name to use for the slot variable.
dtype: Type of the slot variable. Defaults to the type of `primary`.
colocate_with_primary: Boolean. If True the slot is located
diff --git a/tensorflow/python/training/supervisor.py b/tensorflow/python/training/supervisor.py
index e057b0d4f4..af51c07417 100644
--- a/tensorflow/python/training/supervisor.py
+++ b/tensorflow/python/training/supervisor.py
@@ -238,12 +238,12 @@ class Supervisor(object):
default `Graph`. The supervisor may add operations to the graph before
creating a session, but the graph should not be modified by the caller
after passing it to the supervisor.
- ready_op: 1-D string `Output`. This tensor is evaluated by supervisors in
+ ready_op: 1-D string `Tensor`. This tensor is evaluated by supervisors in
`prepare_or_wait_for_session()` to check if the model is ready to use.
The model is considered ready if it returns an empty array. Defaults to
the tensor returned from `tf.report_uninitialized_variables()`. If
`None`, the model is not checked for readiness.
- ready_for_local_init_op: 1-D string `Output`. This tensor is evaluated by
+ ready_for_local_init_op: 1-D string `Tensor`. This tensor is evaluated by
supervisors in `prepare_or_wait_for_session()` to check if the model is
ready to run the local_init_op.
The model is considered ready if it returns an empty array. Defaults to
@@ -257,7 +257,7 @@ class Supervisor(object):
when it can not be recovered. Defaults to an `Operation` that
initializes all variables. If `None`, no initialization is done
automatically unless you pass a value for `init_fn`, see below.
- init_feed_dict: A dictionary that maps `Output` objects to feed values.
+ init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
local_init_op: `Operation`. Used by all supervisors to run initializations
that should run for every new supervisor instance. By default these
@@ -389,10 +389,10 @@ class Supervisor(object):
"""Initializes ready_op.
Args:
- ready_op: `Output` to check if the model is initialized.
+ ready_op: `Tensor` to check if the model is initialized.
If it's set to USE_DEFAULT, creates an op that checks all
the variables are initialized.
- ready_for_local_init_op: `Output` to check if the model is ready to run
+ ready_for_local_init_op: `Tensor` to check if the model is ready to run
local_init_op.
If it's set to USE_DEFAULT, creates an op that checks all
the global variables are initialized.
@@ -416,7 +416,7 @@ class Supervisor(object):
Args:
init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,
create an op that initializes all variables and tables.
- init_feed_dict: A dictionary that maps `Output` objects to feed values.
+ init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
"""
if init_op is Supervisor.USE_DEFAULT:
@@ -868,7 +868,7 @@ class Supervisor(object):
"""Returns the global_step from the default graph.
Returns:
- The global step `Output` or `None`.
+ The global step `Tensor` or `None`.
"""
try:
gs = ops.get_default_graph().get_tensor_by_name("global_step:0")
diff --git a/tensorflow/python/training/training_util.py b/tensorflow/python/training/training_util.py
index b2dadd48d2..ed7411c63e 100644
--- a/tensorflow/python/training/training_util.py
+++ b/tensorflow/python/training/training_util.py
@@ -43,7 +43,7 @@ def global_step(sess, global_step_tensor):
Args:
sess: A TensorFlow `Session` object.
- global_step_tensor: `Output` or the `name` of the operation that contains
+ global_step_tensor: `Tensor` or the `name` of the operation that contains
the global step.
Returns:
@@ -87,10 +87,10 @@ def get_global_step(graph=None):
def assert_global_step(global_step_tensor):
- """Asserts `global_step_tensor` is a scalar int `Variable` or `Output`.
+ """Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.
Args:
- global_step_tensor: `Output` to test.
+ global_step_tensor: `Tensor` to test.
"""
if not (isinstance(global_step_tensor, variables.Variable) or
isinstance(global_step_tensor, ops.Tensor)):
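A short sketch of the two helpers above, assuming they are exported under `tf.train` as in TF 1.x:

```python
import tensorflow as tf

global_step_tensor = tf.Variable(0, trainable=False, name="global_step")

# Raises ValueError unless given a scalar integer Variable or Tensor.
tf.train.assert_global_step(global_step_tensor)

with tf.Session() as sess:
    sess.run(global_step_tensor.initializer)
    print(tf.train.global_step(sess, global_step_tensor))  # 0
```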