author     Mark Daoust <markdaoust@google.com>  2018-08-09 07:03:39 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>  2018-08-09 07:08:30 -0700
commit     f40a875355557483aeae60ffcf757fc9626c752b (patch)
tree       7f642a6fd12495c1c7d9b2f3a37e376d8ee6d2c9
parent     fd9fc4b4b69f7fce60497bbaf5cbd958f12ead8d (diff)
Remove usage of magic-api-link syntax from source files.
Back-ticks are now converted to links in the api_docs generator. With the new docs repo we're moving to simplify the docs pipeline and make everything more readable. By doing this we no longer get test failures for symbols that don't exist (`tf.does_not_exist` will not get a link). There is also no longer any way to set custom link text; that's okay.

This is the result of the following regex replacement (plus a couple of manual edits):

    re:  @\{([^$].*?)(\$.+?)?}
    sub: `\1`

Which does the following replacements:

    "@{tf.symbol}"           --> "`tf.symbol`"
    "@{tf.symbol$link_text}" --> "`tf.symbol`"

PiperOrigin-RevId: 208042358
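For reference, the substitution described in the commit message can be reproduced with Python's `re` module. This is a minimal sketch of that regex replacement applied to a string, not the exact script used for the change:

```python
import re

# Pattern and substitution from the commit message: match "@{symbol}" or
# "@{symbol$link text}", capturing the symbol in group 1 and any custom
# link text in group 2.
MAGIC_LINK = re.compile(r"@\{([^$].*?)(\$.+?)?}")

def strip_magic_links(text):
    # Wrap the captured symbol in back-ticks; group 2 (the "$link text"
    # part) is intentionally discarded.
    return MAGIC_LINK.sub(r"`\1`", text)

print(strip_magic_links("See @{tf.data.Dataset.apply} for details."))
# -> See `tf.data.Dataset.apply` for details.
print(strip_magic_links("See @{tf.set_random_seed$seed docs} for behavior."))
# -> See `tf.set_random_seed` for behavior.
```

Both forms collapse to a plain back-ticked symbol, which is why custom link text is lost in the converted docstrings below.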
-rw-r--r--  tensorflow/contrib/all_reduce/python/all_reduce.py  70
-rw-r--r--  tensorflow/contrib/bigtable/python/ops/bigtable_api.py  28
-rw-r--r--  tensorflow/contrib/cudnn_rnn/python/layers/cudnn_rnn.py  4
-rw-r--r--  tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py  30
-rw-r--r--  tensorflow/contrib/data/__init__.py  2
-rw-r--r--  tensorflow/contrib/data/python/ops/batching.py  22
-rw-r--r--  tensorflow/contrib/data/python/ops/enumerate_ops.py  2
-rw-r--r--  tensorflow/contrib/data/python/ops/error_ops.py  2
-rw-r--r--  tensorflow/contrib/data/python/ops/get_single_element.py  14
-rw-r--r--  tensorflow/contrib/data/python/ops/grouping.py  10
-rw-r--r--  tensorflow/contrib/data/python/ops/interleave_ops.py  16
-rw-r--r--  tensorflow/contrib/data/python/ops/iterator_ops.py  2
-rw-r--r--  tensorflow/contrib/data/python/ops/optimization.py  4
-rw-r--r--  tensorflow/contrib/data/python/ops/prefetching_ops.py  12
-rw-r--r--  tensorflow/contrib/data/python/ops/readers.py  4
-rw-r--r--  tensorflow/contrib/data/python/ops/resampling.py  2
-rw-r--r--  tensorflow/contrib/data/python/ops/scan_ops.py  4
-rw-r--r--  tensorflow/contrib/data/python/ops/shuffle_ops.py  4
-rw-r--r--  tensorflow/contrib/data/python/ops/sliding.py  2
-rw-r--r--  tensorflow/contrib/data/python/ops/stats_ops.py  20
-rw-r--r--  tensorflow/contrib/data/python/ops/threadpool.py  2
-rw-r--r--  tensorflow/contrib/data/python/ops/unique.py  2
-rw-r--r--  tensorflow/contrib/data/python/ops/writers.py  6
-rw-r--r--  tensorflow/contrib/distribute/python/cross_tower_ops.py  6
-rw-r--r--  tensorflow/contrib/distribute/python/parameter_server_strategy.py  6
-rw-r--r--  tensorflow/contrib/distribute/python/prefetching_ops_v2.py  8
-rw-r--r--  tensorflow/contrib/eager/python/datasets.py  2
-rw-r--r--  tensorflow/contrib/eager/python/saver.py  2
-rw-r--r--  tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py  2
-rw-r--r--  tensorflow/contrib/estimator/python/estimator/extenders.py  10
-rw-r--r--  tensorflow/contrib/estimator/python/estimator/linear.py  2
-rw-r--r--  tensorflow/contrib/factorization/python/ops/kmeans.py  16
-rw-r--r--  tensorflow/contrib/framework/python/ops/variables.py  8
-rw-r--r--  tensorflow/contrib/image/python/ops/sparse_image_warp.py  6
-rw-r--r--  tensorflow/contrib/keras/__init__.py  2
-rw-r--r--  tensorflow/contrib/kernel_methods/README.md  16
-rw-r--r--  tensorflow/contrib/layers/python/layers/initializers.py  4
-rw-r--r--  tensorflow/contrib/layers/python/layers/layers.py  2
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/kmeans.py  6
-rw-r--r--  tensorflow/contrib/learn/python/learn/estimators/run_config.py  2
-rw-r--r--  tensorflow/contrib/learn/python/learn/experiment.py  8
-rw-r--r--  tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils.py  8
-rw-r--r--  tensorflow/contrib/lite/python/convert.py  2
-rw-r--r--  tensorflow/contrib/mixed_precision/python/loss_scale_manager.py  4
-rw-r--r--  tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py  6
-rw-r--r--  tensorflow/contrib/model_pruning/python/layers/rnn_cells.py  2
-rw-r--r--  tensorflow/contrib/nn/python/ops/alpha_dropout.py  2
-rw-r--r--  tensorflow/contrib/nn/python/ops/sampling_ops.py  10
-rw-r--r--  tensorflow/contrib/rnn/python/ops/rnn_cell.py  2
-rw-r--r--  tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py  10
-rw-r--r--  tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py  2
-rw-r--r--  tensorflow/contrib/signal/python/kernel_tests/test_util.py  6
-rw-r--r--  tensorflow/contrib/signal/python/ops/mel_ops.py  2
-rw-r--r--  tensorflow/contrib/summary/summary.py  2
-rw-r--r--  tensorflow/contrib/tpu/python/tpu/tpu_estimator.py  4
-rw-r--r--  tensorflow/contrib/training/python/training/tensor_queue_dataset.py  2
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_GatherNd.pbtxt  2
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceScatterNdAdd.pbtxt  2
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ResourceScatterNdUpdate.pbtxt  2
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterNd.pbtxt  2
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterNdAdd.pbtxt  2
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterNdNonAliasingAdd.pbtxt  2
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterNdSub.pbtxt  2
-rw-r--r--  tensorflow/core/api_def/base_api/api_def_ScatterNdUpdate.pbtxt  2
-rw-r--r--  tensorflow/g3doc/README.txt  6
-rw-r--r--  tensorflow/python/client/session.py  44
-rw-r--r--  tensorflow/python/data/ops/dataset_ops.py  36
-rw-r--r--  tensorflow/python/data/ops/iterator_ops.py  10
-rw-r--r--  tensorflow/python/data/ops/optional_ops.py  6
-rw-r--r--  tensorflow/python/data/util/convert.py  6
-rw-r--r--  tensorflow/python/data/util/random_seed.py  6
-rw-r--r--  tensorflow/python/debug/lib/debug_gradients.py  6
-rw-r--r--  tensorflow/python/debug/wrappers/dumping_wrapper.py  2
-rw-r--r--  tensorflow/python/eager/backprop.py  2
-rw-r--r--  tensorflow/python/eager/context.py  2
-rw-r--r--  tensorflow/python/eager/function.py  18
-rw-r--r--  tensorflow/python/estimator/canned/dnn_linear_combined.py  4
-rw-r--r--  tensorflow/python/estimator/canned/linear.py  4
-rw-r--r--  tensorflow/python/estimator/estimator.py  4
-rw-r--r--  tensorflow/python/feature_column/feature_column.py  10
-rw-r--r--  tensorflow/python/feature_column/feature_column_v2.py  6
-rw-r--r--  tensorflow/python/framework/errors_impl.py  34
-rw-r--r--  tensorflow/python/framework/function.py  2
-rw-r--r--  tensorflow/python/framework/importer.py  4
-rw-r--r--  tensorflow/python/framework/ops.py  70
-rw-r--r--  tensorflow/python/framework/random_seed.py  2
-rw-r--r--  tensorflow/python/framework/sparse_tensor.py  2
-rw-r--r--  tensorflow/python/framework/tensor_shape.py  2
-rw-r--r--  tensorflow/python/framework/test_util.py  4
-rw-r--r--  tensorflow/python/keras/engine/base_layer.py  8
-rw-r--r--  tensorflow/python/layers/base.py  4
-rw-r--r--  tensorflow/python/layers/core.py  4
-rw-r--r--  tensorflow/python/ops/array_ops.py  6
-rw-r--r--  tensorflow/python/ops/control_flow_ops.py  12
-rw-r--r--  tensorflow/python/ops/custom_gradient.py  4
-rw-r--r--  tensorflow/python/ops/data_flow_ops.py  32
-rw-r--r--  tensorflow/python/ops/embedding_ops.py  2
-rw-r--r--  tensorflow/python/ops/image_ops_impl.py  12
-rw-r--r--  tensorflow/python/ops/init_ops.py  26
-rw-r--r--  tensorflow/python/ops/losses/losses_impl.py  2
-rw-r--r--  tensorflow/python/ops/nn_impl.py  6
-rw-r--r--  tensorflow/python/ops/nn_ops.py  28
-rw-r--r--  tensorflow/python/ops/numerics.py  4
-rw-r--r--  tensorflow/python/ops/random_ops.py  18
-rw-r--r--  tensorflow/python/ops/rnn_cell_impl.py  2
-rw-r--r--  tensorflow/python/ops/script_ops.py  10
-rw-r--r--  tensorflow/python/ops/spectral_ops.py  4
-rw-r--r--  tensorflow/python/ops/state_ops.py  4
-rw-r--r--  tensorflow/python/ops/summary_ops_v2.py  64
-rw-r--r--  tensorflow/python/ops/template.py  4
-rw-r--r--  tensorflow/python/ops/variable_scope.py  10
-rw-r--r--  tensorflow/python/ops/variables.py  20
-rw-r--r--  tensorflow/python/summary/writer/writer.py  4
-rw-r--r--  tensorflow/python/training/distribute.py  4
-rw-r--r--  tensorflow/python/training/moving_averages.py  2
-rw-r--r--  tensorflow/python/training/quantize_training.i  2
-rw-r--r--  tensorflow/python/training/server_lib.py  10
-rw-r--r--  tensorflow/python/training/supervisor.py  4
-rw-r--r--  tensorflow/python/training/warm_starting_util.py  2
119 files changed, 530 insertions, 528 deletions
diff --git a/tensorflow/contrib/all_reduce/python/all_reduce.py b/tensorflow/contrib/all_reduce/python/all_reduce.py
index 159d985db5..3b539734a2 100644
--- a/tensorflow/contrib/all_reduce/python/all_reduce.py
+++ b/tensorflow/contrib/all_reduce/python/all_reduce.py
@@ -32,10 +32,10 @@ def _flatten_tensors(tensors):
"""Check tensors for isomorphism and flatten.
Args:
- tensors: list of T @{tf.Tensor} which must all have the same shape.
+ tensors: list of T `tf.Tensor` which must all have the same shape.
Returns:
- tensors: a list of T @{tf.Tensor} which are flattened (1D) views of tensors
+ tensors: a list of T `tf.Tensor` which are flattened (1D) views of tensors
shape: the original shape of each element of input tensors
Raises:
@@ -61,12 +61,12 @@ def _reshape_tensors(tensors, shape):
"""Reshape tensors flattened by _flatten_tensors.
Args:
- tensors: list of T @{tf.Tensor} of identical length 1D tensors.
+ tensors: list of T `tf.Tensor` of identical length 1D tensors.
shape: list of integers describing the desired shape. Product of
the elements must equal the length of each tensor.
Returns:
- list of T @{tf.Tensor} which are the reshaped inputs.
+ list of T `tf.Tensor` which are the reshaped inputs.
"""
reshaped = []
for t in tensors:
@@ -79,12 +79,12 @@ def _padded_split(tensor, pieces):
"""Like split for 1D tensors but pads-out case where len % pieces != 0.
Args:
- tensor: T @{tf.Tensor} that must be 1D.
+ tensor: T `tf.Tensor` that must be 1D.
pieces: a positive integer specifying the number of pieces into which
tensor should be split.
Returns:
- list of T @{tf.Tensor} of length pieces, which hold the values of
+ list of T `tf.Tensor` of length pieces, which hold the values of
the input tensor, in order. The final tensor may
be zero-padded on the end to make its size equal to those of all
of the other tensors.
@@ -132,11 +132,11 @@ def _strip_padding(tensors, pad_len):
"""Strip the suffix padding added by _padded_split.
Args:
- tensors: list of T @{tf.Tensor} of identical length 1D tensors.
+ tensors: list of T `tf.Tensor` of identical length 1D tensors.
pad_len: number of elements to be stripped from the end of each tensor.
Returns:
- list of T @{tf.Tensor} which are the stripped inputs.
+ list of T `tf.Tensor` which are the stripped inputs.
Raises:
ValueError: tensors must be a non-empty list of 1D tensors, and
@@ -161,12 +161,12 @@ def _ragged_split(tensor, pieces):
"""Like split for 1D tensors but allows case where len % pieces != 0.
Args:
- tensor: T @{tf.Tensor} that must be 1D.
+ tensor: T `tf.Tensor` that must be 1D.
pieces: a positive integer specifying the number of pieces into which
tensor should be split.
Returns:
- list of T @{tf.Tensor} of length pieces, which hold the values of
+ list of T `tf.Tensor` of length pieces, which hold the values of
the input tensor, in order. The final tensor may be shorter
than the others, which will all be of equal length.
@@ -256,7 +256,7 @@ def build_ring_all_reduce(input_tensors, num_workers, num_subchunks,
"""Construct a subgraph performing a ring-style all-reduce of input_tensors.
Args:
- input_tensors: a list of T @{tf.Tensor} objects, which must all
+ input_tensors: a list of T `tf.Tensor` objects, which must all
have the same shape and type.
num_workers: number of worker tasks spanned by input_tensors.
num_subchunks: number of subchunks each device should process in one tick.
@@ -272,7 +272,7 @@ def build_ring_all_reduce(input_tensors, num_workers, num_subchunks,
size.
Returns:
- a list of T @{tf.Tensor} identical sum-reductions of input_tensors.
+ a list of T `tf.Tensor` identical sum-reductions of input_tensors.
"""
if len(input_tensors) < 2:
raise ValueError("input_tensors must be length 2 or longer")
@@ -299,7 +299,7 @@ def _build_ring_gather(input_tensors, devices, num_subchunks,
"""Construct a subgraph for the first (reduction) pass of ring all-reduce.
Args:
- input_tensors: a list of T @{tf.Tensor} 1D input tensors of same
+ input_tensors: a list of T `tf.Tensor` 1D input tensors of same
shape and type.
devices: array of device name strings
num_subchunks: number of subchunks each device should process in one tick.
@@ -311,7 +311,7 @@ def _build_ring_gather(input_tensors, devices, num_subchunks,
ValueError: tensors must all be one dimensional.
Returns:
- list of list of T @{tf.Tensor} of (partially) reduced values where
+ list of list of T `tf.Tensor` of (partially) reduced values where
exactly num_subchunks chunks at each device are fully reduced.
"""
num_devices = len(input_tensors)
@@ -360,11 +360,11 @@ def _apply_unary_to_chunks(f, chunks_by_dev):
"""Apply a unary op to each tensor in chunks_by_dev, on same device.
Args:
- f: a unary function over T @{tf.Tensor}.
- chunks_by_dev: list of lists of T @{tf.Tensor}.
+ f: a unary function over T `tf.Tensor`.
+ chunks_by_dev: list of lists of T `tf.Tensor`.
Returns:
- new list of lists of T @{tf.Tensor} with the same structure as
+ new list of lists of T `tf.Tensor` with the same structure as
chunks_by_dev containing the derived tensors.
"""
output = []
@@ -381,14 +381,14 @@ def _build_ring_scatter(pred_by_s_d, rank_by_s_d,
Args:
pred_by_s_d: as produced by _ring_permutations
rank_by_s_d: as produced by _ring_permutations
- chunks_by_dev: list of list of T @{tf.Tensor} indexed by ints
+ chunks_by_dev: list of list of T `tf.Tensor` indexed by ints
(device, chunk)
Raises:
ValueError: chunks_by_dev is not well-formed
Returns:
- list of T @{tf.Tensor} which are the fully reduced tensors, one
+ list of T `tf.Tensor` which are the fully reduced tensors, one
at each device corresponding to the outer dimension of chunks_by_dev.
"""
num_devices = len(chunks_by_dev)
@@ -448,12 +448,12 @@ def build_recursive_hd_all_reduce(input_tensors, red_op, un_op=None):
the future with edge-case specific logic.
Args:
- input_tensors: list of T @{tf.Tensor} to be elementwise reduced.
+ input_tensors: list of T `tf.Tensor` to be elementwise reduced.
red_op: a binary elementwise reduction Op.
un_op: an optional unary elementwise Op to apply to reduced values.
Returns:
- list of T @{tf.Tensor} which are the fully reduced tensors, one
+ list of T `tf.Tensor` which are the fully reduced tensors, one
at each device of input_tensors.
Raises:
@@ -475,13 +475,13 @@ def _build_recursive_hd_gather(input_tensors, devices, red_op):
"""Construct the gather phase of recursive halving-doubling all-reduce.
Args:
- input_tensors: list of T @{tf.Tensor} to be elementwise reduced.
+ input_tensors: list of T `tf.Tensor` to be elementwise reduced.
devices: a list of strings naming the devices hosting input_tensors,
which will also be used to host the (partial) reduction values.
red_op: a binary elementwise reduction Op.
Returns:
- list of T @{tf.Tensor} which are the fully reduced tensor shards.
+ list of T `tf.Tensor` which are the fully reduced tensor shards.
Raises:
ValueError: num_devices not a power of 2, or tensor len not divisible
@@ -516,12 +516,12 @@ def _build_recursive_hd_scatter(input_tensors, devices):
"""Construct the scatter phase of recursive halving-doublng all-reduce.
Args:
- input_tensors: list of T @{tf.Tensor} that are fully-reduced shards.
+ input_tensors: list of T `tf.Tensor` that are fully-reduced shards.
devices: a list of strings naming the devices on which the reconstituted
full tensors should be placed.
Returns:
- list of T @{tf.Tensor} which are the fully reduced tensors.
+ list of T `tf.Tensor` which are the fully reduced tensors.
"""
num_devices = len(devices)
num_hops = int(math.log(num_devices, 2))
@@ -571,7 +571,7 @@ def build_shuffle_all_reduce(input_tensors, gather_devices, red_op, un_op=None):
un_op: optional elementwise unary Op to be applied to fully-reduced values.
Returns:
- list of T @{tf.Tensor} which are the fully reduced tensors.
+ list of T `tf.Tensor` which are the fully reduced tensors.
"""
input_tensors, shape = _flatten_tensors(input_tensors)
dst_devices = [t.device for t in input_tensors]
@@ -594,7 +594,7 @@ def _build_shuffle_gather(input_tensors, gather_devices, red_op, un_op=None):
un_op: optional elementwise unary Op to be applied to fully-reduced values.
Returns:
- list of T @{tf.Tensor} which are the fully reduced shards.
+ list of T `tf.Tensor` which are the fully reduced shards.
Raises:
ValueError: inputs not well-formed.
@@ -629,7 +629,7 @@ def _build_shuffle_scatter(reduced_shards, dst_devices):
should be reconstituted.
Returns:
- list of T @{tf.Tensor} scattered tensors.
+ list of T `tf.Tensor` scattered tensors.
"""
num_devices = len(dst_devices)
out_tensors = []
@@ -644,7 +644,7 @@ def _split_by_task(devices, values):
Args:
devices: list of device name strings
- values: list of T @{tf.tensor} of same length as devices.
+ values: list of T `tf.tensor` of same length as devices.
Returns:
(per_task_devices, per_task_values) where both values are
@@ -680,14 +680,14 @@ def build_nccl_all_reduce(input_tensors, red_op, un_op=None):
"""Build a subgraph that does one full all-reduce, using NCCL.
Args:
- input_tensors: list of T @{tf.Tensor} of same-shape and type values to
+ input_tensors: list of T `tf.Tensor` of same-shape and type values to
be reduced.
red_op: binary elementwise reduction operator. Must be one of
{tf.add}
un_op: optional unary elementwise Op to apply to fully-reduce values.
Returns:
- list of T @{tf.Tensor} of reduced values.
+ list of T `tf.Tensor` of reduced values.
Raises:
ValueError: red_op not supported.
@@ -709,14 +709,14 @@ def _build_nccl_hybrid(input_tensors, red_op, upper_level_f):
"""Construct a subgraph for NCCL hybrid all-reduce.
Args:
- input_tensors: list of T @{tf.Tensor} of same-shape and type values to
+ input_tensors: list of T `tf.Tensor` of same-shape and type values to
be reduced.
red_op: binary elementwise reduction operator.
upper_level_f: function for reducing one value per worker, across
workers.
Returns:
- list of T @{tf.Tensor} of reduced values.
+ list of T `tf.Tensor` of reduced values.
Raises:
ValueError: inputs not well-formed.
@@ -797,7 +797,7 @@ def _build_shuffle_hybrid(input_tensors, gather_devices, red_op, upper_level_f):
"""Construct a subgraph for Shuffle hybrid all-reduce.
Args:
- input_tensors: list of T @{tf.Tensor} of same-shape and type values to
+ input_tensors: list of T `tf.Tensor` of same-shape and type values to
be reduced.
gather_devices: list of device names on which to host gather shards.
red_op: binary elementwise reduction operator.
@@ -805,7 +805,7 @@ def _build_shuffle_hybrid(input_tensors, gather_devices, red_op, upper_level_f):
workers.
Returns:
- list of T @{tf.Tensor} of reduced values.
+ list of T `tf.Tensor` of reduced values.
Raises:
ValueError: inputs not well-formed.
diff --git a/tensorflow/contrib/bigtable/python/ops/bigtable_api.py b/tensorflow/contrib/bigtable/python/ops/bigtable_api.py
index 1102fb3c2d..3e1b622867 100644
--- a/tensorflow/contrib/bigtable/python/ops/bigtable_api.py
+++ b/tensorflow/contrib/bigtable/python/ops/bigtable_api.py
@@ -17,8 +17,8 @@
TensorFlow has support for reading from and writing to Cloud Bigtable. To use
TensorFlow + Cloud Bigtable integration, first create a BigtableClient to
configure your connection to Cloud Bigtable, and then create a BigtableTable
-object to allow you to create numerous @{tf.data.Dataset}s to read data, or
-write a @{tf.data.Dataset} object to the underlying Cloud Bigtable table.
+object to allow you to create numerous `tf.data.Dataset`s to read data, or
+write a `tf.data.Dataset` object to the underlying Cloud Bigtable table.
For background on Cloud Bigtable, see: https://cloud.google.com/bigtable .
"""
@@ -203,7 +203,7 @@ class BigtableTable(object):
be retrieved. If end is None, all subsequent row keys will be retrieved.
Returns:
- A @{tf.data.Dataset} containing `tf.string` Tensors corresponding to all
+ A `tf.data.Dataset` containing `tf.string` Tensors corresponding to all
of the row keys between `start` and `end`.
"""
# TODO(saeta): Make inclusive / exclusive configurable?
@@ -219,7 +219,7 @@ class BigtableTable(object):
retrieved.
Returns:
- A @{tf.data.Dataset}. containing `tf.string` Tensors corresponding to all
+ A `tf.data.Dataset`. containing `tf.string` Tensors corresponding to all
of the row keys matching that prefix.
"""
return _BigtablePrefixKeyDataset(self, prefix)
@@ -228,11 +228,11 @@ class BigtableTable(object):
"""Retrieves a sampling of row keys from the Bigtable table.
This dataset is most often used in conjunction with
- @{tf.contrib.data.parallel_interleave} to construct a set of ranges for
+ `tf.contrib.data.parallel_interleave` to construct a set of ranges for
scanning in parallel.
Returns:
- A @{tf.data.Dataset} returning string row keys.
+ A `tf.data.Dataset` returning string row keys.
"""
return _BigtableSampleKeysDataset(self)
@@ -272,7 +272,7 @@ class BigtableTable(object):
that are treated as the column qualifier (column name).
Returns:
- A @{tf.data.Dataset} returning the row keys and the cell contents.
+ A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
@@ -317,7 +317,7 @@ class BigtableTable(object):
that are treated as the column qualifier (column name).
Returns:
- A @{tf.data.Dataset} returning the row keys and the cell contents.
+ A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
@@ -373,7 +373,7 @@ class BigtableTable(object):
that are treated as the column qualifier (column name).
Returns:
- A @{tf.data.Dataset} returning the row keys and the cell contents.
+ A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
@@ -435,7 +435,7 @@ class BigtableTable(object):
that are treated as the column qualifier (column name).
Returns:
- A @{tf.data.Dataset} returning the row keys and the cell contents.
+ A `tf.data.Dataset` returning the row keys and the cell contents.
Raises:
ValueError: If the configured probability is unexpected.
@@ -450,12 +450,12 @@ class BigtableTable(object):
"""Writes a dataset to the table.
Args:
- dataset: A @{tf.data.Dataset} to be written to this table. It must produce
+ dataset: A `tf.data.Dataset` to be written to this table. It must produce
a list of number-of-columns+1 elements, all of which must be strings.
The first value will be used as the row key, and subsequent values will
be used as cell values for the corresponding columns from the
corresponding column_families and columns entries.
- column_families: A @{tf.Tensor} of `tf.string`s corresponding to the
+ column_families: A `tf.Tensor` of `tf.string`s corresponding to the
column names to store the dataset's elements into.
columns: A `tf.Tensor` of `tf.string`s corresponding to the column names
to store the dataset's elements into.
@@ -463,7 +463,7 @@ class BigtableTable(object):
Leave as None to use server-provided timestamps.
Returns:
- A @{tf.Operation} that can be run to perform the write.
+ A `tf.Operation` that can be run to perform the write.
Raises:
ValueError: If there are unexpected or incompatible types, or if the
@@ -502,7 +502,7 @@ class BigtableTable(object):
normalized_columns: The column families and column qualifiers to retrieve.
Returns:
- A @{tf.data.Dataset} representing the result of the parallel scan.
+ A `tf.data.Dataset` representing the result of the parallel scan.
"""
if num_parallel_scans is None:
num_parallel_scans = 50
diff --git a/tensorflow/contrib/cudnn_rnn/python/layers/cudnn_rnn.py b/tensorflow/contrib/cudnn_rnn/python/layers/cudnn_rnn.py
index d58198faf3..e26d56c857 100644
--- a/tensorflow/contrib/cudnn_rnn/python/layers/cudnn_rnn.py
+++ b/tensorflow/contrib/cudnn_rnn/python/layers/cudnn_rnn.py
@@ -56,7 +56,7 @@ class _CudnnRNN(base_layer.Layer):
Cudnn RNNs have two major differences from other platform-independent RNNs tf
provides:
* Cudnn LSTM and GRU are mathematically different from their tf counterparts.
- (e.g. @{tf.contrib.rnn.LSTMBlockCell} and @{tf.nn.rnn_cell.GRUCell}.
+ (e.g. `tf.contrib.rnn.LSTMBlockCell` and `tf.nn.rnn_cell.GRUCell`.
* Cudnn-trained checkpoints are not directly compatible with tf RNNs:
* They use a single opaque parameter buffer for the entire (possibly)
multi-layer multi-directional RNN; Whereas tf RNN weights are per-cell and
@@ -182,7 +182,7 @@ class _CudnnRNN(base_layer.Layer):
dropout: dropout rate, a number between [0, 1]. Dropout is applied between
each layer (no dropout is applied for a model with a single layer).
When set to 0, dropout is disabled.
- seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
+ seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
dtype: tf.float16, tf.float32 or tf.float64
kernel_initializer: starting value to initialize the weight.
diff --git a/tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py b/tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py
index 748d7cd011..2c92f31788 100644
--- a/tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py
+++ b/tensorflow/contrib/cudnn_rnn/python/ops/cudnn_rnn_ops.py
@@ -61,8 +61,8 @@ _WEIGHTS_VARIABLE_NAME = rnn_cell_impl._WEIGHTS_VARIABLE_NAME
class CudnnCompatibleLSTMCell(lstm_ops.LSTMBlockCell):
"""Cudnn Compatible LSTMCell.
- A simple wrapper around @{tf.contrib.rnn.LSTMBlockCell} to use along with
- @{tf.contrib.cudnn_rnn.CudnnLSTM}. The latter's params can be used by
+ A simple wrapper around `tf.contrib.rnn.LSTMBlockCell` to use along with
+ `tf.contrib.cudnn_rnn.CudnnLSTM`. The latter's params can be used by
this cell seamlessly.
"""
@@ -76,8 +76,8 @@ class CudnnCompatibleLSTMCell(lstm_ops.LSTMBlockCell):
class CudnnCompatibleGRUCell(rnn_cell_impl.GRUCell):
"""Cudnn Compatible GRUCell.
- A GRU impl akin to @{tf.nn.rnn_cell.GRUCell} to use along with
- @{tf.contrib.cudnn_rnn.CudnnGRU}. The latter's params can be used by
+ A GRU impl akin to `tf.nn.rnn_cell.GRUCell` to use along with
+ `tf.contrib.cudnn_rnn.CudnnGRU`. The latter's params can be used by
it seamlessly.
It differs from platform-independent GRUs in how the new memory gate is
@@ -97,7 +97,7 @@ class CudnnCompatibleGRUCell(rnn_cell_impl.GRUCell):
$$h_t = (1 - u_t) .* h'_t + u_t .* h_t-1$$
```
- Other GRU (see @{tf.nn.rnn_cell.GRUCell} and @{tf.contrib.rnn.GRUBlockCell}):
+ Other GRU (see `tf.nn.rnn_cell.GRUCell` and `tf.contrib.rnn.GRUBlockCell`):
```python
# new memory gate
\\(h'_t = tanh(x_t * W_h + (r_t .* h_t-1) * R_h + b_{Wh})\\)
@@ -891,7 +891,7 @@ def _cudnn_rnn(inputs,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
- seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
+ seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@@ -957,7 +957,7 @@ def cudnn_lstm(inputs,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
- seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
+ seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@@ -998,7 +998,7 @@ def _cudnn_rnn_no_input_c(inputs,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
- seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
+ seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@@ -1040,7 +1040,7 @@ def cudnn_gru(inputs,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
- seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
+ seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@@ -1079,7 +1079,7 @@ def cudnn_rnn_relu(inputs,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
- seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
+ seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@@ -1119,7 +1119,7 @@ def cudnn_rnn_tanh(inputs,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
- seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
+ seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@@ -1161,7 +1161,7 @@ def cudnn_rnn_opaque_params_to_canonical(rnn_mode,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
- seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
+ seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@@ -1224,7 +1224,7 @@ def cudnn_rnn_canonical_to_opaque_params(rnn_mode,
direction: the direction model that the model operates. Could be either
'unidirectional' or 'bidirectional'
dropout: whether to enable dropout. With it is 0, dropout is disabled.
- seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
+ seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@@ -1282,7 +1282,7 @@ def cudnn_rnn_opaque_params_size(rnn_mode,
'unidirectional' or 'bidirectional'
dtype: one of tf.float32 or tf.float64.
dropout: whether to enable dropout. With it is 0, dropout is disabled.
- seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
+ seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
name: name of the operation.
Returns:
@@ -1349,7 +1349,7 @@ class _CudnnRNN(object):
'unidirectional' or 'bidirectional'
dtype: dtype of params, tf.float32 or tf.float64.
dropout: whether to enable dropout. With it is 0, dropout is disabled.
- seed: the op seed used for initializing dropout. See @{tf.set_random_seed}
+ seed: the op seed used for initializing dropout. See `tf.set_random_seed`
for behavior.
Raises:
ValueError: if direction is invalid.
diff --git a/tensorflow/contrib/data/__init__.py b/tensorflow/contrib/data/__init__.py
index 7878e46e88..dbfff9b4f8 100644
--- a/tensorflow/contrib/data/__init__.py
+++ b/tensorflow/contrib/data/__init__.py
@@ -15,7 +15,7 @@
"""Experimental API for building input pipelines.
This module contains experimental `Dataset` sources and transformations that can
-be used in conjunction with the @{tf.data.Dataset} API. Note that the
+be used in conjunction with the `tf.data.Dataset` API. Note that the
`tf.contrib.data` API is not subject to the same backwards compatibility
guarantees as `tf.data`, but we will provide deprecation advice in advance of
removing existing functionality.
diff --git a/tensorflow/contrib/data/python/ops/batching.py b/tensorflow/contrib/data/python/ops/batching.py
index 4835c4e5bd..9f059942a6 100644
--- a/tensorflow/contrib/data/python/ops/batching.py
+++ b/tensorflow/contrib/data/python/ops/batching.py
@@ -185,7 +185,7 @@ def dense_to_sparse_batch(batch_size, row_shape):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
@@ -401,7 +401,7 @@ def unbatch():
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
@@ -443,7 +443,7 @@ def unbatch():
def batch_and_drop_remainder(batch_size):
"""A batching transformation that omits the final small batch (if present).
- Like @{tf.data.Dataset.batch}, this transformation combines
+ Like `tf.data.Dataset.batch`, this transformation combines
consecutive elements of this dataset into batches. However, if the batch
size does not evenly divide the input dataset size, this transformation will
drop the final smaller element.
@@ -467,7 +467,7 @@ def batch_and_drop_remainder(batch_size):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}
+ `tf.data.Dataset.apply`
"""
def _apply_fn(dataset):
@@ -484,25 +484,25 @@ def padded_batch_and_drop_remainder(batch_size,
padding_values=None):
"""A batching and padding transformation that omits the final small batch.
- Like @{tf.data.Dataset.padded_batch}, this transformation combines
+ Like `tf.data.Dataset.padded_batch`, this transformation combines
consecutive elements of this dataset into batches. However, if the batch
size does not evenly divide the input dataset size, this transformation will
drop the final smaller element.
- See `@{tf.contrib.data.batch_and_drop_remainder}` for more details.
+ See `tf.contrib.data.batch_and_drop_remainder` for more details.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
consecutive elements of this dataset to combine in a single batch.
padded_shapes: A nested structure of `tf.TensorShape` or
`tf.int64` vector tensor-like objects. See
- @{tf.data.Dataset.padded_batch} for details.
+ `tf.data.Dataset.padded_batch` for details.
padding_values: (Optional.) A nested structure of scalar-shaped
- `tf.Tensor`. See @{tf.data.Dataset.padded_batch} for details.
+ `tf.Tensor`. See `tf.data.Dataset.padded_batch` for details.
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}
+ `tf.data.Dataset.apply`
"""
def _apply_fn(dataset):
@@ -661,7 +661,7 @@ def assert_element_shape(expected_shapes):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}
+ `tf.data.Dataset.apply`
"""
def _check_shape(*elements):
@@ -760,7 +760,7 @@ def map_and_batch(map_func,
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
Raises:
ValueError: If both `num_parallel_batches` and `num_parallel_calls` are
diff --git a/tensorflow/contrib/data/python/ops/enumerate_ops.py b/tensorflow/contrib/data/python/ops/enumerate_ops.py
index ac2b386b81..490281e0d2 100644
--- a/tensorflow/contrib/data/python/ops/enumerate_ops.py
+++ b/tensorflow/contrib/data/python/ops/enumerate_ops.py
@@ -47,7 +47,7 @@ def enumerate_dataset(start=0):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
diff --git a/tensorflow/contrib/data/python/ops/error_ops.py b/tensorflow/contrib/data/python/ops/error_ops.py
index d46d96c461..b4a7521e08 100644
--- a/tensorflow/contrib/data/python/ops/error_ops.py
+++ b/tensorflow/contrib/data/python/ops/error_ops.py
@@ -42,7 +42,7 @@ def ignore_errors():
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
diff --git a/tensorflow/contrib/data/python/ops/get_single_element.py b/tensorflow/contrib/data/python/ops/get_single_element.py
index ef9284456e..a6713b017a 100644
--- a/tensorflow/contrib/data/python/ops/get_single_element.py
+++ b/tensorflow/contrib/data/python/ops/get_single_element.py
@@ -29,8 +29,8 @@ from tensorflow.python.ops import gen_dataset_ops
def get_single_element(dataset):
"""Returns the single element in `dataset` as a nested structure of tensors.
- This function enables you to use a @{tf.data.Dataset} in a stateless
- "tensor-in tensor-out" expression, without creating a @{tf.data.Iterator}.
+ This function enables you to use a `tf.data.Dataset` in a stateless
+ "tensor-in tensor-out" expression, without creating a `tf.data.Iterator`.
This can be useful when your preprocessing transformations are expressed
as a `Dataset`, and you want to use the transformation at serving time.
For example:
@@ -50,10 +50,10 @@ def get_single_element(dataset):
```
Args:
- dataset: A @{tf.data.Dataset} object containing a single element.
+ dataset: A `tf.data.Dataset` object containing a single element.
Returns:
- A nested structure of @{tf.Tensor} objects, corresponding to the single
+ A nested structure of `tf.Tensor` objects, corresponding to the single
element of `dataset`.
Raises:
@@ -77,11 +77,11 @@ def reduce_dataset(dataset, reducer):
"""Returns the result of reducing the `dataset` using `reducer`.
Args:
- dataset: A @{tf.data.Dataset} object.
- reducer: A @{tf.contrib.data.Reducer} object representing the reduce logic.
+ dataset: A `tf.data.Dataset` object.
+ reducer: A `tf.contrib.data.Reducer` object representing the reduce logic.
Returns:
- A nested structure of @{tf.Tensor} objects, corresponding to the result
+ A nested structure of `tf.Tensor` objects, corresponding to the result
of reducing `dataset` using `reducer`.
Raises:
diff --git a/tensorflow/contrib/data/python/ops/grouping.py b/tensorflow/contrib/data/python/ops/grouping.py
index bd8d398c58..6edc1d7990 100644
--- a/tensorflow/contrib/data/python/ops/grouping.py
+++ b/tensorflow/contrib/data/python/ops/grouping.py
@@ -50,7 +50,7 @@ def group_by_reducer(key_func, reducer):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
@@ -92,7 +92,7 @@ def group_by_window(key_func,
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
Raises:
ValueError: if neither or both of {`window_size`, `window_size_func`} are
@@ -142,11 +142,11 @@ def bucket_by_sequence_length(element_length_func,
bucket_batch_sizes: `list<int>`, batch size per bucket. Length should be
`len(bucket_boundaries) + 1`.
padded_shapes: Nested structure of `tf.TensorShape` to pass to
- @{tf.data.Dataset.padded_batch}. If not provided, will use
+ `tf.data.Dataset.padded_batch`. If not provided, will use
`dataset.output_shapes`, which will result in variable length dimensions
being padded out to the maximum length in each batch.
padding_values: Values to pad with, passed to
- @{tf.data.Dataset.padded_batch}. Defaults to padding with 0.
+ `tf.data.Dataset.padded_batch`. Defaults to padding with 0.
pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown
size to maximum length in batch. If `True`, will pad dimensions with
unknown size to bucket boundary minus 1 (i.e., the maximum length in each
@@ -155,7 +155,7 @@ def bucket_by_sequence_length(element_length_func,
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
Raises:
ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`.
diff --git a/tensorflow/contrib/data/python/ops/interleave_ops.py b/tensorflow/contrib/data/python/ops/interleave_ops.py
index bcc959594a..5a1a35199a 100644
--- a/tensorflow/contrib/data/python/ops/interleave_ops.py
+++ b/tensorflow/contrib/data/python/ops/interleave_ops.py
@@ -42,7 +42,7 @@ def parallel_interleave(map_func,
`parallel_interleave()` maps `map_func` across its input to produce nested
datasets, and outputs their elements interleaved. Unlike
- @{tf.data.Dataset.interleave}, it gets elements from `cycle_length` nested
+ `tf.data.Dataset.interleave`, it gets elements from `cycle_length` nested
datasets in parallel, which increases the throughput, especially in the
presence of stragglers. Furthermore, the `sloppy` argument can be used to
improve performance, by relaxing the requirement that the outputs are produced
@@ -79,7 +79,7 @@ def parallel_interleave(map_func,
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return readers.ParallelInterleaveDataset(
@@ -138,7 +138,7 @@ def sloppy_interleave(map_func, cycle_length, block_length=1):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return readers.ParallelInterleaveDataset(
@@ -196,15 +196,15 @@ def sample_from_datasets(datasets, weights=None, seed=None):
"""Samples elements at random from the datasets in `datasets`.
Args:
- datasets: A list of @{tf.data.Dataset} objects with compatible structure.
+ datasets: A list of `tf.data.Dataset` objects with compatible structure.
weights: (Optional.) A list of `len(datasets)` floating-point values where
`weights[i]` represents the probability with which an element should be
- sampled from `datasets[i]`, or a @{tf.data.Dataset} object where each
+ sampled from `datasets[i]`, or a `tf.data.Dataset` object where each
element is such a list. Defaults to a uniform distribution across
`datasets`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
- @{tf.set_random_seed} for behavior.
+ `tf.set_random_seed` for behavior.
Returns:
A dataset that interleaves elements from `datasets` at random, according to
@@ -262,8 +262,8 @@ def choose_from_datasets(datasets, choice_dataset):
```
Args:
- datasets: A list of @{tf.data.Dataset} objects with compatible structure.
- choice_dataset: A @{tf.data.Dataset} of scalar `tf.int64` tensors between
+ datasets: A list of `tf.data.Dataset` objects with compatible structure.
+ choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between
`0` and `len(datasets) - 1`.
Returns:
diff --git a/tensorflow/contrib/data/python/ops/iterator_ops.py b/tensorflow/contrib/data/python/ops/iterator_ops.py
index d2c1d0d362..18515e21ed 100644
--- a/tensorflow/contrib/data/python/ops/iterator_ops.py
+++ b/tensorflow/contrib/data/python/ops/iterator_ops.py
@@ -118,7 +118,7 @@ class CheckpointInputPipelineHook(session_run_hook.SessionRunHook):
pipeline.
For saving the input pipeline checkpoint alongside the model weights use
- @{tf.contrib.data.make_saveable_from_iterator} directly to create a
+ `tf.contrib.data.make_saveable_from_iterator` directly to create a
`SaveableObject` and add to the `SAVEABLE_OBJECTS` collection. Note, however,
that you will need to be careful not to restore the training iterator during
eval. You can do that by not adding the iterator to the SAVEABLE_OBJECTS
diff --git a/tensorflow/contrib/data/python/ops/optimization.py b/tensorflow/contrib/data/python/ops/optimization.py
index 018c5115e1..fa1b851ad7 100644
--- a/tensorflow/contrib/data/python/ops/optimization.py
+++ b/tensorflow/contrib/data/python/ops/optimization.py
@@ -36,7 +36,7 @@ def assert_next(transformations):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
@@ -56,7 +56,7 @@ def optimize(optimizations=None):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
diff --git a/tensorflow/contrib/data/python/ops/prefetching_ops.py b/tensorflow/contrib/data/python/ops/prefetching_ops.py
index 0243c72c70..be6fb69fee 100644
--- a/tensorflow/contrib/data/python/ops/prefetching_ops.py
+++ b/tensorflow/contrib/data/python/ops/prefetching_ops.py
@@ -92,7 +92,7 @@ def function_buffering_resource_reset(function_buffer_resource, name=None):
# pylint: disable=protected-access
class _PrefetchToDeviceIterator(object):
- """A replacement for @{tf.data.Iterator} that prefetches to another device.
+ """A replacement for `tf.data.Iterator` that prefetches to another device.
Args:
input_dataset: The input dataset
@@ -158,7 +158,7 @@ class _PrefetchToDeviceIterator(object):
self._input_dataset)
def get_next(self, name=None):
- """See @{tf.data.Iterator.get_next}."""
+ """See `tf.data.Iterator.get_next`."""
self._get_next_call_count += 1
if self._get_next_call_count > iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD:
warnings.warn(iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE)
@@ -199,7 +199,7 @@ class _PrefetchToDeviceIterator(object):
class _PrefetchToDeviceEagerIterator(iterator_ops.EagerIterator):
- """A replacement for @{tf.data.Iterator} that prefetches to another device.
+ """A replacement for `tf.data.Iterator` that prefetches to another device.
Args:
input_dataset: The input dataset
@@ -334,7 +334,7 @@ class _PrefetchToDeviceDataset(dataset_ops.Dataset):
def prefetch_to_device(device, buffer_size=None):
"""A transformation that prefetches dataset values to the given `device`.
- NOTE: Although the transformation creates a @{tf.data.Dataset}, the
+ NOTE: Although the transformation creates a `tf.data.Dataset`, the
transformation must be the final `Dataset` in the input pipeline.
Args:
@@ -344,7 +344,7 @@ def prefetch_to_device(device, buffer_size=None):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _PrefetchToDeviceDataset(dataset, device, buffer_size)
@@ -361,7 +361,7 @@ def copy_to_device(target_device, source_device="/cpu:0"):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
diff --git a/tensorflow/contrib/data/python/ops/readers.py b/tensorflow/contrib/data/python/ops/readers.py
index 14d69f8d5b..3882d4bfdb 100644
--- a/tensorflow/contrib/data/python/ops/readers.py
+++ b/tensorflow/contrib/data/python/ops/readers.py
@@ -234,7 +234,7 @@ def make_tf_record_dataset(
Args:
file_pattern: List of files or patterns of TFRecord file paths.
- See @{tf.gfile.Glob} for pattern rules.
+ See `tf.gfile.Glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
parser_fn: (Optional.) A function accepting string input to parse
@@ -340,7 +340,7 @@ def make_csv_dataset(
Args:
file_pattern: List of files or patterns of file paths containing CSV
- records. See @{tf.gfile.Glob} for pattern rules.
+ records. See `tf.gfile.Glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
column_names: An optional list of strings that corresponds to the CSV
diff --git a/tensorflow/contrib/data/python/ops/resampling.py b/tensorflow/contrib/data/python/ops/resampling.py
index 182a5c6ff3..75642f143e 100644
--- a/tensorflow/contrib/data/python/ops/resampling.py
+++ b/tensorflow/contrib/data/python/ops/resampling.py
@@ -50,7 +50,7 @@ def rejection_resample(class_func, target_dist, initial_dist=None, seed=None):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
diff --git a/tensorflow/contrib/data/python/ops/scan_ops.py b/tensorflow/contrib/data/python/ops/scan_ops.py
index ea9dcfe68f..6b002b4a53 100644
--- a/tensorflow/contrib/data/python/ops/scan_ops.py
+++ b/tensorflow/contrib/data/python/ops/scan_ops.py
@@ -151,7 +151,7 @@ class _ScanDataset(dataset_ops.Dataset):
def scan(initial_state, scan_func):
"""A transformation that scans a function across an input dataset.
- This transformation is a stateful relative of @{tf.data.Dataset.map}.
+ This transformation is a stateful relative of `tf.data.Dataset.map`.
In addition to mapping `scan_func` across the elements of the input dataset,
`scan()` accumulates one or more state tensors, whose initial values are
`initial_state`.
@@ -166,7 +166,7 @@ def scan(initial_state, scan_func):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _ScanDataset(dataset, initial_state, scan_func)
diff --git a/tensorflow/contrib/data/python/ops/shuffle_ops.py b/tensorflow/contrib/data/python/ops/shuffle_ops.py
index d7f8a73fe3..4356721704 100644
--- a/tensorflow/contrib/data/python/ops/shuffle_ops.py
+++ b/tensorflow/contrib/data/python/ops/shuffle_ops.py
@@ -92,11 +92,11 @@ def shuffle_and_repeat(buffer_size, count=None, seed=None):
indefinitely.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
- @{tf.set_random_seed} for behavior.
+ `tf.set_random_seed` for behavior.
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset): # pylint: disable=missing-docstring
diff --git a/tensorflow/contrib/data/python/ops/sliding.py b/tensorflow/contrib/data/python/ops/sliding.py
index e9dd74530a..8025dcdd16 100644
--- a/tensorflow/contrib/data/python/ops/sliding.py
+++ b/tensorflow/contrib/data/python/ops/sliding.py
@@ -109,7 +109,7 @@ def sliding_window_batch(window_size,
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
Raises:
ValueError: if invalid arguments are provided.
diff --git a/tensorflow/contrib/data/python/ops/stats_ops.py b/tensorflow/contrib/data/python/ops/stats_ops.py
index 97931f75bd..3b4e981402 100644
--- a/tensorflow/contrib/data/python/ops/stats_ops.py
+++ b/tensorflow/contrib/data/python/ops/stats_ops.py
@@ -29,7 +29,7 @@ class StatsAggregator(object):
"""A stateful resource that aggregates statistics from one or more iterators.
To record statistics, use one of the custom transformation functions defined
- in this module when defining your @{tf.data.Dataset}. All statistics will be
+ in this module when defining your `tf.data.Dataset`. All statistics will be
aggregated by the `StatsAggregator` that is associated with a particular
iterator (see below). For example, to record the total number of bytes
produced by iterating over a dataset:
@@ -39,7 +39,7 @@ class StatsAggregator(object):
dataset = dataset.apply(stats_ops.bytes_produced_stats("total_bytes"))
```
- To associate a `StatsAggregator` with a @{tf.data.Iterator} object, use
+ To associate a `StatsAggregator` with a `tf.data.Iterator` object, use
the following pattern:
```python
@@ -55,7 +55,7 @@ class StatsAggregator(object):
To get a protocol buffer summary of the currently aggregated statistics,
use the `StatsAggregator.get_summary()` tensor. The easiest way to do this
- is to add the returned tensor to the @{tf.GraphKeys.SUMMARIES} collection,
+ is to add the returned tensor to the `tf.GraphKeys.SUMMARIES` collection,
so that the summaries will be included with any existing summaries.
```python
@@ -74,13 +74,13 @@ class StatsAggregator(object):
self._resource = gen_dataset_ops.stats_aggregator_handle()
def get_summary(self):
- """Returns a string @{tf.Tensor} that summarizes the aggregated statistics.
+ """Returns a string `tf.Tensor` that summarizes the aggregated statistics.
- The returned tensor will contain a serialized @{tf.summary.Summary} protocol
+ The returned tensor will contain a serialized `tf.summary.Summary` protocol
buffer, which can be used with the standard TensorBoard logging facilities.
Returns:
- A scalar string @{tf.Tensor} that summarizes the aggregated statistics.
+ A scalar string `tf.Tensor` that summarizes the aggregated statistics.
"""
return gen_dataset_ops.stats_aggregator_summary(self._resource)
@@ -122,7 +122,7 @@ def set_stats_aggregator(stats_aggregator):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
@@ -145,7 +145,7 @@ def bytes_produced_stats(tag):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
@@ -169,7 +169,7 @@ def latency_stats(tag):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
@@ -192,7 +192,7 @@ def feature_stats(tag):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
diff --git a/tensorflow/contrib/data/python/ops/threadpool.py b/tensorflow/contrib/data/python/ops/threadpool.py
index 9af1e784ff..dc67accdcf 100644
--- a/tensorflow/contrib/data/python/ops/threadpool.py
+++ b/tensorflow/contrib/data/python/ops/threadpool.py
@@ -100,6 +100,6 @@ def override_threadpool(dataset, thread_pool):
Returns:
A dataset containing the same values as `dataset`, but which uses
`thread_pool` to compute any of its parallel operations (such as
- @{tf.data.Dataset.map}).
+ `tf.data.Dataset.map`).
"""
return _ThreadPoolDataset(dataset, thread_pool)
diff --git a/tensorflow/contrib/data/python/ops/unique.py b/tensorflow/contrib/data/python/ops/unique.py
index e0ce0a4ef1..e0d606311c 100644
--- a/tensorflow/contrib/data/python/ops/unique.py
+++ b/tensorflow/contrib/data/python/ops/unique.py
@@ -38,7 +38,7 @@ def unique():
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
diff --git a/tensorflow/contrib/data/python/ops/writers.py b/tensorflow/contrib/data/python/ops/writers.py
index f53bd3f738..c455fdcba6 100644
--- a/tensorflow/contrib/data/python/ops/writers.py
+++ b/tensorflow/contrib/data/python/ops/writers.py
@@ -38,13 +38,13 @@ class TFRecordWriter(object):
argument_dtype=dtypes.string)
def write(self, dataset):
- """Returns a @{tf.Operation} to write a dataset to a file.
+ """Returns a `tf.Operation` to write a dataset to a file.
Args:
- dataset: a @{tf.data.Dataset} whose elements are to be written to a file
+ dataset: a `tf.data.Dataset` whose elements are to be written to a file
Returns:
- A @{tf.Operation} that, when run, writes contents of `dataset` to a file.
+ A `tf.Operation` that, when run, writes contents of `dataset` to a file.
"""
if not isinstance(dataset, dataset_ops.Dataset):
raise TypeError("`dataset` must be a `tf.data.Dataset` object.")
diff --git a/tensorflow/contrib/distribute/python/cross_tower_ops.py b/tensorflow/contrib/distribute/python/cross_tower_ops.py
index 9b5534393e..3a7addf221 100644
--- a/tensorflow/contrib/distribute/python/cross_tower_ops.py
+++ b/tensorflow/contrib/distribute/python/cross_tower_ops.py
@@ -157,7 +157,7 @@ class CrossTowerOps(object):
Args:
aggregation: Indicates how a variable will be aggregated. Accepted values
- are @{tf.VariableAggregation.SUM}, @{tf.VariableAggregation.MEAN}.
+ are `tf.VariableAggregation.SUM`, `tf.VariableAggregation.MEAN`.
per_device_value: a PerDevice object.
destinations: the reduction destinations.
@@ -181,7 +181,7 @@ class CrossTowerOps(object):
Args:
aggregation: Indicates how a variable will be aggregated. Accepted values
- are @{tf.VariableAggregation.SUM}, @{tf.VariableAggregation.MEAN}.
+ are `tf.VariableAggregation.SUM`, `tf.VariableAggregation.MEAN`.
value_destination_pairs: a list or a tuple of tuples of PerDevice objects
and destinations. If a destination is None, then the destinations
are set to match the devices of the input PerDevice object.
@@ -305,7 +305,7 @@ def _ungroup_and_make_mirrored(grouped_reduced,
cross_tower_utils.aggregate_gradients_using*.
destinations: a list of device strings for returned Mirrored objects.
aggregation: Indicates how a variable will be aggregated. Accepted values
- are @{tf.VariableAggregation.SUM}, @{tf.VariableAggregation.MEAN}.
+ are `tf.VariableAggregation.SUM`, `tf.VariableAggregation.MEAN`.
num_between_graph_workers: number of workers in the between-graph
replication.
diff --git a/tensorflow/contrib/distribute/python/parameter_server_strategy.py b/tensorflow/contrib/distribute/python/parameter_server_strategy.py
index f2c7fd556a..407c78df95 100644
--- a/tensorflow/contrib/distribute/python/parameter_server_strategy.py
+++ b/tensorflow/contrib/distribute/python/parameter_server_strategy.py
@@ -77,16 +77,16 @@ class ParameterServerStrategy(distribute_lib.DistributionStrategy):
GPUs) even if there is only CPU or one GPU. When defining the `fn`, extra
caution needs to be taken:
- 1) Always use @{tf.get_variable} instead of @{tf.Variable} which is not able
+ 1) Always use `tf.get_variable` instead of `tf.Variable` which is not able
to refer to the same variable on different towers.
2) It is generally not recommended to open a device scope under the strategy's
- scope. A device scope (i.e. calling @{tf.device}) will be merged with or
+ scope. A device scope (i.e. calling `tf.device`) will be merged with or
override the device for operations but will not change the device for
variables.
3) It is also not recommended to open a colocation scope (i.e. calling
- @{tf.colocate_with}) under the strategy's scope. For colocating variables,
+ `tf.colocate_with`) under the strategy's scope. For colocating variables,
use `distribution.colocate_vars_with` instead. Colocation of ops will possibly
create conflicts of device assignment.
"""
diff --git a/tensorflow/contrib/distribute/python/prefetching_ops_v2.py b/tensorflow/contrib/distribute/python/prefetching_ops_v2.py
index 24cdc627a3..1ff60c0762 100644
--- a/tensorflow/contrib/distribute/python/prefetching_ops_v2.py
+++ b/tensorflow/contrib/distribute/python/prefetching_ops_v2.py
@@ -35,7 +35,7 @@ from tensorflow.python.util import nest
# pylint: disable=protected-access
class _PrefetchToDeviceIterator(object):
- """A replacement for @{tf.data.Iterator} that prefetches to another device.
+ """A replacement for `tf.data.Iterator` that prefetches to another device.
Args:
input_dataset: The input dataset.
@@ -108,7 +108,7 @@ class _PrefetchToDeviceIterator(object):
self._input_dataset)
def get_next(self, name=None):
- """See @{tf.data.Iterator.get_next}."""
+ """See `tf.data.Iterator.get_next`."""
self._get_next_call_count += 1
if self._get_next_call_count > iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD:
warnings.warn(iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE)
@@ -209,7 +209,7 @@ class _PrefetchToDeviceDataset(dataset_ops.Dataset):
def prefetch_to_devices(devices, buffer_size=None):
"""A transformation that prefetches dataset values to the given `devices`.
- NOTE: Although the transformation creates a @{tf.data.Dataset}, the
+ NOTE: Although the transformation creates a `tf.data.Dataset`, the
transformation must be the final `Dataset` in the input pipeline.
Args:
@@ -220,7 +220,7 @@ def prefetch_to_devices(devices, buffer_size=None):
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
return _PrefetchToDeviceDataset(dataset, devices, buffer_size)
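As the docstring notes, the returned function is meant for `Dataset.apply` and must be the last transformation in the pipeline. A sketch, under the assumption that the internal contrib.distribute module is importable at the path below:

```python
import tensorflow as tf
# Internal module path; treating its availability as an assumption.
from tensorflow.contrib.distribute.python import prefetching_ops_v2

dataset = tf.data.Dataset.range(100).batch(10)
# Prefetch to two devices; this must be the final transformation.
dataset = dataset.apply(
    prefetching_ops_v2.prefetch_to_devices(["/device:GPU:0", "/device:GPU:1"]))
```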
diff --git a/tensorflow/contrib/eager/python/datasets.py b/tensorflow/contrib/eager/python/datasets.py
index 16844e0d68..135095a979 100644
--- a/tensorflow/contrib/eager/python/datasets.py
+++ b/tensorflow/contrib/eager/python/datasets.py
@@ -28,7 +28,7 @@ class Iterator(iterator_ops.EagerIterator):
"""An iterator producing tf.Tensor objects from a tf.data.Dataset.
NOTE: Unlike the iterator created by the
- @{tf.data.Dataset.make_one_shot_iterator} method, this class enables
+ `tf.data.Dataset.make_one_shot_iterator` method, this class enables
additional experimental functionality, such as prefetching to the GPU.
"""
diff --git a/tensorflow/contrib/eager/python/saver.py b/tensorflow/contrib/eager/python/saver.py
index d709308647..f9c716360c 100644
--- a/tensorflow/contrib/eager/python/saver.py
+++ b/tensorflow/contrib/eager/python/saver.py
@@ -161,7 +161,7 @@ class Saver(object):
Args:
file_prefix: Path prefix where parameters were previously saved.
Typically obtained from a previous `save()` call, or from
- @{tf.train.latest_checkpoint}.
+ `tf.train.latest_checkpoint`.
"""
with ops.device("/device:CPU:0"):
self._saver.restore(None, file_prefix)
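For context, `file_prefix` pairs naturally with `tf.train.latest_checkpoint`; a minimal eager-mode sketch (the checkpoint directory and variable are hypothetical):

```python
import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()
v = tfe.Variable(1.0, name="v")          # hypothetical variable
saver = tfe.Saver([v])
saver.save("/tmp/ckpt/model")            # writes /tmp/ckpt/model-*
saver.restore(tf.train.latest_checkpoint("/tmp/ckpt"))
```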
diff --git a/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py b/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py
index 2eef60c39f..724bc2c82f 100644
--- a/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py
+++ b/tensorflow/contrib/estimator/python/estimator/dnn_linear_combined.py
@@ -147,7 +147,7 @@ class DNNLinearCombinedEstimator(estimator.Estimator):
if a categorical column is multivalent. One of "mean", "sqrtn", and
"sum" -- these are effectively different ways to do example-level
normalization, which can be useful for bag-of-words features. For more
- details, see @{tf.feature_column.linear_model$linear_model}.
+ details, see `tf.feature_column.linear_model`.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
diff --git a/tensorflow/contrib/estimator/python/estimator/extenders.py b/tensorflow/contrib/estimator/python/estimator/extenders.py
index bf08be09e7..26449b4651 100644
--- a/tensorflow/contrib/estimator/python/estimator/extenders.py
+++ b/tensorflow/contrib/estimator/python/estimator/extenders.py
@@ -34,7 +34,7 @@ _VALID_METRIC_FN_ARGS = set(['features', 'labels', 'predictions', 'config'])
def add_metrics(estimator, metric_fn):
- """Creates a new @{tf.estimator.Estimator} which has given metrics.
+ """Creates a new `tf.estimator.Estimator` which has given metrics.
Example:
@@ -61,7 +61,7 @@ def add_metrics(estimator, metric_fn):
```
Args:
- estimator: A @{tf.estimator.Estimator} object.
+ estimator: A `tf.estimator.Estimator` object.
metric_fn: A function which should obey the following signature:
- Args: can only have following four arguments in any order:
* predictions: Predictions `Tensor` or dict of `Tensor` created by given
@@ -79,7 +79,7 @@ def add_metrics(estimator, metric_fn):
function, namely a `(metric_tensor, update_op)` tuple.
Returns:
- A new @{tf.estimator.Estimator} which has a union of original metrics with
+ A new `tf.estimator.Estimator` which has a union of original metrics with
given ones.
"""
_verify_metric_fn_args(metric_fn)
@@ -165,14 +165,14 @@ def forward_features(estimator, keys=None):
```
Args:
- estimator: A @{tf.estimator.Estimator} object.
+ estimator: A `tf.estimator.Estimator` object.
keys: a `string` or a `list` of `string`. If it is `None`, all of the
`features` in `dict` are forwarded to the `predictions`. If it is a
`string`, only the given key is forwarded. If it is a `list` of strings, all
the given `keys` are forwarded.
Returns:
- A new @{tf.estimator.Estimator} which forwards features to predictions.
+ A new `tf.estimator.Estimator` which forwards features to predictions.
Raises:
ValueError:
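To make the `add_metrics` contract concrete, here is a minimal sketch; `base_estimator` and `eval_input_fn` are hypothetical stand-ins for a pre-built binary classifier and its evaluation input:

```python
import tensorflow as tf

def metric_fn(labels, predictions):
    # metric_fn may accept any subset of: features, labels, predictions, config.
    return {"auc": tf.metrics.auc(labels, predictions["logistic"])}

# base_estimator and eval_input_fn are hypothetical stand-ins.
estimator = tf.contrib.estimator.add_metrics(base_estimator, metric_fn)
estimator.evaluate(input_fn=eval_input_fn)  # reports "auc" with the defaults
```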
diff --git a/tensorflow/contrib/estimator/python/estimator/linear.py b/tensorflow/contrib/estimator/python/estimator/linear.py
index 62a37abefb..2b68f24eb2 100644
--- a/tensorflow/contrib/estimator/python/estimator/linear.py
+++ b/tensorflow/contrib/estimator/python/estimator/linear.py
@@ -121,7 +121,7 @@ class LinearEstimator(estimator.Estimator):
is multivalent. One of "mean", "sqrtn", and "sum" -- these are
effectively different ways to do example-level normalization, which can
be useful for bag-of-words features. For more details, see
- @{tf.feature_column.linear_model$linear_model}.
+ `tf.feature_column.linear_model`.
"""
def _model_fn(features, labels, mode, config):
return linear_lib._linear_model_fn( # pylint: disable=protected-access
diff --git a/tensorflow/contrib/factorization/python/ops/kmeans.py b/tensorflow/contrib/factorization/python/ops/kmeans.py
index 9ffdd3ba5e..4d8d5004fe 100644
--- a/tensorflow/contrib/factorization/python/ops/kmeans.py
+++ b/tensorflow/contrib/factorization/python/ops/kmeans.py
@@ -158,12 +158,12 @@ class _ModelFn(object):
return either `features` or, equivalently, `(features, None)`.
Args:
- features: The input points. See @{tf.estimator.Estimator}.
- mode: See @{tf.estimator.Estimator}.
- config: See @{tf.estimator.Estimator}.
+ features: The input points. See `tf.estimator.Estimator`.
+ mode: See `tf.estimator.Estimator`.
+ config: See `tf.estimator.Estimator`.
Returns:
- A @{tf.estimator.EstimatorSpec} (see @{tf.estimator.Estimator}) specifying
+ A `tf.estimator.EstimatorSpec` (see `tf.estimator.Estimator`) specifying
this behavior:
* `train_op`: Execute one mini-batch or full-batch run of Lloyd's
algorithm.
@@ -394,7 +394,7 @@ class KMeansClustering(estimator.Estimator):
relative_tolerance: A relative tolerance of change in the loss between
iterations. Stops learning if the loss changes less than this amount.
This may not work correctly if `use_mini_batch=True`.
- config: See @{tf.estimator.Estimator}.
+ config: See `tf.estimator.Estimator`.
feature_columns: An optional iterable containing all the feature columns
used by the model. All items in the set should be feature column
instances that can be passed to `tf.feature_column.input_layer`. If this
@@ -431,7 +431,7 @@ class KMeansClustering(estimator.Estimator):
"""Finds the index of the closest cluster center to each input point.
Args:
- input_fn: Input points. See @{tf.estimator.Estimator.predict}.
+ input_fn: Input points. See `tf.estimator.Estimator.predict`.
Yields:
The index of the closest cluster center for each input point.
@@ -447,7 +447,7 @@ class KMeansClustering(estimator.Estimator):
which returns the negative sum.
Args:
- input_fn: Input points. See @{tf.estimator.Estimator.evaluate}. Only one
+ input_fn: Input points. See `tf.estimator.Estimator.evaluate`. Only one
batch is retrieved.
Returns:
@@ -465,7 +465,7 @@ class KMeansClustering(estimator.Estimator):
sklearn function returns the Euclidean distance.
Args:
- input_fn: Input points. See @{tf.estimator.Estimator.predict}.
+ input_fn: Input points. See `tf.estimator.Estimator.predict`.
Yields:
The distances from each input point to each cluster center.
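A compact usage sketch of the estimator these docstrings describe (the data and cluster count are hypothetical):

```python
import numpy as np
import tensorflow as tf

points = np.random.rand(1000, 2).astype(np.float32)  # hypothetical data

def input_fn():
    # One epoch over the full batch of points, so train() terminates.
    return tf.train.limit_epochs(tf.convert_to_tensor(points), num_epochs=1)

kmeans = tf.contrib.factorization.KMeansClustering(num_clusters=5)
kmeans.train(input_fn)
cluster_indices = list(kmeans.predict_cluster_index(input_fn))
```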
diff --git a/tensorflow/contrib/framework/python/ops/variables.py b/tensorflow/contrib/framework/python/ops/variables.py
index 322d5c335e..a7acae804a 100644
--- a/tensorflow/contrib/framework/python/ops/variables.py
+++ b/tensorflow/contrib/framework/python/ops/variables.py
@@ -241,13 +241,13 @@ def variable(name,
use_resource: If `True` use a ResourceVariable instead of a Variable.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
- @{tf.VariableSynchronization}. By default the synchronization is set to
+ `tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
- @{tf.VariableAggregation}.
+ `tf.VariableAggregation`.
Returns:
The created or existing variable.
@@ -320,13 +320,13 @@ def model_variable(name,
use_resource: If `True` use a ResourceVariable instead of a Variable.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
- @{tf.VariableSynchronization}. By default the synchronization is set to
+ `tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
- @{tf.VariableAggregation}.
+ `tf.VariableAggregation`.
Returns:
The created or existing variable.
diff --git a/tensorflow/contrib/image/python/ops/sparse_image_warp.py b/tensorflow/contrib/image/python/ops/sparse_image_warp.py
index 54a215d6db..1ea8f705b7 100644
--- a/tensorflow/contrib/image/python/ops/sparse_image_warp.py
+++ b/tensorflow/contrib/image/python/ops/sparse_image_warp.py
@@ -112,10 +112,10 @@ def sparse_image_warp(image,
Apply a non-linear warp to the image, where the warp is specified by
the source and destination locations of a (potentially small) number of
control points. First, we use a polyharmonic spline
- (@{tf.contrib.image.interpolate_spline}) to interpolate the displacements
+ (`tf.contrib.image.interpolate_spline`) to interpolate the displacements
between the corresponding control points to a dense flow field.
Then, we warp the image using this dense flow field
- (@{tf.contrib.image.dense_image_warp}).
+ (`tf.contrib.image.dense_image_warp`).
Let t index our control points. For regularization_weight=0, we have:
warped_image[b, dest_control_point_locations[b, t, 0],
@@ -126,7 +126,7 @@ def sparse_image_warp(image,
For regularization_weight > 0, this condition is met approximately, since
regularized interpolation trades off smoothness of the interpolant vs.
reconstruction of the interpolant at the control points.
- See @{tf.contrib.image.interpolate_spline} for further documentation of the
+ See `tf.contrib.image.interpolate_spline` for further documentation of the
interpolation_order and regularization_weight arguments.
diff --git a/tensorflow/contrib/keras/__init__.py b/tensorflow/contrib/keras/__init__.py
index a162f0cb58..cecf1ddcdb 100644
--- a/tensorflow/contrib/keras/__init__.py
+++ b/tensorflow/contrib/keras/__init__.py
@@ -15,7 +15,7 @@
# ==============================================================================
"""Implementation of the Keras API meant to be a high-level API for TensorFlow.
-This module an alias for @{tf.keras}, for backwards compatibility.
+This module is an alias for `tf.keras`, for backwards compatibility.
Detailed documentation and user guides are also available at
[keras.io](https://keras.io).
diff --git a/tensorflow/contrib/kernel_methods/README.md b/tensorflow/contrib/kernel_methods/README.md
index 44ed9670a0..1bce3277ff 100644
--- a/tensorflow/contrib/kernel_methods/README.md
+++ b/tensorflow/contrib/kernel_methods/README.md
@@ -21,13 +21,15 @@ Currently, there is a [RandomFourierFeatureMapper](https://www.tensorflow.org/co
output. More mappers are on the way.
## Kernel-based Estimators
-These are estimators inheriting from the @{tf.contrib.learn.Estimator} class and
-use kernel mappers internally to discover non-linearities in the data. These
-canned estimators map their input features using kernel mapper Ops and then
-apply linear models to the mapped features. Combining kernel mappers with linear
-models and different loss functions leads to a variety of models: linear and
-non-linear SVMs, linear regression (with and without kernels) and (multinomial)
-logistic regression (with and without kernels).
+
+These estimators inherit from the
+[`tf.contrib.learn.Estimator`](https://www.tensorflow.org/code/tensorflow/contrib/learn/python/learn/estimators/estimator.py)
+class and use kernel mappers internally to discover non-linearities in the
+data. These canned estimators map their input features using kernel mapper
+Ops and then apply linear models to the mapped features. Combining kernel
+mappers with linear models and different loss functions leads to a variety of
+models: linear and non-linear SVMs, linear regression (with and without
+kernels) and (multinomial) logistic regression (with and without kernels).
Currently there is a [KernelLinearClassifier](https://www.tensorflow.org/code/tensorflow/contrib/kernel_methods/python/kernel_estimators.py) implemented but more pre-packaged estimators
are on the way.
diff --git a/tensorflow/contrib/layers/python/layers/initializers.py b/tensorflow/contrib/layers/python/layers/initializers.py
index 51610f21b2..1192198ec2 100644
--- a/tensorflow/contrib/layers/python/layers/initializers.py
+++ b/tensorflow/contrib/layers/python/layers/initializers.py
@@ -47,7 +47,7 @@ def xavier_initializer(uniform=True, seed=None, dtype=dtypes.float32):
Args:
uniform: Whether to use uniform or normal distributed random initialization.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed} for behavior.
+ `tf.set_random_seed` for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
@@ -98,7 +98,7 @@ def variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False,
mode: String. 'FAN_IN', 'FAN_OUT', 'FAN_AVG'.
uniform: Whether to use uniform or normal distributed random initialization.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed} for behavior.
+ `tf.set_random_seed` for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
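Both initializers take the same `seed` argument; a quick sketch of passing one to a variable (the shape is arbitrary):

```python
import tensorflow as tf

# Seeding makes the initializer deterministic, per the tf.set_random_seed note.
init = tf.contrib.layers.xavier_initializer(uniform=True, seed=42)
w = tf.get_variable("w", shape=[784, 256], initializer=init)
```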
diff --git a/tensorflow/contrib/layers/python/layers/layers.py b/tensorflow/contrib/layers/python/layers/layers.py
index 6250f88529..04668f112d 100644
--- a/tensorflow/contrib/layers/python/layers/layers.py
+++ b/tensorflow/contrib/layers/python/layers/layers.py
@@ -1584,7 +1584,7 @@ def dropout(inputs,
outputs_collections: Collection to add the outputs.
scope: Optional scope for name_scope.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed} for behavior.
+ `tf.set_random_seed` for behavior.
Returns:
A tensor representing the output of the operation.
diff --git a/tensorflow/contrib/learn/python/learn/estimators/kmeans.py b/tensorflow/contrib/learn/python/learn/estimators/kmeans.py
index 66ebcfd1d8..21f7dcc5e4 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/kmeans.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/kmeans.py
@@ -15,9 +15,9 @@
"""Implementation of k-means clustering on top of `Estimator` API (deprecated).
This module is deprecated. Please use
-@{tf.contrib.factorization.KMeansClustering} instead of
-@{tf.contrib.learn.KMeansClustering}. It has a similar interface, but uses the
-@{tf.estimator.Estimator} API instead of @{tf.contrib.learn.Estimator}.
+`tf.contrib.factorization.KMeansClustering` instead of
+`tf.contrib.learn.KMeansClustering`. It has a similar interface, but uses the
+`tf.estimator.Estimator` API instead of `tf.contrib.learn.Estimator`.
"""
from __future__ import absolute_import
diff --git a/tensorflow/contrib/learn/python/learn/estimators/run_config.py b/tensorflow/contrib/learn/python/learn/estimators/run_config.py
index c36879e048..08f23aa223 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/run_config.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/run_config.py
@@ -221,7 +221,7 @@ class ClusterConfig(object):
class RunConfig(ClusterConfig, core_run_config.RunConfig):
"""This class specifies the configurations for an `Estimator` run.
- This class is a deprecated implementation of @{tf.estimator.RunConfig}
+ This class is a deprecated implementation of `tf.estimator.RunConfig`
interface.
"""
_USE_DEFAULT = 0
diff --git a/tensorflow/contrib/learn/python/learn/experiment.py b/tensorflow/contrib/learn/python/learn/experiment.py
index 08e907a608..4e64efdd95 100644
--- a/tensorflow/contrib/learn/python/learn/experiment.py
+++ b/tensorflow/contrib/learn/python/learn/experiment.py
@@ -162,16 +162,16 @@ class Experiment(object):
Args:
estimator: Object implementing Estimator interface, which could be a
- combination of @{tf.contrib.learn.Trainable} and
- @{tf.contrib.learn.Evaluable} (deprecated), or
- @{tf.estimator.Estimator}.
+ combination of `tf.contrib.learn.Trainable` and
+ `tf.contrib.learn.Evaluable` (deprecated), or
+ `tf.estimator.Estimator`.
train_input_fn: function, returns features and labels for training.
eval_input_fn: function, returns features and labels for evaluation. If
`eval_steps` is `None`, this should be configured to produce only a
finite number of batches (generally, 1 epoch over the evaluation data).
eval_metrics: `dict` of string, metric function. If `None`, default set
is used. This should be `None` if the `estimator` is
- @{tf.estimator.Estimator}. If metrics are provided they will be
+ `tf.estimator.Estimator`. If metrics are provided they will be
*appended* to the default set.
train_steps: Perform this many steps of training. `None`, the default,
means train forever.
diff --git a/tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils.py b/tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils.py
index 66af6833da..4f22054af3 100644
--- a/tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils.py
+++ b/tensorflow/contrib/learn/python/learn/utils/saved_model_export_utils.py
@@ -415,7 +415,7 @@ def make_export_strategy(serving_input_fn,
`InputFnOps`.
default_output_alternative_key: the name of the head to serve when an
incoming serving request does not explicitly request a specific head.
- Must be `None` if the estimator inherits from @{tf.estimator.Estimator}
+ Must be `None` if the estimator inherits from `tf.estimator.Estimator`
or for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
@@ -453,7 +453,7 @@ def make_export_strategy(serving_input_fn,
The string path to the exported directory.
Raises:
- ValueError: If `estimator` is a @{tf.estimator.Estimator} instance
+ ValueError: If `estimator` is a `tf.estimator.Estimator` instance
and `default_output_alternative_key` was specified.
"""
if isinstance(estimator, core_estimator.Estimator):
@@ -504,7 +504,7 @@ def make_parsing_export_strategy(feature_columns,
that must be provided at serving time (excluding labels!).
default_output_alternative_key: the name of the head to serve when an
incoming serving request does not explicitly request a specific head.
- Must be `None` if the estimator inherits from @{tf.estimator.Estimator}
+ Must be `None` if the estimator inherits from `tf.estimator.Estimator`
or for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
@@ -767,7 +767,7 @@ def extend_export_strategy(base_export_strategy,
The string path to the SavedModel indicated by post_export_fn.
Raises:
- ValueError: If `estimator` is a @{tf.estimator.Estimator} instance
+ ValueError: If `estimator` is a `tf.estimator.Estimator` instance
and `default_output_alternative_key` was specified or if post_export_fn
does not return a valid directory.
RuntimeError: If unable to create temporary or final export directory.
diff --git a/tensorflow/contrib/lite/python/convert.py b/tensorflow/contrib/lite/python/convert.py
index ec49738fb5..11d4bdbe82 100644
--- a/tensorflow/contrib/lite/python/convert.py
+++ b/tensorflow/contrib/lite/python/convert.py
@@ -54,7 +54,7 @@ def toco_convert_protos(model_flags_str, toco_flags_str, input_data_str):
"""Convert `input_data_str` according to model and toco parameters.
Unless you know what you are doing consider using
- the more friendly @{tf.contrib.lite.toco_convert}}.
+ the more friendly `tf.contrib.lite.toco_convert`.
Args:
model_flags_str: Serialized proto describing model properties, see
diff --git a/tensorflow/contrib/mixed_precision/python/loss_scale_manager.py b/tensorflow/contrib/mixed_precision/python/loss_scale_manager.py
index be7377b151..eba505881f 100644
--- a/tensorflow/contrib/mixed_precision/python/loss_scale_manager.py
+++ b/tensorflow/contrib/mixed_precision/python/loss_scale_manager.py
@@ -41,12 +41,12 @@ class LossScaleManager(object):
applied on variables.
This class is used together with
- @{tf.contrib.mixed_precision.LossScaleOptimizer} for mixed precision training
+ `tf.contrib.mixed_precision.LossScaleOptimizer` for mixed precision training
(float32 variables and float16 ops) on Nvidia GPUs in order to achieve the
same model quality as single precision training, with the benefits of
potential higher throughput.
- See @{tf.contrib.mixed_precision.LossScaleOptimizer} for more details.
+ See `tf.contrib.mixed_precision.LossScaleOptimizer` for more details.
"""
@abc.abstractmethod
diff --git a/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py b/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
index 93050a3ae3..fcce52a07a 100644
--- a/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
+++ b/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
@@ -103,7 +103,7 @@ class LossScaleOptimizer(optimizer.Optimizer):
Args:
opt: The actual optimizer that will be used to compute and apply the
- gradients. Must be an implementation of the @{tf.train.Optimizer}
+ gradients. Must be an implementation of the `tf.train.Optimizer`
interface.
loss_scale_manager: A LossScaleManager object.
"""
@@ -117,7 +117,7 @@ class LossScaleOptimizer(optimizer.Optimizer):
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
- """Compute gradients. See base class @{tf.train.Optimizer}."""
+ """Compute gradients. See base class `tf.train.Optimizer`."""
loss_scale = self._loss_scale_manager.get_loss_scale()
if context.executing_eagerly():
@@ -141,7 +141,7 @@ class LossScaleOptimizer(optimizer.Optimizer):
return self._down_scale(grads_and_vars, loss_scale)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
- """Apply gradients. See base class @{tf.train.Optimizer}."""
+ """Apply gradients. See base class `tf.train.Optimizer`."""
grads = [g for (g, _) in grads_and_vars]
is_finite_grad = []
diff --git a/tensorflow/contrib/model_pruning/python/layers/rnn_cells.py b/tensorflow/contrib/model_pruning/python/layers/rnn_cells.py
index a5b050d25d..5f6c6aea74 100644
--- a/tensorflow/contrib/model_pruning/python/layers/rnn_cells.py
+++ b/tensorflow/contrib/model_pruning/python/layers/rnn_cells.py
@@ -48,7 +48,7 @@ class MaskedBasicLSTMCell(tf_rnn.BasicLSTMCell):
It does not allow cell clipping, a projection layer, and does not
use peep-hole connections: it is the basic baseline.
- For advanced models, please use the full @{tf.nn.rnn_cell.LSTMCell}
+ For advanced models, please use the full `tf.nn.rnn_cell.LSTMCell`
that follows.
"""
diff --git a/tensorflow/contrib/nn/python/ops/alpha_dropout.py b/tensorflow/contrib/nn/python/ops/alpha_dropout.py
index 2f92d05ba8..98f4264fe0 100644
--- a/tensorflow/contrib/nn/python/ops/alpha_dropout.py
+++ b/tensorflow/contrib/nn/python/ops/alpha_dropout.py
@@ -43,7 +43,7 @@ def alpha_dropout(x, keep_prob, noise_shape=None, seed=None, name=None): # pylin
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed} for behavior.
+ `tf.set_random_seed` for behavior.
name: A name for this operation (optional).
Returns:
diff --git a/tensorflow/contrib/nn/python/ops/sampling_ops.py b/tensorflow/contrib/nn/python/ops/sampling_ops.py
index e65925610c..de71b0845e 100644
--- a/tensorflow/contrib/nn/python/ops/sampling_ops.py
+++ b/tensorflow/contrib/nn/python/ops/sampling_ops.py
@@ -123,15 +123,15 @@ def rank_sampled_softmax_loss(weights,
"""Computes softmax loss using rank-based adaptive resampling.
This has been shown to improve rank loss after training compared to
- @{tf.nn.sampled_softmax_loss}. For a description of the algorithm and some
+ `tf.nn.sampled_softmax_loss`. For a description of the algorithm and some
experimental results, please see: [TAPAS: Two-pass Approximate Adaptive
Sampling for Softmax](https://arxiv.org/abs/1707.03073).
Sampling follows two phases:
* In the first phase, `num_sampled` classes are selected using
- @{tf.nn.learned_unigram_candidate_sampler} or supplied `sampled_values`.
+ `tf.nn.learned_unigram_candidate_sampler` or supplied `sampled_values`.
The logits are calculated on those sampled classes. This phase is
- similar to @{tf.nn.sampled_softmax_loss}.
+ similar to `tf.nn.sampled_softmax_loss`.
* In the second phase, the `num_resampled` classes with highest predicted
probability are kept. Probabilities are
`LogSumExp(logits / resampling_temperature)`, where the sum is over
@@ -142,7 +142,7 @@ def rank_sampled_softmax_loss(weights,
picks more candidates close to the predicted classes. A common strategy is
to decrease the temperature as training proceeds.
- See @{tf.nn.sampled_softmax_loss} for more documentation on sampling and
+ See `tf.nn.sampled_softmax_loss` for more documentation on sampling and
for typical default values for some of the parameters.
This operation is for training only. It is generally an underestimate of
@@ -197,7 +197,7 @@ def rank_sampled_softmax_loss(weights,
where a sampled class equals one of the target classes.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(weights) > 1`. Currently `"div"` and `"mod"` are supported.
- See @{tf.nn.embedding_lookup} for more details.
+ See `tf.nn.embedding_lookup` for more details.
name: A name for the operation (optional).
Returns:
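A hedged sketch of calling this loss with the parameters discussed above; all tensors and sizes are hypothetical placeholders:

```python
import tensorflow as tf

loss = tf.contrib.nn.rank_sampled_softmax_loss(
    weights=softmax_weights,        # [num_classes, dim], hypothetical
    biases=softmax_biases,          # [num_classes], hypothetical
    labels=target_classes,          # [batch_size, num_true], hypothetical
    inputs=hidden_layer,            # [batch_size, dim], hypothetical
    num_sampled=512,
    num_resampled=128,              # classes kept in the second phase
    num_classes=50000,
    num_true=1,
    sampled_values=None,            # defaults to learned_unigram_candidate_sampler
    resampling_temperature=1.0,
    remove_accidental_hits=True,
    partition_strategy="div")
```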
diff --git a/tensorflow/contrib/rnn/python/ops/rnn_cell.py b/tensorflow/contrib/rnn/python/ops/rnn_cell.py
index 1816b469ee..f74c95f962 100644
--- a/tensorflow/contrib/rnn/python/ops/rnn_cell.py
+++ b/tensorflow/contrib/rnn/python/ops/rnn_cell.py
@@ -3276,7 +3276,7 @@ class IndyLSTMCell(rnn_cell_impl.LayerRNNCell):
It does not allow cell clipping, a projection layer, and does not
use peep-hole connections: it is the basic baseline.
- For advanced models, please use the full @{tf.nn.rnn_cell.LSTMCell}
+ For advanced models, please use the full `tf.nn.rnn_cell.LSTMCell`
that follows.
TODO(gonnet): Write a paper describing this and add a reference here.
diff --git a/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py b/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
index 1c9d179e3c..0ba32cd3bf 100644
--- a/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
+++ b/tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
@@ -382,8 +382,8 @@ class LuongAttention(_BaseAttentionMechanism):
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
probability_fn: (optional) A `callable`. Converts the score to
- probabilities. The default is @{tf.nn.softmax}. Other options include
- @{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
+ probabilities. The default is `tf.nn.softmax`. Other options include
+ `tf.contrib.seq2seq.hardmax` and `tf.contrib.sparsemax.sparsemax`.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
@@ -529,8 +529,8 @@ class BahdanauAttention(_BaseAttentionMechanism):
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
probability_fn: (optional) A `callable`. Converts the score to
- probabilities. The default is @{tf.nn.softmax}. Other options include
- @{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
+ probabilities. The default is `tf.nn.softmax`. Other options include
+ `tf.contrib.seq2seq.hardmax` and `tf.contrib.sparsemax.sparsemax`.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
@@ -1091,7 +1091,7 @@ class AttentionWrapper(rnn_cell_impl.RNNCell):
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
- @{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
+ `tf.contrib.seq2seq.tile_batch` (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
diff --git a/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py b/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
index f17dbb0fe3..74741a7bd6 100644
--- a/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
+++ b/tensorflow/contrib/seq2seq/python/ops/beam_search_decoder.py
@@ -234,7 +234,7 @@ class BeamSearchDecoder(decoder.Decoder):
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
- @{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
+ `tf.contrib.seq2seq.tile_batch` (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
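Both of the preceding notes reference the same tiling requirement; a sketch with hypothetical encoder tensors:

```python
import tensorflow as tf
from tensorflow.contrib import seq2seq

# Tile encoder results to beam_width via tile_batch, NOT tf.tile (see above).
tiled_outputs = seq2seq.tile_batch(encoder_outputs, multiplier=beam_width)
tiled_lengths = seq2seq.tile_batch(source_sequence_length, multiplier=beam_width)

attention = seq2seq.LuongAttention(
    num_units, memory=tiled_outputs, memory_sequence_length=tiled_lengths)
```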
diff --git a/tensorflow/contrib/signal/python/kernel_tests/test_util.py b/tensorflow/contrib/signal/python/kernel_tests/test_util.py
index 7d6289532a..b4422a4988 100644
--- a/tensorflow/contrib/signal/python/kernel_tests/test_util.py
+++ b/tensorflow/contrib/signal/python/kernel_tests/test_util.py
@@ -27,15 +27,15 @@ def grappler_optimize(graph, fetches=None, rewriter_config=None):
"""Tries to optimize the provided graph using grappler.
Args:
- graph: A @{tf.Graph} instance containing the graph to optimize.
+ graph: A `tf.Graph` instance containing the graph to optimize.
fetches: An optional list of `Tensor`s to fetch (i.e. not optimize away).
Grappler uses the 'train_op' collection to look for fetches, so if not
provided this collection should be non-empty.
- rewriter_config: An optional @{tf.RewriterConfig} to use when rewriting the
+ rewriter_config: An optional `tf.RewriterConfig` to use when rewriting the
graph.
Returns:
- A @{tf.GraphDef} containing the rewritten graph.
+ A `tf.GraphDef` containing the rewritten graph.
"""
if rewriter_config is None:
rewriter_config = rewriter_config_pb2.RewriterConfig()
diff --git a/tensorflow/contrib/signal/python/ops/mel_ops.py b/tensorflow/contrib/signal/python/ops/mel_ops.py
index 062d84aea1..ecc2fedb9f 100644
--- a/tensorflow/contrib/signal/python/ops/mel_ops.py
+++ b/tensorflow/contrib/signal/python/ops/mel_ops.py
@@ -108,7 +108,7 @@ def linear_to_mel_weight_matrix(num_mel_bins=20,
# `M` has shape [frames, num_mel_bins]
M = tf.matmul(S, A)
- The matrix can be used with @{tf.tensordot} to convert an arbitrary rank
+ The matrix can be used with `tf.tensordot` to convert an arbitrary rank
`Tensor` of linear-scale spectral bins into the mel scale.
# S has shape [..., num_spectrogram_bins].
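Continuing the docstring's example, the `tf.tensordot` form it mentions looks like this; the spectrogram tensor `S` and the bin counts are hypothetical:

```python
import tensorflow as tf

# S: [..., 129] linear-scale magnitude spectrograms, hypothetical input.
A = tf.contrib.signal.linear_to_mel_weight_matrix(
    num_mel_bins=64, num_spectrogram_bins=129, sample_rate=16000,
    lower_edge_hertz=125.0, upper_edge_hertz=7600.0)
M = tf.tensordot(S, A, 1)  # handles arbitrary rank, unlike tf.matmul
M.set_shape(S.shape[:-1].concatenate(A.shape[-1:]))  # tensordot drops shape info
```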
diff --git a/tensorflow/contrib/summary/summary.py b/tensorflow/contrib/summary/summary.py
index d22b80ac88..42898e797c 100644
--- a/tensorflow/contrib/summary/summary.py
+++ b/tensorflow/contrib/summary/summary.py
@@ -17,7 +17,7 @@
The operations in this package are safe to use with eager execution turned on or
off. It has a more flexible API that allows summaries to be written directly
from ops to places other than event log files, rather than propagating protos
-from @{tf.summary.merge_all} to @{tf.summary.FileWriter}.
+from `tf.summary.merge_all` to `tf.summary.FileWriter`.
To use with eager execution enabled, write your code as follows:
diff --git a/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py b/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
index c104b2403c..029492b489 100644
--- a/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
+++ b/tensorflow/contrib/tpu/python/tpu/tpu_estimator.py
@@ -224,7 +224,7 @@ class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=prote
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
- @{tf.estimator.Estimator}. While `EstimatorSpec.eval_metric_ops` expects a
+ `tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
@@ -247,7 +247,7 @@ class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=prote
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must be >= rank 1. The `host_call` is useful for writing
- summaries with @{tf.contrib.summary.create_file_writer}.
+ summaries with `tf.contrib.summary.create_file_writer`.
"""
def __new__(cls,
diff --git a/tensorflow/contrib/training/python/training/tensor_queue_dataset.py b/tensorflow/contrib/training/python/training/tensor_queue_dataset.py
index a2444934bc..f46d03209c 100644
--- a/tensorflow/contrib/training/python/training/tensor_queue_dataset.py
+++ b/tensorflow/contrib/training/python/training/tensor_queue_dataset.py
@@ -156,7 +156,7 @@ def prepend_from_queue_and_padded_batch_dataset(batch_size,
Returns:
A `Dataset` transformation function, which can be passed to
- @{tf.data.Dataset.apply}.
+ `tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
diff --git a/tensorflow/core/api_def/base_api/api_def_GatherNd.pbtxt b/tensorflow/core/api_def/base_api/api_def_GatherNd.pbtxt
index 342a1f6b05..a0e42dd02c 100644
--- a/tensorflow/core/api_def/base_api/api_def_GatherNd.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_GatherNd.pbtxt
@@ -27,7 +27,7 @@ slice of `params`:
output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
-Whereas in @{tf.gather} `indices` defines slices into the first
+Whereas in `tf.gather` `indices` defines slices into the first
dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
first `N` dimensions of `params`, where `N = indices.shape[-1]`.
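The contrast described here, in miniature:

```python
import tensorflow as tf

params = tf.constant([["a", "b"], ["c", "d"]])
rows = tf.gather(params, [1, 0])        # slices into the first dimension
elems = tf.gather_nd(params, [[1, 0]])  # indexes the first N=2 dimensions

with tf.Session() as sess:
    print(sess.run(rows))   # [[b'c' b'd'] [b'a' b'b']]
    print(sess.run(elems))  # [b'c']
```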
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdAdd.pbtxt
index 2b58969da2..d9c4d5a4a4 100644
--- a/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdAdd.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdAdd.pbtxt
@@ -63,7 +63,7 @@ The resulting update to ref would look like this:
[1, 12, 3, 14, 14, 6, 7, 20]
-See @{tf.scatter_nd} for more details about how to make updates to
+See `tf.scatter_nd` for more details about how to make updates to
slices.
END
}
diff --git a/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdUpdate.pbtxt b/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdUpdate.pbtxt
index 17b79ee30c..d724cfccec 100644
--- a/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdUpdate.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ResourceScatterNdUpdate.pbtxt
@@ -63,7 +63,7 @@ The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
-See @{tf.scatter_nd} for more details about how to make updates to
+See `tf.scatter_nd` for more details about how to make updates to
slices.
END
}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNd.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNd.pbtxt
index ad1c527b01..0b5917d428 100644
--- a/tensorflow/core/api_def/base_api/api_def_ScatterNd.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNd.pbtxt
@@ -30,7 +30,7 @@ END
Creates a new tensor by applying sparse `updates` to individual values or
slices within a tensor (initially zero for numeric, empty for string) of
the given `shape` according to indices. This operator is the inverse of the
-@{tf.gather_nd} operator which extracts values or slices from a given tensor.
+`tf.gather_nd` operator which extracts values or slices from a given tensor.
If `indices` contains duplicates, then their updates are accumulated (summed).
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNdAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNdAdd.pbtxt
index a9a7646314..5929425bc8 100644
--- a/tensorflow/core/api_def/base_api/api_def_ScatterNdAdd.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNdAdd.pbtxt
@@ -66,7 +66,7 @@ The resulting update to ref would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
-See @{tf.scatter_nd} for more details about how to make updates to
+See `tf.scatter_nd` for more details about how to make updates to
slices.
END
}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNdNonAliasingAdd.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNdNonAliasingAdd.pbtxt
index 35116e5f6a..fa15538f8c 100644
--- a/tensorflow/core/api_def/base_api/api_def_ScatterNdNonAliasingAdd.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNdNonAliasingAdd.pbtxt
@@ -61,6 +61,6 @@ The resulting value `output` would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
-See @{tf.scatter_nd} for more details about how to make updates to slices.
+See `tf.scatter_nd` for more details about how to make updates to slices.
END
}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNdSub.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNdSub.pbtxt
index 99e5c4908b..67346f051e 100644
--- a/tensorflow/core/api_def/base_api/api_def_ScatterNdSub.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNdSub.pbtxt
@@ -66,7 +66,7 @@ The resulting update to ref would look like this:
[1, -9, 3, -6, -4, 6, 7, -4]
-See @{tf.scatter_nd} for more details about how to make updates to
+See `tf.scatter_nd` for more details about how to make updates to
slices.
END
}
diff --git a/tensorflow/core/api_def/base_api/api_def_ScatterNdUpdate.pbtxt b/tensorflow/core/api_def/base_api/api_def_ScatterNdUpdate.pbtxt
index cb57c171b9..1a75e67c0c 100644
--- a/tensorflow/core/api_def/base_api/api_def_ScatterNdUpdate.pbtxt
+++ b/tensorflow/core/api_def/base_api/api_def_ScatterNdUpdate.pbtxt
@@ -68,7 +68,7 @@ The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
-See @{tf.scatter_nd} for more details about how to make updates to
+See `tf.scatter_nd` for more details about how to make updates to
slices.
END
}
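All of the scatter-op docs above defer to `tf.scatter_nd`; its canonical rank-1 example:

```python
import tensorflow as tf

indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
shape = tf.constant([8])
scattered = tf.scatter_nd(indices, updates, shape)  # inserts into zeros

with tf.Session() as sess:
    print(sess.run(scattered))  # [0, 11, 0, 10, 9, 0, 0, 12]
```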
diff --git a/tensorflow/g3doc/README.txt b/tensorflow/g3doc/README.txt
index ed648f8b6b..515a9e9a02 100644
--- a/tensorflow/g3doc/README.txt
+++ b/tensorflow/g3doc/README.txt
@@ -22,12 +22,12 @@ When authoring docs, note that we have some new syntax for references --
at least for docs coming from Python docstrings or
tensorflow/docs_src/. Use:
-* @{tf.symbol} to make a link to the reference page for a Python
+* `tf.symbol` to make a link to the reference page for a Python
symbol. Note that class members don't get their own page, but the
- syntax still works, since @{tf.MyClass.method} links to the right
+ syntax still works, since `tf.MyClass.method` links to the right
part of the tf.MyClass page.
-* @{tensorflow::symbol} to make a link to the reference page for a C++
+* `tensorflow::symbol` to make a link to the reference page for a C++
symbol. (This only works for a few symbols but will work for more soon.)
* @{$doc_page} to make a link to another (not an API reference) doc
diff --git a/tensorflow/python/client/session.py b/tensorflow/python/client/session.py
index 58a002c776..28f26ad27e 100644
--- a/tensorflow/python/client/session.py
+++ b/tensorflow/python/client/session.py
@@ -724,7 +724,7 @@ class BaseSession(SessionInterface):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
- @{tf.Operation.run} or @{tf.Tensor.eval} should be executed in
+ `tf.Operation.run` or `tf.Tensor.eval` should be executed in
this session.
```python
@@ -736,7 +736,7 @@ class BaseSession(SessionInterface):
print(c.eval())
```
- To get the current default session, use @{tf.get_default_session}.
+ To get the current default session, use `tf.get_default_session`.
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
@@ -765,7 +765,7 @@ class BaseSession(SessionInterface):
*N.B.* Entering a `with sess.as_default():` block does not affect
the current default graph. If you are using multiple graphs, and
- `sess.graph` is different from the value of @{tf.get_default_graph},
+ `sess.graph` is different from the value of `tf.get_default_graph`,
you must explicitly enter a `with sess.graph.as_default():` block
to make `sess.graph` the default graph.
@@ -786,14 +786,14 @@ class BaseSession(SessionInterface):
nested list, tuple, namedtuple, dict, or OrderedDict containing graph
elements at its leaves. A graph element can be one of the following types:
- * An @{tf.Operation}.
+ * A `tf.Operation`.
The corresponding fetched value will be `None`.
- * A @{tf.Tensor}.
+ * A `tf.Tensor`.
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
- * A @{tf.SparseTensor}.
+ * A `tf.SparseTensor`.
The corresponding fetched value will be a
- @{tf.SparseTensorValue}
+ `tf.SparseTensorValue`
containing the value of that sparse tensor.
* A `get_tensor_handle` op. The corresponding fetched value will be a
numpy ndarray containing the handle of that tensor.
@@ -829,16 +829,16 @@ class BaseSession(SessionInterface):
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
- * If the key is a @{tf.Tensor}, the
+ * If the key is a `tf.Tensor`, the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
- @{tf.placeholder}, the shape of
+ `tf.placeholder`, the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
- @{tf.SparseTensor},
+ `tf.SparseTensor`,
the value should be a
- @{tf.SparseTensorValue}.
+ `tf.SparseTensorValue`.
* If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
@@ -1120,7 +1120,7 @@ class BaseSession(SessionInterface):
For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th
argument to the returned callable must be a numpy ndarray (or something
convertible to an ndarray) with matching element type and shape. See
- @{tf.Session.run} for details of the allowable feed key and value types.
+ `tf.Session.run` for details of the allowable feed key and value types.
The returned callable will have the same return type as
`tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,
@@ -1128,14 +1128,14 @@ class BaseSession(SessionInterface):
it will return `None`.
Args:
- fetches: A value or list of values to fetch. See @{tf.Session.run}
+ fetches: A value or list of values to fetch. See `tf.Session.run`
for details of the allowable fetch types.
feed_list: (Optional.) A list of `feed_dict` keys. See
- @{tf.Session.run} for details of the allowable feed key types.
+ `tf.Session.run` for details of the allowable feed key types.
accept_options: (Optional.) Iff `True`, the returned `Callable` will be
- able to accept @{tf.RunOptions} and @{tf.RunMetadata} as optional
+ able to accept `tf.RunOptions` and `tf.RunMetadata` as optional
keyword arguments `options` and `run_metadata`, respectively, with
- the same syntax and semantics as @{tf.Session.run}, which is useful
+ the same syntax and semantics as `tf.Session.run`, which is useful
for certain use cases (profiling and debugging) but will result in
measurable slowdown of the `Callable`'s performance. Default: `False`.
@@ -1145,7 +1145,7 @@ class BaseSession(SessionInterface):
Raises:
TypeError: If `fetches` or `feed_list` cannot be interpreted
- as arguments to @{tf.Session.run}.
+ as arguments to `tf.Session.run`.
"""
if feed_list is not None:
if not isinstance(feed_list, (list, tuple)):
@@ -1453,10 +1453,10 @@ class Session(BaseSession):
```
A session may own resources, such as
- @{tf.Variable}, @{tf.QueueBase},
- and @{tf.ReaderBase}. It is important to release
+ `tf.Variable`, `tf.QueueBase`,
+ and `tf.ReaderBase`. It is important to release
these resources when they are no longer required. To do this, either
- invoke the @{tf.Session.close} method on the session, or use
+ invoke the `tf.Session.close` method on the session, or use
the session as a context manager. The following two examples are
equivalent:
@@ -1592,8 +1592,8 @@ class InteractiveSession(BaseSession):
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
- The methods @{tf.Tensor.eval}
- and @{tf.Operation.run}
+ The methods `tf.Tensor.eval`
+ and `tf.Operation.run`
will use that session to run ops.
This is convenient in interactive shells and [IPython
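A short sketch of the default-session mechanics these hunks document:

```python
import tensorflow as tf

c = tf.constant(42.0)
sess = tf.Session()
with sess.as_default():
    assert tf.get_default_session() is sess
    print(c.eval())  # equivalent to sess.run(c)
sess.close()  # as_default() does not close the session for you
```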
diff --git a/tensorflow/python/data/ops/dataset_ops.py b/tensorflow/python/data/ops/dataset_ops.py
index 6cda2a77cc..8ba98cb88d 100644
--- a/tensorflow/python/data/ops/dataset_ops.py
+++ b/tensorflow/python/data/ops/dataset_ops.py
@@ -222,7 +222,7 @@ class Dataset(object):
Note that if `tensors` contains a NumPy array, and eager execution is not
enabled, the values will be embedded in the graph as one or more
- @{tf.constant} operations. For large datasets (> 1 GB), this can waste
+ `tf.constant` operations. For large datasets (> 1 GB), this can waste
memory and run into byte limits of graph serialization. If tensors contains
one or more large NumPy arrays, consider the alternative described in
@{$guide/datasets#consuming_numpy_arrays$this guide}.
@@ -241,7 +241,7 @@ class Dataset(object):
Note that if `tensors` contains a NumPy array, and eager execution is not
enabled, the values will be embedded in the graph as one or more
- @{tf.constant} operations. For large datasets (> 1 GB), this can waste
+ `tf.constant` operations. For large datasets (> 1 GB), this can waste
memory and run into byte limits of graph serialization. If tensors contains
one or more large NumPy arrays, consider the alternative described in
@{$guide/datasets#consuming_numpy_arrays$this guide}.
@@ -331,7 +331,7 @@ class Dataset(object):
```
NOTE: The current implementation of `Dataset.from_generator()` uses
- @{tf.py_func} and inherits the same constraints. In particular, it
+ `tf.py_func` and inherits the same constraints. In particular, it
requires the `Dataset`- and `Iterator`-related operations to be placed
on a device in the same process as the Python program that called
`Dataset.from_generator()`. The body of `generator` will not be
@@ -641,7 +641,7 @@ class Dataset(object):
Defaults to `True`.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
- @{tf.set_random_seed} for behavior.
+ `tf.set_random_seed` for behavior.
Returns:
Dataset: A `Dataset` of strings corresponding to file names.
@@ -706,7 +706,7 @@ class Dataset(object):
dataset will sample.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
- @{tf.set_random_seed} for behavior.
+ `tf.set_random_seed` for behavior.
reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
that the dataset should be pseudorandomly reshuffled each time it is
iterated over. (Defaults to `True`.)
@@ -863,7 +863,7 @@ class Dataset(object):
This transformation combines multiple consecutive elements of the input
dataset into a single element.
- Like @{tf.data.Dataset.batch}, the tensors in the resulting element will
+ Like `tf.data.Dataset.batch`, the tensors in the resulting element will
have an additional outer dimension, which will be `batch_size` (or
`N % batch_size` for the last element if `batch_size` does not divide the
number of input elements `N` evenly and `drop_remainder` is `False`). If
@@ -871,7 +871,7 @@ class Dataset(object):
should set the `drop_remainder` argument to `True` to prevent the smaller
batch from being produced.
- Unlike @{tf.data.Dataset.batch}, the input elements to be batched may have
+ Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
different shapes, and this transformation will pad each component to the
respective shape in `padding_shapes`. The `padding_shapes` argument
determines the resulting shape for each dimension of each component in an
@@ -883,8 +883,8 @@ class Dataset(object):
will be padded out to the maximum length of all elements in that
dimension.
- See also @{tf.contrib.data.dense_to_sparse_batch}, which combines elements
- that may have different shapes into a @{tf.SparseTensor}.
+ See also `tf.contrib.data.dense_to_sparse_batch`, which combines elements
+ that may have different shapes into a `tf.SparseTensor`.
Args:
batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
@@ -1039,7 +1039,7 @@ class Dataset(object):
elements are produced. `cycle_length` controls the number of input elements
that are processed concurrently. If you set `cycle_length` to 1, this
transformation will handle one input element at a time, and will produce
- identical results = to @{tf.data.Dataset.flat_map}. In general,
+ identical results to `tf.data.Dataset.flat_map`. In general,
this transformation will apply `map_func` to `cycle_length` input elements,
open iterators on the returned `Dataset` objects, and cycle through them
producing `block_length` consecutive elements from each iterator, and
@@ -1306,7 +1306,7 @@ class _NestedDatasetComponent(object):
class _VariantDataset(Dataset):
- """A Dataset wrapper around a @{tf.variant}-typed function argument."""
+ """A Dataset wrapper around a `tf.variant`-typed function argument."""
def __init__(self, dataset_variant, structure):
super(_VariantDataset, self).__init__()
@@ -1342,20 +1342,20 @@ class StructuredFunctionWrapper(object):
func: A function from a nested structure to another nested structure.
transformation_name: Human-readable name of the transformation in which
this function is being instantiated, for error messages.
- dataset: (Optional.) A @{tf.data.Dataset}. If given, the structure of this
+ dataset: (Optional.) A `tf.data.Dataset`. If given, the structure of this
dataset will be assumed as the structure for `func` arguments; otherwise
`input_classes`, `input_shapes`, and `input_types` must be defined.
input_classes: (Optional.) A nested structure of `type`. If given, this
argument defines the Python types for `func` arguments.
- input_shapes: (Optional.) A nested structure of @{tf.TensorShape}. If
+ input_shapes: (Optional.) A nested structure of `tf.TensorShape`. If
given, this argument defines the shapes and structure for `func`
arguments.
- input_types: (Optional.) A nested structure of @{tf.DType}. If given, this
+ input_types: (Optional.) A nested structure of `tf.DType`. If given, this
argument defines the element types and structure for `func` arguments.
add_to_graph: (Optional.) If `True`, the function will be added to the
default graph.
experimental_nested_dataset_support: (Optional.) If `True`, the function
- will support @{tf.data.Dataset} objects as arguments and return values.
+ will support `tf.data.Dataset` objects as arguments and return values.
Raises:
ValueError: If an invalid combination of `dataset`, `input_classes`,
@@ -1478,7 +1478,7 @@ class StructuredFunctionWrapper(object):
self._function._create_definition_if_needed() # pylint: disable=protected-access
def _defun_args(self):
- """Returns a flat list of @{tf.DType} for the input element structure."""
+ """Returns a flat list of `tf.DType` for the input element structure."""
ret = []
for input_type, input_class in zip(nest.flatten(self._input_types),
nest.flatten(self._input_classes)):
@@ -1523,7 +1523,7 @@ def flat_structure(dataset):
`**flat_structure(self)` to the op constructor.
Args:
- dataset: A @{tf.data.Dataset}.
+ dataset: A `tf.data.Dataset`.
Returns:
A dictionary of keyword arguments that can be passed to many Dataset op
@@ -1846,7 +1846,7 @@ class ShuffleDataset(Dataset):
dataset will sample.
seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
random seed that will be used to create the distribution. See
- @{tf.set_random_seed} for behavior.
+ `tf.set_random_seed` for behavior.
reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
that the dataset should be pseudorandomly reshuffled each time it is
iterated over. (Defaults to `True`.)
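The `padded_batch` behavior described in the hunks above, in miniature:

```python
import tensorflow as tf

# Elements are vectors of growing length; each batch pads to its longest element.
dataset = tf.data.Dataset.range(8).map(
    lambda x: tf.fill([tf.cast(x, tf.int32)], x))
dataset = dataset.padded_batch(4, padded_shapes=[None])

nxt = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    print(sess.run(nxt))  # first batch: rows zero-padded to length 3
```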
diff --git a/tensorflow/python/data/ops/iterator_ops.py b/tensorflow/python/data/ops/iterator_ops.py
index f2dfea69a8..8f8e026df9 100644
--- a/tensorflow/python/data/ops/iterator_ops.py
+++ b/tensorflow/python/data/ops/iterator_ops.py
@@ -220,9 +220,9 @@ class Iterator(checkpointable.CheckpointableBase):
"""Creates a new, uninitialized `Iterator` based on the given handle.
This method allows you to define a "feedable" iterator where you can choose
- between concrete iterators by feeding a value in a @{tf.Session.run} call.
- In that case, `string_handle` would be a @{tf.placeholder}, and you would
- feed it with the value of @{tf.data.Iterator.string_handle} in each step.
+ between concrete iterators by feeding a value in a `tf.Session.run` call.
+ In that case, `string_handle` would be a `tf.placeholder`, and you would
+ feed it with the value of `tf.data.Iterator.string_handle` in each step.
For example, if you had two iterators that marked the current position in
a training dataset and a test dataset, you could choose which to use in
@@ -362,9 +362,9 @@ class Iterator(checkpointable.CheckpointableBase):
In graph mode, you should typically call this method *once* and use its
result as the input to another computation. A typical loop will then call
- @{tf.Session.run} on the result of that computation. The loop will terminate
+ `tf.Session.run` on the result of that computation. The loop will terminate
when the `Iterator.get_next()` operation raises
- @{tf.errors.OutOfRangeError}. The following skeleton shows how to use
+ `tf.errors.OutOfRangeError`. The following skeleton shows how to use
this method when building a training loop:
```python
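# Editorial sketch reconstructing the skeleton elided by the diff context;
# `model_function` is a placeholder, not part of the original docstring.
dataset = ...  # A `tf.data.Dataset` object.
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
loss = model_function(next_element)
train_op = tf.train.AdagradOptimizer(0.01).minimize(loss)

with tf.Session() as sess:
  sess.run(iterator.initializer)
  while True:
    try:
      sess.run(train_op)
    except tf.errors.OutOfRangeError:
      break
```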
diff --git a/tensorflow/python/data/ops/optional_ops.py b/tensorflow/python/data/ops/optional_ops.py
index 1d3007ef76..b75b98dc72 100644
--- a/tensorflow/python/data/ops/optional_ops.py
+++ b/tensorflow/python/data/ops/optional_ops.py
@@ -33,8 +33,8 @@ class Optional(object):
An `Optional` can represent the result of an operation that may fail as a
value, rather than raising an exception and halting execution. For example,
- @{tf.contrib.data.get_next_as_optional} returns an `Optional` that either
- contains the next value from a @{tf.data.Iterator} if one exists, or a "none"
+ `tf.contrib.data.get_next_as_optional` returns an `Optional` that either
+ contains the next value from a `tf.data.Iterator` if one exists, or a "none"
value that indicates the end of the sequence has been reached.
"""
@@ -55,7 +55,7 @@ class Optional(object):
"""Returns a nested structure of values wrapped by this optional.
If this optional does not have a value (i.e. `self.has_value()` evaluates
- to `False`), this operation will raise @{tf.errors.InvalidArgumentError}
+ to `False`), this operation will raise `tf.errors.InvalidArgumentError`
at runtime.
Args:
diff --git a/tensorflow/python/data/util/convert.py b/tensorflow/python/data/util/convert.py
index 746b3d66de..ba297900b0 100644
--- a/tensorflow/python/data/util/convert.py
+++ b/tensorflow/python/data/util/convert.py
@@ -36,11 +36,11 @@ def optional_param_to_tensor(argument_name,
def partial_shape_to_tensor(shape_like):
- """Returns a @{tf.Tensor} that represents the given shape.
+ """Returns a `tf.Tensor` that represents the given shape.
Args:
- shape_like: A value that can be converted to a @{tf.TensorShape} or a
- @{tf.Tensor}.
+ shape_like: A value that can be converted to a `tf.TensorShape` or a
+ `tf.Tensor`.
Returns:
A 1-D `tf.Tensor` of `tf.int64` elements representing the given shape, where
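A sketch of the described behavior (editorial; assumes unknown dimensions are encoded as -1, the usual tf.data convention for partial shapes):

```python
import tensorflow as tf
from tensorflow.python.data.util import convert

t = convert.partial_shape_to_tensor(tf.TensorShape([None, 3]))
# `t` is a 1-D tf.int64 Tensor evaluating to [-1, 3].
```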
diff --git a/tensorflow/python/data/util/random_seed.py b/tensorflow/python/data/util/random_seed.py
index e2c9d8672f..d5169f7a53 100644
--- a/tensorflow/python/data/util/random_seed.py
+++ b/tensorflow/python/data/util/random_seed.py
@@ -29,14 +29,14 @@ from tensorflow.python.ops import math_ops
def get_seed(seed):
"""Returns the local seeds an operation should use given an op-specific seed.
- See @{tf.get_seed} for more details. This wrapper adds support for the case
+ See `tf.get_seed` for more details. This wrapper adds support for the case
where `seed` may be a tensor.
Args:
- seed: An integer or a @{tf.int64} scalar tensor.
+ seed: An integer or a `tf.int64` scalar tensor.
Returns:
- A tuple of two @{tf.int64} scalar tensors that should be used for the local
+ A tuple of two `tf.int64` scalar tensors that should be used for the local
seed of the calling dataset.
"""
seed, seed2 = random_seed.get_seed(seed)
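For context, a minimal sketch (editorial) of the graph-level/op-level seed interaction that `tf.set_random_seed` governs:

```python
import tensorflow as tf

tf.set_random_seed(1234)            # graph-level seed
x = tf.random_uniform([2], seed=7)  # op-level seed; together: deterministic
```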
diff --git a/tensorflow/python/debug/lib/debug_gradients.py b/tensorflow/python/debug/lib/debug_gradients.py
index 589a13db7f..5e95bcba47 100644
--- a/tensorflow/python/debug/lib/debug_gradients.py
+++ b/tensorflow/python/debug/lib/debug_gradients.py
@@ -69,7 +69,7 @@ class GradientsDebugger(object):
"""Gradients Debugger.
Allows retrieval of gradient tensors created by TensorFlow's automatic
- differentiation algorithm, i.e., @{tf.gradients} and optimizer classes that
+ differentiation algorithm, i.e., `tf.gradients` and optimizer classes that
use it.
"""
# TODO(cais): Add examples code in the doc string?
@@ -142,8 +142,8 @@ class GradientsDebugger(object):
Args:
input_tensor: the input `tf.Tensor` object whose related gradient tensors
are to be registered with this `GradientsDebugger` instance when they
- are created, e.g., during @{tf.gradients} calls or the construction
- of optimization (training) op that uses @{tf.gradients}.
+ are created, e.g., during `tf.gradients` calls or the construction
+ of optimization (training) op that uses `tf.gradients`.
Returns:
A forwarded identity of `input_tensor`, as a `tf.Tensor`.
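A hedged usage sketch (editorial; `identify_gradient` is the method documented in this hunk, and `gradient_tensor` is assumed here to be its retrieval counterpart in the same module):

```python
import tensorflow as tf
from tensorflow.python.debug.lib import debug_gradients

x = tf.constant(5.0, name="x")
grad_debugger = debug_gradients.GradientsDebugger()
x_id = grad_debugger.identify_gradient(x)  # forwarded identity of `x`
y = x_id ** 2
dy_dx = tf.gradients(y, x)
x_grad = grad_debugger.gradient_tensor(x)  # registered gradient tensor
```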
diff --git a/tensorflow/python/debug/wrappers/dumping_wrapper.py b/tensorflow/python/debug/wrappers/dumping_wrapper.py
index 3fac2e5971..c02d5f66ec 100644
--- a/tensorflow/python/debug/wrappers/dumping_wrapper.py
+++ b/tensorflow/python/debug/wrappers/dumping_wrapper.py
@@ -45,7 +45,7 @@ class DumpingDebugWrapperSession(framework.NonInteractiveDebugWrapperSession):
session_root: (`str`) Path to the session root directory. Must be a
directory that does not exist or an empty directory. If the directory
does not exist, it will be created by the debugger core during debug
- @{tf.Session.run}
+ `tf.Session.run`
calls.
As the `run()` calls occur, subdirectories will be added to
`session_root`. The subdirectories' names have the following pattern:
diff --git a/tensorflow/python/eager/backprop.py b/tensorflow/python/eager/backprop.py
index 728b283695..553f761a14 100644
--- a/tensorflow/python/eager/backprop.py
+++ b/tensorflow/python/eager/backprop.py
@@ -646,7 +646,7 @@ class GradientTape(object):
Operations are recorded if they are executed within this context manager and
at least one of their inputs is being "watched".
- Trainable variables (created by `tf.Variable` or @{tf.get_variable},
+ Trainable variables (created by `tf.Variable` or `tf.get_variable`,
`trainable=True` is the default in both cases) are automatically watched. Tensors
can be manually watched by invoking the `watch` method on this context
manager.
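A minimal sketch of the watch semantics (editorial; assumes eager execution is enabled):

```python
import tensorflow as tf

x = tf.constant(3.0)
with tf.GradientTape() as tape:
  tape.watch(x)    # constants are not watched automatically
  y = x * x
dy_dx = tape.gradient(y, x)  # 6.0
```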
diff --git a/tensorflow/python/eager/context.py b/tensorflow/python/eager/context.py
index aa57ca03e6..6a327bd010 100644
--- a/tensorflow/python/eager/context.py
+++ b/tensorflow/python/eager/context.py
@@ -663,7 +663,7 @@ def internal_operation_seed():
def executing_eagerly():
"""Returns True if the current thread has eager execution enabled.
- Eager execution is typically enabled via @{tf.enable_eager_execution},
+ Eager execution is typically enabled via `tf.enable_eager_execution`,
but may also be enabled within the context of a Python function via
`tf.contrib.eager.py_func`.
"""
diff --git a/tensorflow/python/eager/function.py b/tensorflow/python/eager/function.py
index adbf5605ed..f87d88040f 100644
--- a/tensorflow/python/eager/function.py
+++ b/tensorflow/python/eager/function.py
@@ -1221,7 +1221,7 @@ def defun(func=None, input_signature=None, compiled=False):
"""Compiles a Python function into a callable TensorFlow graph.
`defun` (short for "define function") trace-compiles a Python function
- composed of TensorFlow operations into a callable that executes a @{tf.Graph}
+ composed of TensorFlow operations into a callable that executes a `tf.Graph`
containing those operations. The callable produced by `defun` contains only
the subgraph of TensorFlow operations that were executed when the Python
function was called with a particular input signature, defined as a list
@@ -1244,9 +1244,9 @@ def defun(func=None, input_signature=None, compiled=False):
For a Python function to be compatible with `defun`, all of its arguments must
be hashable Python objects or lists thereof. The function itself may not
modify the list/map structure of its arguments. Additionally, it must return
- zero or more @{tf.Tensor} objects. If the Python function returns
- a @{tf.Variable}, its compiled version will return the value of that variable
- as a @{tf.Tensor}.
+ zero or more `tf.Tensor` objects. If the Python function returns
+ a `tf.Variable`, its compiled version will return the value of that variable
+ as a `tf.Tensor`.
Executing a graph generated by `defun` respects device annotations (i.e.,
all `with tf.device` directives present in a Python function will also be
@@ -1315,7 +1315,7 @@ def defun(func=None, input_signature=None, compiled=False):
When using `defun`, there are subtleties regarding inputs, Python control
flow, and variable creation that one should be aware of. For concreteness, let
- `f` be a Python function that returns zero or more @{tf.Tensor} objects and
+ `f` be a Python function that returns zero or more `tf.Tensor` objects and
let `F = defun(f)`. `F` builds a graph for each unique input signature it
sees, Python control flow is baked into graphs, and operations related to
variable initialization are automatically lifted out of the graphs that `F`
@@ -1398,10 +1398,10 @@ def defun(func=None, input_signature=None, compiled=False):
On the other hand, because `defun` generates graphs by tracing and not by
source code analysis, it fully unrolls Python `for` and `while` loops,
potentially creating large graphs. If your Python function has native loops
- that run for many iterations, consider replacing them with @{tf.while_loop}
+ that run for many iterations, consider replacing them with `tf.while_loop`
operations.
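For example (editorial sketch), a bounded counter expressed as a single `tf.while_loop` construct rather than 1000 unrolled ops:

```python
import tensorflow as tf

i = tf.constant(0)
cond = lambda i: tf.less(i, 1000)
body = lambda i: tf.add(i, 1)
r = tf.while_loop(cond, body, [i])
```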
- When constructing graphs, @{tf.Tensor} objects cannot be used as Python
+ When constructing graphs, `tf.Tensor` objects cannot be used as Python
`bool` objects. This means, for example, that you should replace code in `f`
resembling
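(Editorial sketch of the replacement the docstring goes on to show:)

```python
# Instead of branching on a Tensor with Python control flow...
#   if x > 0:
#     y = x * x
#   else:
#     y = -x
# ...use graph-compatible control flow:
y = tf.cond(x > 0, lambda: x * x, lambda: -x)
```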
@@ -1420,7 +1420,7 @@ def defun(func=None, input_signature=None, compiled=False):
automatically lifted out of the graphs generated by `defun`. In practice, this
implies that variable creation and initialization only happen the first time
`F` is called, and that variables are reused every time thereafter. Many
- TensorFlow APIs, like @{tf.keras.layers.Layer} objects, create variables the
+ TensorFlow APIs, like `tf.keras.layers.Layer` objects, create variables the
first time they are called and reuse them thereafter. Automatic variable
lifting makes it possible to compile these APIs without extra effort, at the
cost of introducing a discrepancy between the semantics of executing Python
@@ -1459,7 +1459,7 @@ def defun(func=None, input_signature=None, compiled=False):
to reference the same set of variables, add logic to your Python function that
ensures that variables are only created the first time it is called and are
reused for every subsequent invocation; note that this is precisely what
- @{tf.keras.layers.Layer} objects do, so we recommend using them to represent
+ `tf.keras.layers.Layer` objects do, so we recommend using them to represent
variable-bearing computations whenever possible.
Args:
diff --git a/tensorflow/python/estimator/canned/dnn_linear_combined.py b/tensorflow/python/estimator/canned/dnn_linear_combined.py
index efa7812452..4945c3ba11 100644
--- a/tensorflow/python/estimator/canned/dnn_linear_combined.py
+++ b/tensorflow/python/estimator/canned/dnn_linear_combined.py
@@ -388,7 +388,7 @@ class DNNLinearCombinedClassifier(estimator.Estimator):
if a categorical column is multivalent. One of "mean", "sqrtn", and
"sum" -- these are effectively different ways to do example-level
normalization, which can be useful for bag-of-words features. For more
- details, see @{tf.feature_column.linear_model$linear_model}.
+ details, see `tf.feature_column.linear_model`.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
@@ -586,7 +586,7 @@ class DNNLinearCombinedRegressor(estimator.Estimator):
if a categorical column is multivalent. One of "mean", "sqrtn", and
"sum" -- these are effectively different ways to do example-level
normalization, which can be useful for bag-of-words features. For more
- details, see @{tf.feature_column.linear_model$linear_model}.
+ details, see `tf.feature_column.linear_model`.
Raises:
ValueError: If both linear_feature_columns and dnn_features_columns are
diff --git a/tensorflow/python/estimator/canned/linear.py b/tensorflow/python/estimator/canned/linear.py
index 58a7160348..115dd18518 100644
--- a/tensorflow/python/estimator/canned/linear.py
+++ b/tensorflow/python/estimator/canned/linear.py
@@ -306,7 +306,7 @@ class LinearClassifier(estimator.Estimator):
is multivalent. One of "mean", "sqrtn", and "sum" -- these are
effectively different ways to do example-level normalization, which can
be useful for bag-of-words features. For more details, see
- @{tf.feature_column.linear_model$linear_model}.
+ `tf.feature_column.linear_model`.
Returns:
A `LinearClassifier` estimator.
@@ -472,7 +472,7 @@ class LinearRegressor(estimator.Estimator):
is multivalent. One of "mean", "sqrtn", and "sum" -- these are
effectively different ways to do example-level normalization, which can
be useful for bag-of-words features. For more details, see
- @{tf.feature_column.linear_model$linear_model}.
+ `tf.feature_column.linear_model`.
"""
head = head_lib._regression_head( # pylint: disable=protected-access
label_dimension=label_dimension, weight_column=weight_column,
diff --git a/tensorflow/python/estimator/estimator.py b/tensorflow/python/estimator/estimator.py
index 3b6b180b25..b238222d00 100644
--- a/tensorflow/python/estimator/estimator.py
+++ b/tensorflow/python/estimator/estimator.py
@@ -128,7 +128,7 @@ class Estimator(object):
```
For more details on warm-start configuration, see
- @{tf.estimator.WarmStartSettings$WarmStartSettings}.
+ `tf.estimator.WarmStartSettings`.
Args:
model_fn: Model function. Follows the signature:
@@ -1027,7 +1027,7 @@ class Estimator(object):
"""Creates the global step tensor in graph.
The global step tensor must be an integer type with name 'global_step' and
- be added to the collection @{tf.GraphKeys.GLOBAL_STEP}.
+ be added to the collection `tf.GraphKeys.GLOBAL_STEP`.
Args:
graph: The graph in which to create the global step tensor.
diff --git a/tensorflow/python/feature_column/feature_column.py b/tensorflow/python/feature_column/feature_column.py
index d091d2fe0a..2246d2f3e9 100644
--- a/tensorflow/python/feature_column/feature_column.py
+++ b/tensorflow/python/feature_column/feature_column.py
@@ -16,7 +16,7 @@
FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
-canned @{tf.estimator.Estimator}s.
+canned `tf.estimator.Estimator`s.
When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
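For illustration of that pairing (editorial sketch):

```python
import tensorflow as tf

price = tf.feature_column.numeric_column("price")
dept = tf.feature_column.categorical_column_with_vocabulary_list(
    "department", ["sports", "toys", "garden"])
dept_emb = tf.feature_column.embedding_column(dept, dimension=4)

# A DNN model consumes dense columns; categorical columns are wrapped
# in embedding (or indicator) columns first.
estimator = tf.estimator.DNNRegressor(
    feature_columns=[price, dept_emb], hidden_units=[16, 8])
```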
@@ -1936,7 +1936,7 @@ class _FeatureColumn(object):
It is used for get_parsing_spec for `tf.parse_example`. Returned spec is a
dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
- supported objects. Please check documentation of @{tf.parse_example} for all
+ supported objects. Please check documentation of `tf.parse_example` for all
supported spec objects.
Let's say a Feature column depends on raw feature ('raw') and another
@@ -1995,7 +1995,7 @@ class _DenseColumn(_FeatureColumn):
weight_collections: List of graph collections to which Variables (if any
are created) are added.
trainable: If `True` also add variables to the graph collection
- `GraphKeys.TRAINABLE_VARIABLES` (see @{tf.Variable}).
+ `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
Returns:
`Tensor` of shape [batch_size] + `_variable_shape`.
@@ -2062,7 +2062,7 @@ class _CategoricalColumn(_FeatureColumn):
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
- A categorical feature typically handled with a @{tf.SparseTensor} of IDs.
+ A categorical feature is typically handled with a `tf.SparseTensor` of IDs.
"""
__metaclass__ = abc.ABCMeta
@@ -2097,7 +2097,7 @@ class _CategoricalColumn(_FeatureColumn):
weight_collections: List of graph collections to which variables (if any
are created) are added.
trainable: If `True` also add variables to the graph collection
- `GraphKeys.TRAINABLE_VARIABLES` (see @{tf.get_variable}).
+ `GraphKeys.TRAINABLE_VARIABLES` (see `tf.get_variable`).
"""
pass
diff --git a/tensorflow/python/feature_column/feature_column_v2.py b/tensorflow/python/feature_column/feature_column_v2.py
index b4dd23f58d..b6bf516286 100644
--- a/tensorflow/python/feature_column/feature_column_v2.py
+++ b/tensorflow/python/feature_column/feature_column_v2.py
@@ -16,7 +16,7 @@
FeatureColumns provide a high level abstraction for ingesting and representing
features. FeatureColumns are also the primary way of encoding features for
-canned @{tf.estimator.Estimator}s.
+canned `tf.estimator.Estimator`s.
When using FeatureColumns with `Estimators`, the type of feature column you
should choose depends on (1) the feature type and (2) the model type.
@@ -1904,7 +1904,7 @@ class FeatureColumn(object):
It is used for get_parsing_spec for `tf.parse_example`. Returned spec is a
dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and other
- supported objects. Please check documentation of @{tf.parse_example} for all
+ supported objects. Please check documentation of `tf.parse_example` for all
supported spec objects.
Let's say a Feature column depends on raw feature ('raw') and another
@@ -2025,7 +2025,7 @@ def _create_dense_column_weighted_sum(column,
class CategoricalColumn(FeatureColumn):
"""Represents a categorical feature.
- A categorical feature typically handled with a @{tf.SparseTensor} of IDs.
+ A categorical feature is typically handled with a `tf.SparseTensor` of IDs.
"""
__metaclass__ = abc.ABCMeta
diff --git a/tensorflow/python/framework/errors_impl.py b/tensorflow/python/framework/errors_impl.py
index 84106c32c6..eff3831464 100644
--- a/tensorflow/python/framework/errors_impl.py
+++ b/tensorflow/python/framework/errors_impl.py
@@ -63,9 +63,9 @@ class OpError(Exception):
*N.B.* If the failed op was synthesized at runtime, e.g. a `Send`
or `Recv` op, there will be no corresponding
- @{tf.Operation}
+ `tf.Operation`
object. In that case, this will return `None`, and you should
- instead use the @{tf.OpError.node_def} to
+ instead use the `tf.OpError.node_def` to
discover information about the op.
Returns:
@@ -181,10 +181,10 @@ class CancelledError(OpError):
"""Raised when an operation or step is cancelled.
For example, a long-running operation (e.g.
- @{tf.QueueBase.enqueue} may be
+ `tf.QueueBase.enqueue`) may be
cancelled by running another operation (e.g.
- @{tf.QueueBase.close},
- or by @{tf.Session.close}.
+ `tf.QueueBase.close`),
+ or by `tf.Session.close`.
A step that is running such a long-running operation will fail by raising
`CancelledError`.
@@ -221,9 +221,9 @@ class InvalidArgumentError(OpError):
This may occur, for example, if an operation receives an input
tensor that has an invalid value or shape. For example, the
- @{tf.matmul} op will raise this
+ `tf.matmul` op will raise this
error if it receives an input that is not a matrix, and the
- @{tf.reshape} op will raise
+ `tf.reshape` op will raise
this error if the new shape does not match the number of elements in the input
tensor.
@@ -256,7 +256,7 @@ class NotFoundError(OpError):
"""Raised when a requested entity (e.g., a file or directory) was not found.
For example, running the
- @{tf.WholeFileReader.read}
+ `tf.WholeFileReader.read`
operation could raise `NotFoundError` if it receives the name of a file that
does not exist.
@@ -273,7 +273,7 @@ class AlreadyExistsError(OpError):
"""Raised when an entity that we attempted to create already exists.
For example, running an operation that saves a file
- (e.g. @{tf.train.Saver.save})
+ (e.g. `tf.train.Saver.save`)
could potentially raise this exception if an explicit filename for an
existing file was passed.
@@ -291,7 +291,7 @@ class PermissionDeniedError(OpError):
"""Raised when the caller does not have permission to run an operation.
For example, running the
- @{tf.WholeFileReader.read}
+ `tf.WholeFileReader.read`
operation could raise `PermissionDeniedError` if it receives the name of a
file for which the user does not have the read file permission.
@@ -340,7 +340,7 @@ class FailedPreconditionError(OpError):
"""Operation was rejected because the system is not in a state to execute it.
This exception is most commonly raised when running an operation
- that reads a @{tf.Variable}
+ that reads a `tf.Variable`
before it has been initialized.
@@__init__
@@ -357,9 +357,9 @@ class AbortedError(OpError):
"""The operation was aborted, typically due to a concurrent action.
For example, running a
- @{tf.QueueBase.enqueue}
+ `tf.QueueBase.enqueue`
operation may raise `AbortedError` if a
- @{tf.QueueBase.close} operation
+ `tf.QueueBase.close` operation
previously ran.
@@__init__
@@ -375,9 +375,9 @@ class OutOfRangeError(OpError):
"""Raised when an operation iterates past the valid input range.
This exception is raised in "end-of-file" conditions, such as when a
- @{tf.QueueBase.dequeue}
+ `tf.QueueBase.dequeue`
operation is blocked on an empty queue, and a
- @{tf.QueueBase.close}
+ `tf.QueueBase.close`
operation executes.
@@__init__
@@ -395,7 +395,7 @@ class UnimplementedError(OpError):
Some operations may raise this error when passed otherwise-valid
arguments that they do not currently support. For example, running
- the @{tf.nn.max_pool} operation
+ the `tf.nn.max_pool` operation
would raise this error if pooling was requested on the batch dimension,
because this is not yet supported.
@@ -443,7 +443,7 @@ class DataLossError(OpError):
"""Raised when unrecoverable data loss or corruption is encountered.
For example, this may be raised by running a
- @{tf.WholeFileReader.read}
+ `tf.WholeFileReader.read`
operation, if the file is truncated while it is being read.
@@__init__
diff --git a/tensorflow/python/framework/function.py b/tensorflow/python/framework/function.py
index 12bf03c5fa..f47c0d8a5e 100644
--- a/tensorflow/python/framework/function.py
+++ b/tensorflow/python/framework/function.py
@@ -665,7 +665,7 @@ class _FuncGraph(ops.Graph):
def container(self, container_name):
"""Returns a context manager that specifies the resource container to use.
- Overridden from @{tf.Graph} to update both the init_scope container
+ Overridden from `tf.Graph` to update both the init_scope container
and the present inner container. This is necessary to make sure setting
containers applies correctly both to created variables and to stateful
ops.
diff --git a/tensorflow/python/framework/importer.py b/tensorflow/python/framework/importer.py
index 687bfebd43..e48e67c8a1 100644
--- a/tensorflow/python/framework/importer.py
+++ b/tensorflow/python/framework/importer.py
@@ -344,9 +344,9 @@ def import_graph_def(graph_def,
This function provides a way to import a serialized TensorFlow
[`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)
protocol buffer, and extract individual objects in the `GraphDef` as
- @{tf.Tensor} and @{tf.Operation} objects. Once extracted,
+ `tf.Tensor` and `tf.Operation` objects. Once extracted,
these objects are placed into the current default `Graph`. See
- @{tf.Graph.as_graph_def} for a way to create a `GraphDef`
+ `tf.Graph.as_graph_def` for a way to create a `GraphDef`
proto.
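A round-trip sketch (editorial):

```python
import tensorflow as tf

with tf.Graph().as_default():
  a = tf.constant(1.0, name="a")
  graph_def = tf.get_default_graph().as_graph_def()

with tf.Graph().as_default():
  # Extract the imported tensor by name.
  a_imported, = tf.import_graph_def(graph_def, return_elements=["a:0"])
```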
Args:
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index ed0bf1afe0..98a1802490 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -229,7 +229,7 @@ class Tensor(_TensorLike):
A `Tensor` is a symbolic handle to one of the outputs of an
`Operation`. It does not hold the values of that operation's output,
but instead provides a means of computing those values in a
- TensorFlow @{tf.Session}.
+ TensorFlow `tf.Session`.
This class has two primary purposes:
@@ -240,7 +240,7 @@ class Tensor(_TensorLike):
2. After the graph has been launched in a session, the value of the
`Tensor` can be computed by passing it to
- @{tf.Session.run}.
+ `tf.Session.run`.
`t.eval()` is a shortcut for calling
`tf.get_default_session().run(t)`.
@@ -365,7 +365,7 @@ class Tensor(_TensorLike):
The shape is computed using shape inference functions that are
registered in the Op for each `Operation`. See
- @{tf.TensorShape}
+ `tf.TensorShape`
for more details of what a shape represents.
The inferred shape of a tensor is used to provide shape
@@ -695,7 +695,7 @@ class Tensor(_TensorLike):
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
- See @{tf.Session.run} for a
+ See `tf.Session.run` for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this tensor. If
none, the default session will be used.
@@ -1455,10 +1455,10 @@ class IndexedSlices(_TensorLike):
The `IndexedSlices` class is used principally in the definition of
gradients for operations that have sparse gradients
- (e.g. @{tf.gather}).
+ (e.g. `tf.gather`).
Contrast this representation with
- @{tf.SparseTensor},
+ `tf.SparseTensor`,
which uses multi-dimensional indices and scalar values.
"""
@@ -1619,8 +1619,8 @@ class Operation(object):
more `Tensor` objects as input, and produces zero or more `Tensor`
objects as output. Objects of type `Operation` are created by
calling a Python op constructor (such as
- @{tf.matmul})
- or @{tf.Graph.create_op}.
+ `tf.matmul`)
+ or `tf.Graph.create_op`.
For example `c = tf.matmul(a, b)` creates an `Operation` of type
"MatMul" that takes tensors `a` and `b` as input, and produces `c`
@@ -1628,7 +1628,7 @@ class Operation(object):
After the graph has been launched in a session, an `Operation` can
be executed by passing it to
- @{tf.Session.run}.
+ `tf.Session.run`.
`op.run()` is a shortcut for calling `tf.get_default_session().run(op)`.
"""
@@ -2338,7 +2338,7 @@ class Operation(object):
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
- See @{tf.Session.run}
+ See `tf.Session.run`
for a description of the valid feed values.
session: (Optional.) The `Session` to be used to run this operation. If
none, the default session will be used.
@@ -2727,13 +2727,13 @@ class Graph(object):
"""A TensorFlow computation, represented as a dataflow graph.
A `Graph` contains a set of
- @{tf.Operation} objects,
+ `tf.Operation` objects,
which represent units of computation; and
- @{tf.Tensor} objects, which represent
+ `tf.Tensor` objects, which represent
the units of data that flow between operations.
A default `Graph` is always registered, and accessible by calling
- @{tf.get_default_graph}.
+ `tf.get_default_graph`.
To add an operation to the default graph, simply call one of the functions
that defines a new `Operation`:
@@ -2743,7 +2743,7 @@ class Graph(object):
```
Another typical usage involves the
- @{tf.Graph.as_default}
+ `tf.Graph.as_default`
context manager, which overrides the current default graph for the
lifetime of the context:
@@ -2764,7 +2764,7 @@ class Graph(object):
that are identified by name. For convenience when building a large
graph, collections can store groups of related objects: for
example, the `tf.Variable` uses a collection (named
- @{tf.GraphKeys.GLOBAL_VARIABLES}) for
+ `tf.GraphKeys.GLOBAL_VARIABLES`) for
all variables that are created during the construction of a graph. The caller
may define additional collections by specifying a new name.
"""
@@ -2941,7 +2941,7 @@ class Graph(object):
"""Returns a version number that increases as ops are added to the graph.
Note that this is unrelated to the
- @{tf.Graph.graph_def_versions}.
+ `tf.Graph.graph_def_versions`.
Returns:
An integer version that increases as ops are added to the graph.
@@ -2991,7 +2991,7 @@ class Graph(object):
After calling `g.finalize()`, no new operations can be added to
`g`. This method is used to ensure that no operations are added
to a graph when it is shared between multiple threads, for example
- when using a @{tf.train.QueueRunner}.
+ when using a `tf.train.QueueRunner`.
"""
self._finalized = True
@@ -3040,7 +3040,7 @@ class Graph(object):
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
- (using @{tf.import_graph_def}) or used with the
+ (using `tf.import_graph_def`) or used with the
[C++ Session API](../../../../api_docs/cc/index.md).
This method is thread-safe.
@@ -3086,7 +3086,7 @@ class Graph(object):
"""Returns a serialized `GraphDef` representation of this graph.
The serialized `GraphDef` can be imported into another `Graph`
- (using @{tf.import_graph_def}) or used with the
+ (using `tf.import_graph_def`) or used with the
[C++ Session API](../../api_docs/cc/index.md).
This method is thread-safe.
@@ -4884,7 +4884,7 @@ def device(device_name_or_function):
"""Wrapper for `Graph.device()` using the default graph.
See
- @{tf.Graph.device}
+ `tf.Graph.device`
for more details.
Args:
@@ -4950,7 +4950,7 @@ def colocate_with(op, ignore_existing=False):
def control_dependencies(control_inputs):
"""Wrapper for `Graph.control_dependencies()` using the default graph.
- See @{tf.Graph.control_dependencies}
+ See `tf.Graph.control_dependencies`
for more details.
When eager execution is enabled, any callable object in the `control_inputs`
@@ -5316,7 +5316,7 @@ def enable_eager_execution(config=None,
Eager execution provides an imperative interface to TensorFlow. With eager
execution enabled, TensorFlow functions execute operations immediately (as
- opposed to adding to a graph to be executed later in a @{tf.Session}) and
+ opposed to adding to a graph to be executed later in a `tf.Session`) and
return concrete values (as opposed to symbolic references to a node in a
computational graph).
@@ -5336,9 +5336,9 @@ def enable_eager_execution(config=None,
both with and without eager execution).
Args:
- config: (Optional.) A @{tf.ConfigProto} to use to configure the environment
- in which operations are executed. Note that @{tf.ConfigProto} is also
- used to configure graph execution (via @{tf.Session}) and many options
+ config: (Optional.) A `tf.ConfigProto` to use to configure the environment
+ in which operations are executed. Note that `tf.ConfigProto` is also
+ used to configure graph execution (via `tf.Session`) and many options
within `tf.ConfigProto` are not implemented (or are irrelevant) when
eager execution is enabled.
device_policy: (Optional.) Policy controlling how operations requiring
@@ -5638,7 +5638,7 @@ class GraphKeys(object):
* `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared
across distributed environment (model variables are subset of these). See
- @{tf.global_variables}
+ `tf.global_variables`
for more details.
Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`,
and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`.
@@ -5650,19 +5650,19 @@ class GraphKeys(object):
`tf.contrib.framework.model_variable` to add to this collection.
* `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will
be trained by an optimizer. See
- @{tf.trainable_variables}
+ `tf.trainable_variables`
for more details.
* `SUMMARIES`: the summary `Tensor` objects that have been created in the
graph. See
- @{tf.summary.merge_all}
+ `tf.summary.merge_all`
for more details.
* `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to
produce input for a computation. See
- @{tf.train.start_queue_runners}
+ `tf.train.start_queue_runners`
for more details.
* `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also
keep moving averages. See
- @{tf.moving_average_variables}
+ `tf.moving_average_variables`
for more details.
* `REGULARIZATION_LOSSES`: regularization losses collected during graph
construction.
@@ -5776,7 +5776,7 @@ class GraphKeys(object):
def add_to_collection(name, value):
"""Wrapper for `Graph.add_to_collection()` using the default graph.
- See @{tf.Graph.add_to_collection}
+ See `tf.Graph.add_to_collection`
for more details.
Args:
@@ -5795,7 +5795,7 @@ def add_to_collection(name, value):
def add_to_collections(names, value):
"""Wrapper for `Graph.add_to_collections()` using the default graph.
- See @{tf.Graph.add_to_collections}
+ See `tf.Graph.add_to_collections`
for more details.
Args:
@@ -5815,7 +5815,7 @@ def add_to_collections(names, value):
def get_collection_ref(key):
"""Wrapper for `Graph.get_collection_ref()` using the default graph.
- See @{tf.Graph.get_collection_ref}
+ See `tf.Graph.get_collection_ref`
for more details.
Args:
@@ -5839,7 +5839,7 @@ def get_collection_ref(key):
def get_collection(key, scope=None):
"""Wrapper for `Graph.get_collection()` using the default graph.
- See @{tf.Graph.get_collection}
+ See `tf.Graph.get_collection`
for more details.
Args:
@@ -5882,7 +5882,7 @@ class name_scope(object): # pylint: disable=invalid-name
This context manager validates that the given `values` are from the
same graph, makes that graph the default graph, and pushes a
name scope in that graph (see
- @{tf.Graph.name_scope}
+ `tf.Graph.name_scope`
for more details on that).
For example, to define a new Python op called `my_op`:
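(Editorial sketch reconstructing the elided example; the computation is a stand-in:)

```python
import tensorflow as tf

def my_op(a, b, c, name=None):
  with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
    a = tf.convert_to_tensor(a, name="a")
    b = tf.convert_to_tensor(b, name="b")
    c = tf.convert_to_tensor(c, name="c")
    # Stand-in for the docstring's elided computation.
    return tf.add(a, tf.multiply(b, c), name=scope)
```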
diff --git a/tensorflow/python/framework/random_seed.py b/tensorflow/python/framework/random_seed.py
index b724432e00..2f9504889a 100644
--- a/tensorflow/python/framework/random_seed.py
+++ b/tensorflow/python/framework/random_seed.py
@@ -43,7 +43,7 @@ def get_seed(op_seed):
graph, or for only specific operations.
For details on how the graph-level seed interacts with op seeds, see
- @{tf.set_random_seed}.
+ `tf.set_random_seed`.
Args:
op_seed: integer.
diff --git a/tensorflow/python/framework/sparse_tensor.py b/tensorflow/python/framework/sparse_tensor.py
index 6a5c6468f7..a45581190f 100644
--- a/tensorflow/python/framework/sparse_tensor.py
+++ b/tensorflow/python/framework/sparse_tensor.py
@@ -205,7 +205,7 @@ class SparseTensor(_TensorLike):
Args:
feed_dict: A dictionary that maps `Tensor` objects to feed values.
- See @{tf.Session.run} for a
+ See `tf.Session.run` for a
description of the valid feed values.
session: (Optional.) The `Session` to be used to evaluate this sparse
tensor. If none, the default session will be used.
diff --git a/tensorflow/python/framework/tensor_shape.py b/tensorflow/python/framework/tensor_shape.py
index c9be3d5005..bd0f691a61 100644
--- a/tensorflow/python/framework/tensor_shape.py
+++ b/tensorflow/python/framework/tensor_shape.py
@@ -500,7 +500,7 @@ class TensorShape(object):
may be inferred if there is a registered shape function for
`"Foo"`. See @{$adding_an_op#shape-functions-in-c$`Shape functions in C++`}
for details of shape functions and how to register them. Alternatively,
- the shape may be set explicitly using @{tf.Tensor.set_shape}.
+ the shape may be set explicitly using `tf.Tensor.set_shape`.
"""
def __init__(self, dims):
diff --git a/tensorflow/python/framework/test_util.py b/tensorflow/python/framework/test_util.py
index 764e8bfacb..9be6391b04 100644
--- a/tensorflow/python/framework/test_util.py
+++ b/tensorflow/python/framework/test_util.py
@@ -659,10 +659,10 @@ def run_in_graph_and_eager_modes(func=None,
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
- a @{tf.test.TestCase} class. Doing so will cause the contents of the test
+ a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
- and graph execution (see @{tf.enable_eager_execution}).
+ and graph execution (see `tf.enable_eager_execution`).
For example, consider the following unittest:
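(Editorial sketch of such a unittest, in place of the elided example:)

```python
import tensorflow as tf
from tensorflow.python.framework import test_util

class AdditionTest(tf.test.TestCase):

  @test_util.run_in_graph_and_eager_modes()
  def test_add(self):
    x = tf.constant([1, 2])
    y = tf.constant([3, 4])
    z = tf.add(x, y)
    self.assertAllEqual([4, 6], self.evaluate(z))
```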
diff --git a/tensorflow/python/keras/engine/base_layer.py b/tensorflow/python/keras/engine/base_layer.py
index 33ad155072..d6d3db21fb 100644
--- a/tensorflow/python/keras/engine/base_layer.py
+++ b/tensorflow/python/keras/engine/base_layer.py
@@ -500,13 +500,13 @@ class Layer(checkpointable.CheckpointableBase):
use_resource: Whether to use `ResourceVariable`.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
- @{tf.VariableSynchronization}. By default the synchronization is set to
+ `tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
- @{tf.VariableAggregation}.
+ `tf.VariableAggregation`.
getter: Variable getter argument to be passed to the `Checkpointable` API.
Returns:
@@ -1921,13 +1921,13 @@ def make_variable(name,
use_resource: Whether to use a `ResourceVariable`.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
- @{tf.VariableSynchronization}. By default the synchronization is set to
+ `tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
- @{tf.VariableAggregation}.
+ `tf.VariableAggregation`.
partitioner: Not handled at this time.
Returns:
diff --git a/tensorflow/python/layers/base.py b/tensorflow/python/layers/base.py
index cf13b52617..ab08865532 100644
--- a/tensorflow/python/layers/base.py
+++ b/tensorflow/python/layers/base.py
@@ -183,13 +183,13 @@ class Layer(base_layer.Layer):
use_resource: Whether to use `ResourceVariable`.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
- @{tf.VariableSynchronization}. By default the synchronization is set to
+ `tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
- @{tf.VariableAggregation}.
+ `tf.VariableAggregation`.
partitioner: (optional) partitioner instance (callable). If
provided, when the requested variable is created it will be split
into multiple partitions according to `partitioner`. In this case,
diff --git a/tensorflow/python/layers/core.py b/tensorflow/python/layers/core.py
index 261281ae7e..50a56736fc 100644
--- a/tensorflow/python/layers/core.py
+++ b/tensorflow/python/layers/core.py
@@ -203,7 +203,7 @@ class Dropout(keras_layers.Dropout, base.Layer):
to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed}.
+ `tf.set_random_seed`
for behavior.
name: The name of the layer (string).
"""
@@ -248,7 +248,7 @@ def dropout(inputs,
to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
training: Either a Python boolean, or a TensorFlow boolean scalar tensor
(e.g. a placeholder). Whether to return the output in training mode
diff --git a/tensorflow/python/ops/array_ops.py b/tensorflow/python/ops/array_ops.py
index ec6488ea63..a917f51087 100644
--- a/tensorflow/python/ops/array_ops.py
+++ b/tensorflow/python/ops/array_ops.py
@@ -538,7 +538,7 @@ def slice(input_, begin, size, name=None):
words, `begin[i]` is the offset into the 'i'th dimension of `input` that you
want to slice from.
- Note that @{tf.Tensor.__getitem__} is typically a more pythonic way to
+ Note that `tf.Tensor.__getitem__` is typically a more pythonic way to
perform slices, as it allows you to write `foo[3:7, :-2]` instead of
`tf.slice(foo, [3, 0], [4, foo.get_shape()[1]-2])`.
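Side by side (editorial sketch):

```python
import tensorflow as tf

foo = tf.constant([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
a = tf.slice(foo, [0, 1], [2, 2])  # [[2, 3], [5, 6]]
b = foo[0:2, 1:3]                  # same result via tf.Tensor.__getitem__
```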
@@ -594,7 +594,7 @@ def strided_slice(input_,
**Instead of calling this op directly, most users will want to use the
NumPy-style slicing syntax (e.g. `tensor[..., 3:4:-1, tf.newaxis, 3]`), which
- is supported via @{tf.Tensor.__getitem__} and @{tf.Variable.__getitem__}.**
+ is supported via `tf.Tensor.__getitem__` and `tf.Variable.__getitem__`.**
The interface of this op is a low-level encoding of the slicing syntax.
Roughly speaking, this op extracts a slice of size `(end-begin)/stride`
@@ -723,7 +723,7 @@ def _SliceHelperVar(var, slice_spec):
"""Creates a slice helper object given a variable.
This allows creating a sub-tensor from part of the current contents
- of a variable. See @{tf.Tensor.__getitem__} for detailed examples
+ of a variable. See `tf.Tensor.__getitem__` for detailed examples
of slicing.
This function in addition also allows assignment to a sliced range.
diff --git a/tensorflow/python/ops/control_flow_ops.py b/tensorflow/python/ops/control_flow_ops.py
index c7061b36dd..faebdc3780 100644
--- a/tensorflow/python/ops/control_flow_ops.py
+++ b/tensorflow/python/ops/control_flow_ops.py
@@ -3069,7 +3069,7 @@ def while_loop(cond,
`loop_vars` is the same in every iteration. The `shape_invariants` argument
allows the caller to specify a less specific shape invariant for each loop
variable, which is needed if the shape varies between iterations. The
- @{tf.Tensor.set_shape}
+ `tf.Tensor.set_shape`
function may also be used in the `body` function to indicate that
the output loop variable has a particular shape. The shape invariants for
SparseTensor and IndexedSlices are treated specially as follows:
@@ -3320,7 +3320,7 @@ def with_dependencies(dependencies, output_tensor, name=None):
no guarantee that `output_tensor` will be evaluated after any `dependencies`
have run.
- See also @{tf.tuple$tuple} and @{tf.group$group}.
+ See also `tf.tuple` and `tf.group`.
Args:
dependencies: Iterable of operations to run before this op finishes.
@@ -3365,8 +3365,8 @@ def group(*inputs, **kwargs):
When this op finishes, all ops in `inputs` have finished. This op has no
output.
- See also @{tf.tuple$tuple} and
- @{tf.control_dependencies$control_dependencies}.
+ See also `tf.tuple` and
+ `tf.control_dependencies`.
Args:
*inputs: Zero or more tensors to group.
@@ -3435,8 +3435,8 @@ def tuple(tensors, name=None, control_inputs=None): # pylint: disable=redefined
returned by `tuple` are only available after all the parallel computations
are done.
- See also @{tf.group$group} and
- @{tf.control_dependencies$control_dependencies}.
+ See also `tf.group` and
+ `tf.control_dependencies`.
Args:
tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.
diff --git a/tensorflow/python/ops/custom_gradient.py b/tensorflow/python/ops/custom_gradient.py
index 9f77a6cca1..871f236f78 100644
--- a/tensorflow/python/ops/custom_gradient.py
+++ b/tensorflow/python/ops/custom_gradient.py
@@ -73,7 +73,7 @@ def custom_gradient(f):
With this definition, the gradient at x=100 will be correctly evaluated as
1.0.
- See also @{tf.RegisterGradient} which registers a gradient function for a
+ See also `tf.RegisterGradient` which registers a gradient function for a
primitive TensorFlow operation. `tf.custom_gradient` on the other hand allows
for fine grained control over the gradient computation of a sequence of
operations.
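(Editorial sketch of the kind of definition the passage above refers to, a numerically stable gradient for log(1 + exp(x)):)

```python
import tensorflow as tf

@tf.custom_gradient
def log1pexp(x):
  e = tf.exp(x)
  def grad(dy):
    return dy * (1 - 1 / (1 + e))
  return tf.log(1 + e), grad
```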
@@ -100,7 +100,7 @@ def custom_gradient(f):
Returns:
A function `h(x)` which returns the same value as `f(x)[0]` and whose
- gradient (as calculated by @{tf.gradients}) is determined by `f(x)[1]`.
+ gradient (as calculated by `tf.gradients`) is determined by `f(x)[1]`.
"""
def decorated(*args, **kwargs):
diff --git a/tensorflow/python/ops/data_flow_ops.py b/tensorflow/python/ops/data_flow_ops.py
index abf597ca55..7af2ca56be 100644
--- a/tensorflow/python/ops/data_flow_ops.py
+++ b/tensorflow/python/ops/data_flow_ops.py
@@ -126,8 +126,8 @@ class QueueBase(object):
handle single elements, versions that support enqueuing and
dequeuing a batch of elements at once.
- See @{tf.FIFOQueue} and
- @{tf.RandomShuffleQueue} for concrete
+ See `tf.FIFOQueue` and
+ `tf.RandomShuffleQueue` for concrete
implementations of this class, and instructions on how to create
them.
"""
@@ -309,12 +309,12 @@ class QueueBase(object):
until the element has been enqueued.
At runtime, this operation may raise an error if the queue is
- @{tf.QueueBase.close} before or during its execution. If the
+ closed (via `tf.QueueBase.close`) before or during its execution. If the
queue is closed before this operation runs,
`tf.errors.CancelledError` will be raised. If this operation is
blocked, and either (i) the queue is closed by a close operation
with `cancel_pending_enqueues=True`, or (ii) the session is
- @{tf.Session.close},
+ closed (via `tf.Session.close`),
`tf.errors.CancelledError` will be raised.
Args:
@@ -352,12 +352,12 @@ class QueueBase(object):
until all of the elements have been enqueued.
At runtime, this operation may raise an error if the queue is
- @{tf.QueueBase.close} before or during its execution. If the
+ closed (via `tf.QueueBase.close`) before or during its execution. If the
queue is closed before this operation runs,
`tf.errors.CancelledError` will be raised. If this operation is
blocked, and either (i) the queue is closed by a close operation
with `cancel_pending_enqueues=True`, or (ii) the session is
- @{tf.Session.close},
+ closed (via `tf.Session.close`),
`tf.errors.CancelledError` will be raised.
Args:
@@ -413,11 +413,11 @@ class QueueBase(object):
until there is an element to dequeue.
At runtime, this operation may raise an error if the queue is
- @{tf.QueueBase.close} before or during its execution. If the
+ closed (via `tf.QueueBase.close`) before or during its execution. If the
queue is closed, the queue is empty, and there are no pending
enqueue operations that can fulfill this request,
`tf.errors.OutOfRangeError` will be raised. If the session is
- @{tf.Session.close},
+ closed (via `tf.Session.close`),
`tf.errors.CancelledError` will be raised.
Args:
@@ -455,11 +455,11 @@ class QueueBase(object):
`OutOfRange` exception is raised.
At runtime, this operation may raise an error if the queue is
- @{tf.QueueBase.close} before or during its execution. If the
+ closed (via `tf.QueueBase.close`) before or during its execution. If the
queue is closed, the queue contains fewer than `n` elements, and
there are no pending enqueue operations that can fulfill this
request, `tf.errors.OutOfRangeError` will be raised. If the
- session is @{tf.Session.close},
+ session is closed (via `tf.Session.close`),
`tf.errors.CancelledError` will be raised.
Args:
@@ -500,7 +500,7 @@ class QueueBase(object):
If the queue is closed and there are more than `0` but fewer than
`n` elements remaining, then instead of raising a
- `tf.errors.OutOfRangeError` like @{tf.QueueBase.dequeue_many},
+ `tf.errors.OutOfRangeError` like `tf.QueueBase.dequeue_many`,
fewer than `n` elements are returned immediately. If the queue is
closed and there are `0` elements left in the queue, then a
`tf.errors.OutOfRangeError` is raised just like in `dequeue_many`.
@@ -608,7 +608,7 @@ def _shared_name(shared_name):
class RandomShuffleQueue(QueueBase):
"""A queue implementation that dequeues elements in a random order.
- See @{tf.QueueBase} for a description of the methods on
+ See `tf.QueueBase` for a description of the methods on
this class.
"""
@@ -657,7 +657,7 @@ class RandomShuffleQueue(QueueBase):
with the same length as `dtypes`, or `None`. If specified, the dequeue
methods return a dictionary with the names as keys.
seed: A Python integer. Used to create a random seed. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
shared_name: (Optional.) If non-empty, this queue will be shared under
the given name across multiple sessions.
@@ -693,7 +693,7 @@ class RandomShuffleQueue(QueueBase):
class FIFOQueue(QueueBase):
"""A queue implementation that dequeues elements in first-in first-out order.
- See @{tf.QueueBase} for a description of the methods on
+ See `tf.QueueBase` for a description of the methods on
this class.
"""
@@ -753,7 +753,7 @@ class PaddingFIFOQueue(QueueBase):
A `PaddingFIFOQueue` may contain components with dynamic shape, while also
supporting `dequeue_many`. See the constructor for more details.
- See @{tf.QueueBase} for a description of the methods on
+ See `tf.QueueBase` for a description of the methods on
this class.
"""
@@ -824,7 +824,7 @@ class PaddingFIFOQueue(QueueBase):
class PriorityQueue(QueueBase):
"""A queue implementation that dequeues elements in prioritized order.
- See @{tf.QueueBase} for a description of the methods on
+ See `tf.QueueBase` for a description of the methods on
this class.
"""
diff --git a/tensorflow/python/ops/embedding_ops.py b/tensorflow/python/ops/embedding_ops.py
index 27c2fa7017..7b9e7de145 100644
--- a/tensorflow/python/ops/embedding_ops.py
+++ b/tensorflow/python/ops/embedding_ops.py
@@ -253,7 +253,7 @@ def embedding_lookup(
This function is used to perform parallel lookups on the list of
tensors in `params`. It is a generalization of
- @{tf.gather}, where `params` is
+ `tf.gather`, where `params` is
interpreted as a partitioning of a large embedding tensor. `params` may be
a `PartitionedVariable` as returned by using `tf.get_variable()` with a
partitioner.
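A partitioned-lookup sketch (editorial; sizes are arbitrary):

```python
import tensorflow as tf

params = tf.get_variable(
    "embeddings", shape=[100, 8],
    partitioner=tf.fixed_size_partitioner(num_shards=4))
ids = tf.constant([0, 7, 42])
vecs = tf.nn.embedding_lookup(params, ids)  # shape [3, 8]
```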
diff --git a/tensorflow/python/ops/image_ops_impl.py b/tensorflow/python/ops/image_ops_impl.py
index 855a4d0c33..ca44a12237 100644
--- a/tensorflow/python/ops/image_ops_impl.py
+++ b/tensorflow/python/ops/image_ops_impl.py
@@ -265,7 +265,7 @@ def random_flip_up_down(image, seed=None):
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
seed: A Python integer. Used to create a random seed. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
Returns:
@@ -287,7 +287,7 @@ def random_flip_left_right(image, seed=None):
image: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
seed: A Python integer. Used to create a random seed. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
Returns:
@@ -307,7 +307,7 @@ def _random_flip(image, flip_index, seed, scope_name):
flip_index: The dimension along which to flip the image.
Vertical: 0, Horizontal: 1
seed: A Python integer. Used to create a random seed. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
scope_name: Name of the scope in which the ops are added.
@@ -948,7 +948,7 @@ def resize_images(images,
Resized images will be distorted if their original aspect ratio is not
the same as `size`. To avoid distortions, see
- @{tf.image.resize_image_with_pad}.
+ `tf.image.resize_image_with_pad`.
`method` can be one of:
@@ -1227,7 +1227,7 @@ def random_brightness(image, max_delta, seed=None):
image: An image.
max_delta: float, must be non-negative.
seed: A Python integer. Used to create a random seed. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
Returns:
@@ -1255,7 +1255,7 @@ def random_contrast(image, lower, upper, seed=None):
lower: float. Lower bound for the random contrast factor.
upper: float. Upper bound for the random contrast factor.
seed: A Python integer. Used to create a random seed. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
Returns:
diff --git a/tensorflow/python/ops/init_ops.py b/tensorflow/python/ops/init_ops.py
index c315722b6b..4d75ee3974 100644
--- a/tensorflow/python/ops/init_ops.py
+++ b/tensorflow/python/ops/init_ops.py
@@ -238,7 +238,7 @@ class RandomUniform(Initializer):
maxval: A python scalar or a scalar tensor. Upper bound of the range
of random values to generate. Defaults to 1 for float types.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
dtype: The data type.
"""
@@ -276,7 +276,7 @@ class RandomNormal(Initializer):
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
dtype: The data type. Only floating point types are supported.
"""
@@ -319,7 +319,7 @@ class TruncatedNormal(Initializer):
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
dtype: The data type. Only floating point types are supported.
"""
@@ -369,7 +369,7 @@ class UniformUnitScaling(Initializer):
Args:
factor: Float. A multiplicative factor by which the values will be scaled.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
dtype: The data type. Only floating point types are supported.
"""
@@ -427,7 +427,7 @@ class VarianceScaling(Initializer):
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "normal", "uniform".
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
dtype: The data type. Only floating point types are supported.
@@ -517,7 +517,7 @@ class Orthogonal(Initializer):
Args:
gain: multiplicative factor to apply to the orthogonal matrix
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
dtype: The data type.
"""
@@ -572,7 +572,7 @@ class ConvolutionDeltaOrthogonal(Initializer):
The 2-norm of an input is multiplied by a factor of 'sqrt(gain)' after
applying this convolution.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed} for behavior.
+ `tf.set_random_seed` for behavior.
dtype: The data type.
"""
@@ -628,7 +628,7 @@ class ConvolutionOrthogonal(Initializer):
The 2-norm of an input is multiplied by a factor of 'sqrt(gain)' after
applying this convolution.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed} for behavior.
+ `tf.set_random_seed` for behavior.
dtype: The data type.
"""
@@ -693,7 +693,7 @@ class ConvolutionOrthogonal2D(ConvolutionOrthogonal):
This has the effect of scaling the output 2-norm by a factor of
`sqrt(gain)`.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed} for behavior.
+ `tf.set_random_seed` for behavior.
dtype: The data type.
"""
@@ -829,7 +829,7 @@ class ConvolutionOrthogonal1D(ConvolutionOrthogonal):
The 2-norm of an input is multiplied by a factor of 'sqrt(gain)' after
applying this convolution.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
dtype: The data type.
"""
@@ -946,7 +946,7 @@ class ConvolutionOrthogonal3D(ConvolutionOrthogonal):
The 2-norm of an input is multiplied by a factor of 'sqrt(gain)' after
applying this convolution.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed} for behavior.
+ `tf.set_random_seed` for behavior.
dtype: The data type.
"""
@@ -1150,7 +1150,7 @@ def glorot_uniform_initializer(seed=None, dtype=dtypes.float32):
Args:
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
dtype: The data type. Only floating point types are supported.
@@ -1175,7 +1175,7 @@ def glorot_normal_initializer(seed=None, dtype=dtypes.float32):
Args:
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
dtype: The data type. Only floating point types are supported.
diff --git a/tensorflow/python/ops/losses/losses_impl.py b/tensorflow/python/ops/losses/losses_impl.py
index 66633c8b12..51fb4cbac8 100644
--- a/tensorflow/python/ops/losses/losses_impl.py
+++ b/tensorflow/python/ops/losses/losses_impl.py
@@ -190,7 +190,7 @@ def compute_weighted_loss(
When calculating the gradient of a weighted loss, contributions from
both `losses` and `weights` are considered. If your `weights` depend
on some model parameters but you do not want this to affect the loss
- gradient, you need to apply @{tf.stop_gradient} to `weights` before
+ gradient, you need to apply `tf.stop_gradient` to `weights` before
passing them to `compute_weighted_loss`.
@compatibility(eager)
diff --git a/tensorflow/python/ops/nn_impl.py b/tensorflow/python/ops/nn_impl.py
index f47f38e29e..51f812b395 100644
--- a/tensorflow/python/ops/nn_impl.py
+++ b/tensorflow/python/ops/nn_impl.py
@@ -425,7 +425,7 @@ def depthwise_conv2d(input,
strides: 1-D of size 4. The stride of the sliding window for each
dimension of `input`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
- See the @{tf.nn.convolution$comment here}
+ See the "returns" section of `tf.nn.convolution` for details.
rate: 1-D of size 2. The dilation rate in which we sample input values
across the `height` and `width` dimensions in atrous convolution. If it is
greater than 1, then all values of strides must be 1.
@@ -507,7 +507,7 @@ def separable_conv2d(input,
strides: 1-D of size 4. The strides for the depthwise convolution for
each dimension of `input`.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
- See the @{tf.nn.convolution$comment here}
+ See the "returns" section of `tf.nn.convolution` for details.
rate: 1-D of size 2. The dilation rate in which we sample input values
across the `height` and `width` dimensions in atrous convolution. If it is
greater than 1, then all values of strides must be 1.
@@ -1189,7 +1189,7 @@ def nce_loss(weights,
Note: By default this uses a log-uniform (Zipfian) distribution for sampling,
so your labels must be sorted in order of decreasing frequency to achieve
good results. For more details, see
- @{tf.nn.log_uniform_candidate_sampler}.
+ `tf.nn.log_uniform_candidate_sampler`.
Note: In the case where `num_true` > 1, we assign to each target class
the target probability 1 / `num_true` so that the target probabilities
diff --git a/tensorflow/python/ops/nn_ops.py b/tensorflow/python/ops/nn_ops.py
index 5cdb7726a7..8761c1139e 100644
--- a/tensorflow/python/ops/nn_ops.py
+++ b/tensorflow/python/ops/nn_ops.py
@@ -898,8 +898,8 @@ def pool(
```
where the reduction function REDUCE depends on the value of `pooling_type`,
- and pad_before is defined based on the value of `padding` as described in the
- @{tf.nn.convolution$comment here}.
+ and pad_before is defined based on the value of `padding` as described in
+ the "returns" section of `tf.nn.convolution` for details.
The reduction never includes out-of-bounds positions.
In the case that `data_format` starts with `"NC"`, the `input` and output are
@@ -921,7 +921,7 @@ def pool(
window_shape: Sequence of N ints >= 1.
pooling_type: Specifies pooling operation, must be "AVG" or "MAX".
padding: The padding algorithm, must be "SAME" or "VALID".
- See the @{tf.nn.convolution$comment here}
+ See the "returns" section of `tf.nn.convolution` for details.
dilation_rate: Optional. Dilation rate. List of N ints >= 1.
Defaults to [1]*N. If any value of dilation_rate is > 1, then all values
of strides must be 1.
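A small sketch of `tf.nn.pool` under the `"SAME"` padding rule it shares with `tf.nn.convolution` (shapes are illustrative):

```python
import tensorflow as tf

# With "SAME" padding and the default stride of 1,
# the spatial shape is preserved.
x = tf.random_normal([1, 10, 10, 3])
y = tf.nn.pool(x, window_shape=[2, 2], pooling_type="MAX", padding="SAME")
print(y.shape)  # (1, 10, 10, 3)
```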
@@ -1045,8 +1045,8 @@ def atrous_conv2d(value, filters, rate, padding, name=None):
"""Atrous convolution (a.k.a. convolution with holes or dilated convolution).
This function is a simpler wrapper around the more general
- @{tf.nn.convolution}, and exists only for backwards compatibility. You can
- use @{tf.nn.convolution} to perform 1-D, 2-D, or 3-D atrous convolution.
+ `tf.nn.convolution`, and exists only for backwards compatibility. You can
+ use `tf.nn.convolution` to perform 1-D, 2-D, or 3-D atrous convolution.
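A sketch of the equivalence this paragraph states, using random test tensors:

```python
import tensorflow as tf

# atrous_conv2d with rate=2 matches the general tf.nn.convolution
# with dilation_rate=[2, 2].
value = tf.random_normal([1, 16, 16, 3])
filters = tf.random_normal([3, 3, 3, 8])
a = tf.nn.atrous_conv2d(value, filters, rate=2, padding="SAME")
b = tf.nn.convolution(value, filters, padding="SAME", dilation_rate=[2, 2])
# a and b compute the same result.
```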
Computes a 2-D atrous convolution, also known as convolution with holes or
@@ -1205,7 +1205,7 @@ def conv2d_transpose(
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
- See the @{tf.nn.convolution$comment here}
+ See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the returned tensor.
@@ -1430,7 +1430,7 @@ def conv3d_transpose(
strides: A list of ints. The stride of the sliding window for each
dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
- See the @{tf.nn.convolution$comment here}
+ See the "returns" section of `tf.nn.convolution` for details.
data_format: A string, either `'NDHWC'` or `'NCDHW'` specifying the layout
of the input and output tensors. Defaults to `'NDHWC'`.
name: Optional name for the returned tensor.
@@ -1819,7 +1819,7 @@ def softmax_cross_entropy_with_logits_v2(
or `float64`).
Backpropagation will happen into both `logits` and `labels`. To disallow
- backpropagation into `labels`, pass label tensors through @{tf.stop_gradient}
+ backpropagation into `labels`, pass label tensors through `tf.stop_gradient`
before feeding it to this function.
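A minimal sketch of the note above (random tensors stand in for a real model):

```python
import tensorflow as tf

# Wrap labels in tf.stop_gradient so backprop stays out of the
# (possibly computed) label tensor.
logits = tf.random_normal([32, 10])
labels = tf.nn.softmax(tf.random_normal([32, 10]))  # soft labels
loss = tf.nn.softmax_cross_entropy_with_logits_v2(
    labels=tf.stop_gradient(labels), logits=logits)
```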
**Note that to avoid confusion, it is required to pass only named arguments to
@@ -1909,7 +1909,7 @@ _XENT_DEPRECATION = """
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
-See @{tf.nn.softmax_cross_entropy_with_logits_v2}.
+See `tf.nn.softmax_cross_entropy_with_logits_v2`.
"""
@@ -1946,7 +1946,7 @@ def softmax_cross_entropy_with_logits(
Backpropagation will happen only into `logits`. To calculate a cross entropy
loss that allows backpropagation into both `logits` and `labels`, see
- @{tf.nn.softmax_cross_entropy_with_logits_v2}.
+ `tf.nn.softmax_cross_entropy_with_logits_v2`.
**Note that to avoid confusion, it is required to pass only named arguments to
this function.**
@@ -2114,7 +2114,7 @@ def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
strides: A list or tuple of 4 ints. The stride of the sliding window for
each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
- See the @{tf.nn.convolution$comment here}
+ See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the operation.
@@ -2143,7 +2143,7 @@ def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
strides: A list or tuple of 4 ints. The stride of the sliding window for
each dimension of the input tensor.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
- See the @{tf.nn.convolution$comment here}
+ See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC', 'NCHW' and 'NCHW_VECT_C' are supported.
name: Optional name for the operation.
@@ -2301,7 +2301,7 @@ def dropout(x, keep_prob, noise_shape=None, seed=None, name=None): # pylint: di
noise_shape: A 1-D `Tensor` of type `int32`, representing the
shape for randomly generated keep/drop flags.
seed: A Python integer. Used to create random seeds. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
name: A name for this operation (optional).
@@ -2521,7 +2521,7 @@ def conv1d_transpose(
stride: An `integer`. The number of entries by which
the filter is moved right at each step.
padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
- See the @{tf.nn.convolution$comment here}
+ See the "returns" section of `tf.nn.convolution` for details.
data_format: A string. 'NHWC' and 'NCHW' are supported.
name: Optional name for the returned tensor.
diff --git a/tensorflow/python/ops/numerics.py b/tensorflow/python/ops/numerics.py
index d348e47f57..8fcbd7d834 100644
--- a/tensorflow/python/ops/numerics.py
+++ b/tensorflow/python/ops/numerics.py
@@ -56,8 +56,8 @@ def add_check_numerics_ops():
`check_numerics` op for all of its (`half`, `float`, or `double`) inputs
is guaranteed to run before the `check_numerics` op on any of its outputs.
- Note: This API is not compatible with the use of @{tf.cond} or
- @{tf.while_loop}, and will raise a `ValueError` if you attempt to call it
+ Note: This API is not compatible with the use of `tf.cond` or
+ `tf.while_loop`, and will raise a `ValueError` if you attempt to call it
in such a graph.
Returns:
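A usage sketch, assuming a graph with no `tf.cond`/`tf.while_loop` (values are illustrative):

```python
import tensorflow as tf

# Run the returned op alongside other fetches to surface NaNs/Infs.
x = tf.placeholder(tf.float32)
y = tf.log(x)  # NaN for negative x
check_op = tf.add_check_numerics_ops()

with tf.Session() as sess:
  # Raises InvalidArgumentError because log(-1.0) is NaN.
  sess.run([y, check_op], feed_dict={x: -1.0})
```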
diff --git a/tensorflow/python/ops/random_ops.py b/tensorflow/python/ops/random_ops.py
index b8738adf66..4baf506385 100644
--- a/tensorflow/python/ops/random_ops.py
+++ b/tensorflow/python/ops/random_ops.py
@@ -61,7 +61,7 @@ def random_normal(shape,
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
name: A name for the operation (optional).
@@ -110,7 +110,7 @@ def parameterized_truncated_normal(shape,
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
name: A name for the operation (optional).
@@ -158,7 +158,7 @@ def truncated_normal(shape,
dtype: The type of the output.
seed: A Python integer. Used to create a random seed for the distribution.
See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
name: A name for the operation (optional).
@@ -212,7 +212,7 @@ def random_uniform(shape,
dtype: The type of the output: `float16`, `float32`, `float64`, `int32`,
or `int64`.
seed: A Python integer. Used to create a random seed for the distribution.
- See @{tf.set_random_seed}
+ See `tf.set_random_seed`
for behavior.
name: A name for the operation (optional).
@@ -264,7 +264,7 @@ def random_shuffle(value, seed=None, name=None):
value: A Tensor to be shuffled.
seed: A Python integer. Used to create a random seed for the distribution.
See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
name: A name for the operation (optional).
@@ -292,7 +292,7 @@ def random_crop(value, size, seed=None, name=None):
value: Input tensor to crop.
size: 1-D tensor with size the rank of `value`.
seed: Python integer. Used to create a random seed. See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
name: A name for this operation (optional).
@@ -338,7 +338,7 @@ def multinomial(logits, num_samples, seed=None, name=None, output_dtype=None):
num_samples: 0-D. Number of independent samples to draw for each row slice.
seed: A Python integer. Used to create a random seed for the distribution.
See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
name: Optional name for the operation.
output_dtype: integer type to use for the output. Defaults to int64.
@@ -417,7 +417,7 @@ def random_gamma(shape,
`float64`.
seed: A Python integer. Used to create a random seed for the distributions.
See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
name: Optional name for the operation.
@@ -467,7 +467,7 @@ def random_poisson(lam, shape, dtype=dtypes.float32, seed=None, name=None):
`int64`.
seed: A Python integer. Used to create a random seed for the distributions.
See
- @{tf.set_random_seed}
+ `tf.set_random_seed`
for behavior.
name: Optional name for the operation.
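Since every op in this file points at `tf.set_random_seed`, a short sketch of the two seed levels (graph mode assumed):

```python
import tensorflow as tf

# The graph-level seed combines with each op-level `seed` argument to
# make random sequences reproducible.
tf.set_random_seed(1234)
a = tf.random_uniform([2])           # graph-level seed only
b = tf.random_normal([2], seed=42)   # graph-level + op-level seed

with tf.Session() as sess:
  print(sess.run([a, b]))  # reproducible when the same graph is rebuilt
```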
diff --git a/tensorflow/python/ops/rnn_cell_impl.py b/tensorflow/python/ops/rnn_cell_impl.py
index 8356fbbb9d..aca20b6787 100644
--- a/tensorflow/python/ops/rnn_cell_impl.py
+++ b/tensorflow/python/ops/rnn_cell_impl.py
@@ -531,7 +531,7 @@ class BasicLSTMCell(LayerRNNCell):
It does not allow cell clipping or a projection layer, and does not
use peep-hole connections: it is the basic baseline.
- For advanced models, please use the full @{tf.nn.rnn_cell.LSTMCell}
+ For advanced models, please use the full `tf.nn.rnn_cell.LSTMCell`
that follows.
"""
diff --git a/tensorflow/python/ops/script_ops.py b/tensorflow/python/ops/script_ops.py
index af103d3cc7..d11e446dbf 100644
--- a/tensorflow/python/ops/script_ops.py
+++ b/tensorflow/python/ops/script_ops.py
@@ -313,8 +313,8 @@ def eager_py_func(func, inp, Tout, name=None):
in a once-differentiable TensorFlow operation that executes it with eager
execution enabled. As a consequence, `tf.contrib.eager.py_func` makes it
possible to express control flow using Python constructs (`if`, `while`,
- `for`, etc.), instead of TensorFlow control flow constructs (@{tf.cond},
- @{tf.while_loop}). For example, you might use `tf.contrib.eager.py_func` to
+ `for`, etc.), instead of TensorFlow control flow constructs (`tf.cond`,
+ `tf.while_loop`). For example, you might use `tf.contrib.eager.py_func` to
implement the log huber function:
```python
@@ -345,15 +345,15 @@ def eager_py_func(func, inp, Tout, name=None):
For more information on eager execution, see @{$guide/eager}.
- `tf.contrib.eager.py_func` is similar in spirit to @{tf.py_func}, but unlike
+ `tf.contrib.eager.py_func` is similar in spirit to `tf.py_func`, but unlike
the latter, the former lets you use TensorFlow operations in the wrapped
- Python function. In particular, while @{tf.py_func} only runs on CPUs and
+ Python function. In particular, while `tf.py_func` only runs on CPUs and
wraps functions that take NumPy arrays as inputs and return NumPy arrays as
outputs, `tf.contrib.eager.py_func` can be placed on GPUs and wraps functions
that take Tensors as inputs, execute TensorFlow operations in their bodies,
and return Tensors as outputs.
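For contrast, a minimal `tf.py_func` sketch (CPU-only, NumPy in/out, as described above):

```python
import numpy as np
import tensorflow as tf

def log1p_np(x):
  # The wrapped body sees NumPy arrays, not Tensors.
  return np.log1p(x)

inp = tf.constant([0.0, 1.0, 2.0])
out = tf.py_func(log1p_np, [inp], tf.float32)
```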
- Like @{tf.py_func}, `tf.contrib.eager.py_func` has the following limitations
+ Like `tf.py_func`, `tf.contrib.eager.py_func` has the following limitations
with respect to serialization and distribution:
* The body of the function (i.e. `func`) will not be serialized in a
diff --git a/tensorflow/python/ops/spectral_ops.py b/tensorflow/python/ops/spectral_ops.py
index 293aace728..da5884e746 100644
--- a/tensorflow/python/ops/spectral_ops.py
+++ b/tensorflow/python/ops/spectral_ops.py
@@ -180,9 +180,9 @@ def dct(input, type=2, n=None, axis=-1, norm=None, name=None): # pylint: disabl
"""Computes the 1D [Discrete Cosine Transform (DCT)][dct] of `input`.
Currently only Types II and III are supported. Type II is implemented using a
- length `2N` padded @{tf.spectral.rfft}, as described here:
+ length `2N` padded `tf.spectral.rfft`, as described here:
https://dsp.stackexchange.com/a/10606. Type III is a fairly straightforward
- inverse of Type II (i.e. using a length `2N` padded @{tf.spectral.irfft}).
+ inverse of Type II (i.e. using a length `2N` padded `tf.spectral.irfft`).
@compatibility(scipy)
Equivalent to scipy.fftpack.dct for Type-II and Type-III DCT.
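A NumPy sketch of the padded-FFT construction described above, checked against `scipy.fftpack.dct`:

```python
import numpy as np
from scipy import fftpack

# DCT-II of x via a length-2N zero-padded real FFT
# (see the linked dsp.stackexchange answer).
x = np.random.rand(8)
N = x.shape[0]
X = np.fft.rfft(np.concatenate([x, np.zeros(N)]))[:N]
dct2 = 2.0 * np.real(np.exp(-1j * np.pi * np.arange(N) / (2.0 * N)) * X)
np.testing.assert_allclose(dct2, fftpack.dct(x, type=2))
```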
diff --git a/tensorflow/python/ops/state_ops.py b/tensorflow/python/ops/state_ops.py
index 2c93cf72c7..35fc1226ec 100644
--- a/tensorflow/python/ops/state_ops.py
+++ b/tensorflow/python/ops/state_ops.py
@@ -329,7 +329,7 @@ def scatter_nd_update(ref, indices, updates, use_locking=True, name=None):
[1, 11, 3, 10, 9, 6, 7, 12]
- See @{tf.scatter_nd} for more details about how to make updates to
+ See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
@@ -443,7 +443,7 @@ def scatter_nd_add(ref, indices, updates, use_locking=False, name=None):
[1, 13, 3, 14, 14, 6, 7, 20]
- See @{tf.scatter_nd} for more details about how to make updates to
+ See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
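A sketch reproducing the `scatter_nd_update` result shown above:

```python
import tensorflow as tf

ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
update_op = tf.scatter_nd_update(ref, indices, updates)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(update_op))  # [1, 11, 3, 10, 9, 6, 7, 12]
```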
diff --git a/tensorflow/python/ops/summary_ops_v2.py b/tensorflow/python/ops/summary_ops_v2.py
index 00150fe688..94c7d88b5c 100644
--- a/tensorflow/python/ops/summary_ops_v2.py
+++ b/tensorflow/python/ops/summary_ops_v2.py
@@ -110,8 +110,8 @@ class SummaryWriter(object):
"""Encapsulates a stateful summary writer resource.
See also:
- - @{tf.contrib.summary.create_file_writer}
- - @{tf.contrib.summary.create_db_writer}
+ - `tf.contrib.summary.create_file_writer`
+ - `tf.contrib.summary.create_db_writer`
"""
def __init__(self, resource, init_op_fn):
@@ -174,22 +174,22 @@ def initialize(
"""Initializes summary writing for graph execution mode.
This helper method provides a higher-level alternative to using
- @{tf.contrib.summary.summary_writer_initializer_op} and
- @{tf.contrib.summary.graph}.
+ `tf.contrib.summary.summary_writer_initializer_op` and
+ `tf.contrib.summary.graph`.
- Most users will also want to call @{tf.train.create_global_step}
+ Most users will also want to call `tf.train.create_global_step`
which can happen before or after this function is called.
Args:
- graph: A @{tf.Graph} or @{tf.GraphDef} to output to the writer.
+ graph: A `tf.Graph` or `tf.GraphDef` to output to the writer.
This function will not write the default graph by default. When
writing to an event log file, the associated step will be zero.
- session: So this method can call @{tf.Session.run}. This defaults
- to @{tf.get_default_session}.
+ session: So this method can call `tf.Session.run`. This defaults
+ to `tf.get_default_session`.
Raises:
RuntimeError: If the current thread has no default
- @{tf.contrib.summary.SummaryWriter}.
+ `tf.contrib.summary.SummaryWriter`.
ValueError: If no `session` was passed and there is no default session.
"""
if context.executing_eagerly():
@@ -278,10 +278,10 @@ def create_db_writer(db_uri,
Experiment will not be associated with a User. Must be valid as
both a DNS label and Linux username.
name: Shared name for this SummaryWriter resource stored to default
- @{tf.Graph}.
+ `tf.Graph`.
Returns:
- A @{tf.contrib.summary.SummaryWriter} instance.
+ A `tf.contrib.summary.SummaryWriter` instance.
"""
with ops.device("cpu:0"):
if experiment_name is None:
@@ -328,7 +328,7 @@ def _nothing():
def all_summary_ops():
"""Graph-mode only. Returns all summary ops.
- Please note this excludes @{tf.contrib.summary.graph} ops.
+ Please note this excludes `tf.contrib.summary.graph` ops.
Returns:
The summary ops.
@@ -410,20 +410,20 @@ def generic(name, tensor, metadata=None, family=None, step=None):
def scalar(name, tensor, family=None, step=None):
"""Writes a scalar summary if possible.
- Unlike @{tf.contrib.summary.generic} this op may change the dtype
+ Unlike `tf.contrib.summary.generic` this op may change the dtype
depending on the writer, for both practical and efficiency concerns.
Args:
name: An arbitrary name for this summary.
- tensor: A @{tf.Tensor} Must be one of the following types:
+ tensor: A `tf.Tensor`. Must be one of the following types:
`float32`, `float64`, `int32`, `int64`, `uint8`, `int16`,
`int8`, `uint16`, `half`, `uint32`, `uint64`.
family: Optional, the summary's family.
step: The `int64` monotonic step variable, which defaults
- to @{tf.train.get_global_step}.
+ to `tf.train.get_global_step`.
Returns:
- The created @{tf.Operation} or a @{tf.no_op} if summary writing has
+ The created `tf.Operation` or a `tf.no_op` if summary writing has
not been enabled for this context.
"""
@@ -494,31 +494,31 @@ def graph(param, step=None, name=None):
"""Writes a TensorFlow graph to the summary interface.
The graph summary is, strictly speaking, not a summary. Conditions
- like @{tf.contrib.summary.never_record_summaries} do not apply. Only
+ like `tf.contrib.summary.never_record_summaries` do not apply. Only
a single graph can be associated with a particular run. If multiple
graphs are written, then only the last one will be considered by
TensorBoard.
When not using eager execution mode, the user should consider passing
- the `graph` parameter to @{tf.contrib.summary.initialize} instead of
+ the `graph` parameter to `tf.contrib.summary.initialize` instead of
calling this function. Otherwise special care needs to be taken when
using the graph to record the graph.
Args:
- param: A @{tf.Tensor} containing a serialized graph proto. When
+ param: A `tf.Tensor` containing a serialized graph proto. When
eager execution is enabled, this function will automatically
- coerce @{tf.Graph}, @{tf.GraphDef}, and string types.
+ coerce `tf.Graph`, `tf.GraphDef`, and string types.
step: The global step variable. This doesn't have useful semantics
for graph summaries, but is used anyway, due to the structure of
event log files. This defaults to the global step.
name: A name for the operation (optional).
Returns:
- The created @{tf.Operation} or a @{tf.no_op} if summary writing has
+ The created `tf.Operation` or a `tf.no_op` if summary writing has
not been enabled for this context.
Raises:
- TypeError: If `param` isn't already a @{tf.Tensor} in graph mode.
+ TypeError: If `param` isn't already a `tf.Tensor` in graph mode.
"""
if not context.executing_eagerly() and not isinstance(param, ops.Tensor):
raise TypeError("graph() needs a tf.Tensor (e.g. tf.placeholder) in graph "
@@ -539,21 +539,21 @@ _graph = graph # for functions with a graph parameter
def import_event(tensor, name=None):
- """Writes a @{tf.Event} binary proto.
+ """Writes a `tf.Event` binary proto.
When using create_db_writer(), this can be used alongside
- @{tf.TFRecordReader} to load event logs into the database. Please
+ `tf.TFRecordReader` to load event logs into the database. Please
note that this is lower level than the other summary functions and
will ignore any conditions set by methods like
- @{tf.contrib.summary.should_record_summaries}.
+ `tf.contrib.summary.should_record_summaries`.
Args:
- tensor: A @{tf.Tensor} of type `string` containing a serialized
- @{tf.Event} proto.
+ tensor: A `tf.Tensor` of type `string` containing a serialized
+ `tf.Event` proto.
name: A name for the operation (optional).
Returns:
- The created @{tf.Operation}.
+ The created `tf.Operation`.
"""
return gen_summary_ops.import_event(
context.context().summary_writer_resource, tensor, name=name)
@@ -565,13 +565,13 @@ def flush(writer=None, name=None):
This operation blocks until that finishes.
Args:
- writer: The @{tf.contrib.summary.SummaryWriter} resource to flush.
+ writer: The `tf.contrib.summary.SummaryWriter` resource to flush.
The thread default will be used if this parameter is None.
- Otherwise a @{tf.no_op} is returned.
+ Otherwise a `tf.no_op` is returned.
name: A name for the operation (optional).
Returns:
- The created @{tf.Operation}.
+ The created `tf.Operation`.
"""
if writer is None:
writer = context.context().summary_writer_resource
@@ -593,7 +593,7 @@ def eval_dir(model_dir, name=None):
def create_summary_file_writer(*args, **kwargs):
- """Please use @{tf.contrib.summary.create_file_writer}."""
+ """Please use `tf.contrib.summary.create_file_writer`."""
logging.warning("Deprecation Warning: create_summary_file_writer was renamed "
"to create_file_writer")
return create_file_writer(*args, **kwargs)
diff --git a/tensorflow/python/ops/template.py b/tensorflow/python/ops/template.py
index da9b64fe34..e7ad261615 100644
--- a/tensorflow/python/ops/template.py
+++ b/tensorflow/python/ops/template.py
@@ -128,7 +128,7 @@ def make_template(name_, func_, create_scope_now_=False, unique_name_=None,
template of the same scope/unique_name already exists and reuse is false,
an error is raised. Defaults to None.
custom_getter_: Optional custom getter for variables used in `func_`. See
- the @{tf.get_variable} `custom_getter` documentation for
+ the `tf.get_variable` `custom_getter` documentation for
more information.
**kwargs: Keyword arguments to apply to `func_`.
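A sketch of `make_template` with a `custom_getter_` (function and variable names are illustrative):

```python
import tensorflow as tf

def scale_by_y(x):
  y = tf.get_variable("y", shape=[], initializer=tf.ones_initializer())
  return x * y

def logging_getter(getter, name, *args, **kwargs):
  # Sees every tf.get_variable call made inside the template.
  print("creating/reusing:", name)
  return getter(name, *args, **kwargs)

scale = tf.make_template("scale", scale_by_y, custom_getter_=logging_getter)
a = scale(tf.constant(3.0))
b = scale(tf.constant(5.0))  # reuses the same `y` variable
```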
@@ -176,7 +176,7 @@ def make_template_internal(name_,
template of the same scope/unique_name already exists and reuse is false,
an error is raised. Defaults to None. If executing eagerly, must be None.
custom_getter_: Optional custom getter for variables used in `func_`. See
- the @{tf.get_variable} `custom_getter` documentation for
+ the `tf.get_variable` `custom_getter` documentation for
more information.
create_graph_function_: When True, `func_` will be executed as a graph
function. This implies that `func_` must satisfy the properties that
diff --git a/tensorflow/python/ops/variable_scope.py b/tensorflow/python/ops/variable_scope.py
index aca44bcd44..c248dd9172 100644
--- a/tensorflow/python/ops/variable_scope.py
+++ b/tensorflow/python/ops/variable_scope.py
@@ -314,13 +314,13 @@ class _VariableStore(object):
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
- @{tf.VariableSynchronization}. By default the synchronization is set to
+ `tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
- @{tf.VariableAggregation}.
+ `tf.VariableAggregation`.
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
@@ -1484,7 +1484,7 @@ Args:
unless validate_shape is False.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
- @{tf.GraphKeys.REGULARIZATION_LOSSES} and can be used for regularization.
+ `tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
@@ -2445,13 +2445,13 @@ def variable_creator_scope(variable_creator):
use_resource: if True, a ResourceVariable is always created.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
- @{tf.VariableSynchronization}. By default the synchronization is set to
+ `tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
- @{tf.VariableAggregation}.
+ `tf.VariableAggregation`.
This set may grow over time, so it's important that the signature of creators is as
mentioned above.
diff --git a/tensorflow/python/ops/variables.py b/tensorflow/python/ops/variables.py
index fc00ce68ae..464c1167d9 100644
--- a/tensorflow/python/ops/variables.py
+++ b/tensorflow/python/ops/variables.py
@@ -320,13 +320,13 @@ class Variable(six.with_metaclass(VariableMetaclass,
a resource variable is always created.
synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
- @{tf.VariableSynchronization}. By default the synchronization is set to
+ `tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
- @{tf.VariableAggregation}.
+ `tf.VariableAggregation`.
Raises:
ValueError: If both `variable_def` and `initial_value` are specified.
@@ -388,7 +388,7 @@ class Variable(six.with_metaclass(VariableMetaclass,
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
- passed, the default session is used. See @{tf.Session} for more
+ passed, the default session is used. See `tf.Session` for more
information on launching a graph and on sessions.
```python
@@ -551,7 +551,7 @@ class Variable(six.with_metaclass(VariableMetaclass,
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
- passed, the default session is used. See @{tf.Session} for more
+ passed, the default session is used. See `tf.Session` for more
information on launching a graph and on sessions.
```python
@@ -1106,7 +1106,7 @@ class RefVariable(Variable):
def _AsTensor(self): # pylint: disable=invalid-name
"""Converts this variable to a Tensor.
- See @{tf.Variable.value}.
+ See `tf.Variable.value`.
Returns:
A `Tensor` containing the value of the variable.
@@ -1163,7 +1163,7 @@ class RefVariable(Variable):
Returns a `Tensor` which holds a reference to the variable. You can
assign a new value to the variable by passing the tensor to an assign op.
- See @{tf.Variable.value} if you want to get the value of the
+ See `tf.Variable.value` if you want to get the value of the
variable.
Returns:
@@ -1191,7 +1191,7 @@ class RefVariable(Variable):
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
- passed, the default session is used. See @{tf.Session} for more
+ passed, the default session is used. See `tf.Session` for more
information on launching a graph and on sessions.
```python
@@ -1386,7 +1386,7 @@ class RefVariable(Variable):
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
- passed, the default session is used. See @{tf.Session} for more
+ passed, the default session is used. See `tf.Session` for more
information on launching a graph and on sessions.
```python
@@ -1979,7 +1979,7 @@ def global_variables(scope=None):
This convenience function returns the contents of that collection.
An alternative to global variables are local variables. See
- @{tf.local_variables}
+ `tf.local_variables`
Args:
scope: (Optional.) A string. If supplied, the resulting list is filtered
@@ -2032,7 +2032,7 @@ def local_variables(scope=None):
This convenience function returns the contents of that collection.
An alternative to local variables are global variables. See
- @{tf.global_variables}
+ `tf.global_variables`
Args:
scope: (Optional.) A string. If supplied, the resulting list is filtered
diff --git a/tensorflow/python/summary/writer/writer.py b/tensorflow/python/summary/writer/writer.py
index 60e96ee947..861a3e920d 100644
--- a/tensorflow/python/summary/writer/writer.py
+++ b/tensorflow/python/summary/writer/writer.py
@@ -104,8 +104,8 @@ class SummaryToEventTransformer(object):
and adds it to the event file.
You can pass the result of evaluating any summary op, using
- @{tf.Session.run} or
- @{tf.Tensor.eval}, to this
+ `tf.Session.run` or
+ `tf.Tensor.eval`, to this
function. Alternatively, you can pass a `tf.Summary` protocol
buffer that you populate with your own data. The latter is
commonly done to report evaluation results in event files.
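A minimal sketch of that flow (the log directory is a placeholder):

```python
import tensorflow as tf

loss = tf.constant(0.25)
summary_op = tf.summary.scalar("loss", loss)
writer = tf.summary.FileWriter("/tmp/log")

with tf.Session() as sess:
  # Evaluate the summary op, then hand the result to the writer.
  summ = sess.run(summary_op)
  writer.add_summary(summ, global_step=1)
```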
diff --git a/tensorflow/python/training/distribute.py b/tensorflow/python/training/distribute.py
index 5f7a53e186..581db45e80 100644
--- a/tensorflow/python/training/distribute.py
+++ b/tensorflow/python/training/distribute.py
@@ -864,7 +864,7 @@ class DistributionStrategy(object):
Args:
aggregation: Indicates how a variable will be aggregated. Accepted values
- are @{tf.VariableAggregation.SUM}, @{tf.VariableAggregation.MEAN}.
+ are `tf.VariableAggregation.SUM`, `tf.VariableAggregation.MEAN`.
value: A per-device value with one value per tower.
destinations: An optional mirrored variable, a device string,
list of device strings. The return value will be copied to all
@@ -893,7 +893,7 @@ class DistributionStrategy(object):
Args:
aggregation: Indicates how a variable will be aggregated. Accepted values
- are @{tf.VariableAggregation.SUM}, @{tf.VariableAggregation.MEAN}.
+ are `tf.VariableAggregation.SUM`, `tf.VariableAggregation.MEAN`.
value_destination_pairs: A sequence of (value, destinations)
pairs. See `reduce()` for a description.
diff --git a/tensorflow/python/training/moving_averages.py b/tensorflow/python/training/moving_averages.py
index 60cc54c264..4b91d1e963 100644
--- a/tensorflow/python/training/moving_averages.py
+++ b/tensorflow/python/training/moving_averages.py
@@ -300,7 +300,7 @@ class ExponentialMovingAverage(object):
for a given variable.
* Build a model normally but load the checkpoint files to evaluate by using
the shadow variable names. For this use the `average_name()` method. See
- the @{tf.train.Saver} for more
+ the `tf.train.Saver` for more
information on restoring saved variables.
Example of restoring the shadow variable values:
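(The docstring's own example is elided by this hunk; a minimal sketch of the `average_name()` pattern follows, with illustrative names:)

```python
import tensorflow as tf

var0 = tf.get_variable("var0", shape=[])
ema = tf.train.ExponentialMovingAverage(decay=0.999)
# Map shadow-variable names to the model variables so restore() loads
# the moving averages into them.
saver = tf.train.Saver({ema.average_name(var0): var0})
# saver.restore(sess, checkpoint_path)
```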
diff --git a/tensorflow/python/training/quantize_training.i b/tensorflow/python/training/quantize_training.i
index 54d6789616..41e62e0252 100644
--- a/tensorflow/python/training/quantize_training.i
+++ b/tensorflow/python/training/quantize_training.i
@@ -56,7 +56,7 @@ PyObject* DoQuantizeTrainingOnGraphDefHelper(
%insert("python") %{
def do_quantize_training_on_graphdef(input_graph, num_bits):
- """A general quantization scheme is being developed in @{tf.contrib.quantize}.
+ """A general quantization scheme is being developed in `tf.contrib.quantize`.
Consider using that instead, though since it is in the tf.contrib namespace,
it is not subject to backward compatibility guarantees.
diff --git a/tensorflow/python/training/server_lib.py b/tensorflow/python/training/server_lib.py
index 58cf5277fe..46543413e4 100644
--- a/tensorflow/python/training/server_lib.py
+++ b/tensorflow/python/training/server_lib.py
@@ -98,9 +98,9 @@ class Server(object):
"""An in-process TensorFlow server, for use in distributed training.
A `tf.train.Server` instance encapsulates a set of devices and a
- @{tf.Session} target that
+ `tf.Session` target that
can participate in distributed training. A server belongs to a
- cluster (specified by a @{tf.train.ClusterSpec}), and
+ cluster (specified by a `tf.train.ClusterSpec`), and
corresponds to a particular task in a named job. The server can
communicate with any other server in the same cluster.
"""
@@ -186,7 +186,7 @@ class Server(object):
"""Returns the target for a `tf.Session` to connect to this server.
To create a
- @{tf.Session} that
+ `tf.Session` that
connects to this server, use the following snippet:
```python
@@ -230,7 +230,7 @@ class ClusterSpec(object):
A `tf.train.ClusterSpec` represents the set of processes that
participate in a distributed TensorFlow computation. Every
- @{tf.train.Server} is constructed in a particular cluster.
+ `tf.train.Server` is constructed in a particular cluster.
To create a cluster with two jobs and five tasks, you specify the
mapping from job names to lists of network addresses (typically
@@ -421,7 +421,7 @@ class ClusterSpec(object):
NOTE: For backwards compatibility, this method returns a list. If
the given job was defined with a sparse set of task indices, the
length of this list may not reflect the number of tasks defined in
- this job. Use the @{tf.train.ClusterSpec.num_tasks} method
+ this job. Use the `tf.train.ClusterSpec.num_tasks` method
to find the number of tasks defined in a particular job.
Args:
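A sketch matching the two-job, five-task example in the text (addresses are placeholders):

```python
import tensorflow as tf

cluster = tf.train.ClusterSpec({
    "worker": ["worker0:2222", "worker1:2222", "worker2:2222"],
    "ps": ["ps0:2222", "ps1:2222"],
})
server = tf.train.Server(cluster, job_name="worker", task_index=0)
print(cluster.num_tasks("worker"))  # 3
```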
diff --git a/tensorflow/python/training/supervisor.py b/tensorflow/python/training/supervisor.py
index 372ea415df..0755364bbe 100644
--- a/tensorflow/python/training/supervisor.py
+++ b/tensorflow/python/training/supervisor.py
@@ -45,7 +45,7 @@ class Supervisor(object):
"""A training helper that checkpoints models and computes summaries.
This class is deprecated. Please use
- @{tf.train.MonitoredTrainingSession} instead.
+ `tf.train.MonitoredTrainingSession` instead.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
and a `SessionManager` that takes care of common needs of TensorFlow
@@ -134,7 +134,7 @@ class Supervisor(object):
* Specifying `'local'` requests a session that uses the RPC-based
"Master interface" to run TensorFlow programs. See
- @{tf.train.Server.create_local_server} for
+ `tf.train.Server.create_local_server` for
details.
* Specifying `'grpc://hostname:port'` requests a session that uses
diff --git a/tensorflow/python/training/warm_starting_util.py b/tensorflow/python/training/warm_starting_util.py
index b1a7cfab83..0ba7ba983d 100644
--- a/tensorflow/python/training/warm_starting_util.py
+++ b/tensorflow/python/training/warm_starting_util.py
@@ -44,7 +44,7 @@ class VocabInfo(
])):
"""Vocabulary information for warm-starting.
- See @{tf.estimator.WarmStartSettings$WarmStartSettings} for examples of using
+ See `tf.estimator.WarmStartSettings` for examples of using
VocabInfo to warm-start.
Attributes:
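A hedged sketch of warm-starting with a `VocabInfo` (paths, sizes, and the variable name are placeholders, not values from this change):

```python
import tensorflow as tf

vocab_info = tf.estimator.VocabInfo(
    new_vocab="new_vocab.txt",
    new_vocab_size=1000,
    num_oov_buckets=1,
    old_vocab="old_vocab.txt")

ws = tf.estimator.WarmStartSettings(
    ckpt_to_initialize_from="/tmp/model",
    var_name_to_vocab_info={
        "input_layer/words_embedding/embedding_weights": vocab_info})
```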