path: root/tensorflow/contrib/mixed_precision
author    Mark Daoust <markdaoust@google.com>            2018-08-09 07:03:39 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>  2018-08-09 07:08:30 -0700
commit f40a875355557483aeae60ffcf757fc9626c752b (patch)
tree   7f642a6fd12495c1c7d9b2f3a37e376d8ee6d2c9 /tensorflow/contrib/mixed_precision
parent fd9fc4b4b69f7fce60497bbaf5cbd958f12ead8d (diff)
Remove usage of magic-api-link syntax from source files.
Back-ticks are now converted to links in the api_docs generator. With the new docs repo we're moving to simplify the docs pipeline and make everything more readable. By doing this we no longer get test failures for symbols that don't exist (`tf.does_not_exist` will not get a link). There is also no longer a way to set custom link text. That's okay.

This is the result of the following regex replacement (plus a couple of manual edits):

re:  @\{([^$].*?)(\$.+?)?}
sub: `\1`

Which does the following replacements:

"@{tf.symbol}"           --> "`tf.symbol`"
"@{tf.symbol$link_text}" --> "`tf.symbol`"

PiperOrigin-RevId: 208042358
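For reference, the substitution described above can be reproduced with a small Python sweep along these lines (the helper name is illustrative, not part of the commit):

    import re

    # Pattern from the commit message: matches "@{tf.symbol}" and
    # "@{tf.symbol$link_text}"; group 1 captures the symbol and the optional
    # "$link_text" part is discarded.
    _API_LINK = re.compile(r"@\{([^$].*?)(\$.+?)?\}")

    def strip_api_links(text):
        # "@{tf.symbol}"           --> "`tf.symbol`"
        # "@{tf.symbol$link_text}" --> "`tf.symbol`"
        return _API_LINK.sub(r"`\1`", text)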
Diffstat (limited to 'tensorflow/contrib/mixed_precision')
-rw-r--r--  tensorflow/contrib/mixed_precision/python/loss_scale_manager.py    4
-rw-r--r--  tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py  6
2 files changed, 5 insertions, 5 deletions
diff --git a/tensorflow/contrib/mixed_precision/python/loss_scale_manager.py b/tensorflow/contrib/mixed_precision/python/loss_scale_manager.py
index be7377b151..eba505881f 100644
--- a/tensorflow/contrib/mixed_precision/python/loss_scale_manager.py
+++ b/tensorflow/contrib/mixed_precision/python/loss_scale_manager.py
@@ -41,12 +41,12 @@ class LossScaleManager(object):
applied on variables.
This class is used together with
- @{tf.contrib.mixed_precision.LossScaleOptimizer} for mixed precision training
+ `tf.contrib.mixed_precision.LossScaleOptimizer` for mixed precision training
(float32 variables and float16 ops) on Nvidia GPUs in order to achieve the
same model quality as single precision training, with the benefits of
potential higher throughput.
- See @{tf.contrib.mixed_precision.LossScaleOptimizer} for more details.
+ See `tf.contrib.mixed_precision.LossScaleOptimizer` for more details.
"""
@abc.abstractmethod
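(Not part of this patch.) For orientation, a minimal concrete LossScaleManager might look like the sketch below. get_loss_scale matches the call LossScaleOptimizer makes in the next file; update_loss_scale(finite_grads) and the tf.contrib.mixed_precision.LossScaleManager export are assumptions about the contrib API at this revision.

    import tensorflow as tf

    class ConstantLossScaleManager(tf.contrib.mixed_precision.LossScaleManager):
        """Toy manager that always reports the same loss scale (assumed API)."""

        def __init__(self, loss_scale=128.0):
            self._loss_scale = loss_scale

        def get_loss_scale(self):
            # Called by LossScaleOptimizer.compute_gradients() to scale the loss
            # up before gradients are computed.
            return tf.constant(self._loss_scale, dtype=tf.float32)

        def update_loss_scale(self, finite_grads):
            # A constant scale never changes; a real manager would raise or lower
            # the scale here depending on whether the gradients were finite.
            # (Signature and no-op return value are assumptions.)
            del finite_grads
            return tf.no_op()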
diff --git a/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py b/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
index 93050a3ae3..fcce52a07a 100644
--- a/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
+++ b/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
@@ -103,7 +103,7 @@ class LossScaleOptimizer(optimizer.Optimizer):
Args:
opt: The actual optimizer that will be used to compute and apply the
- gradients. Must be an implementation of the @{tf.train.Optimizer}
+ gradients. Must be an implementation of the `tf.train.Optimizer`
interface.
loss_scale_manager: A LossScaleManager object.
"""
@@ -117,7 +117,7 @@ class LossScaleOptimizer(optimizer.Optimizer):
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
- """Compute gradients. See base class @{tf.train.Optimizer}."""
+ """Compute gradients. See base class `tf.train.Optimizer`."""
loss_scale = self._loss_scale_manager.get_loss_scale()
if context.executing_eagerly():
@@ -141,7 +141,7 @@ class LossScaleOptimizer(optimizer.Optimizer):
return self._down_scale(grads_and_vars, loss_scale)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
- """Apply gradients. See base class @{tf.train.Optimizer}."""
+ """Apply gradients. See base class `tf.train.Optimizer`."""
grads = [g for (g, _) in grads_and_vars]
is_finite_grad = []
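(Not part of this patch.) A minimal usage sketch of the wrapper documented above, assuming the TF 1.x contrib package at this revision; FixedLossScaleManager is named here for illustration, and any LossScaleManager implementation would work:

    import tensorflow as tf

    # Toy float32 variable and loss, just to exercise the wrapper.
    w = tf.Variable(1.0, dtype=tf.float32)
    loss = tf.square(w - 3.0)

    opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    manager = tf.contrib.mixed_precision.FixedLossScaleManager(128)  # fixed scale of 128
    # The wrapper scales the loss before compute_gradients(), unscales the
    # resulting gradients, and skips apply_gradients() when they are non-finite.
    loss_scale_opt = tf.contrib.mixed_precision.LossScaleOptimizer(opt, manager)
    train_op = loss_scale_opt.minimize(loss)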