From f40a875355557483aeae60ffcf757fc9626c752b Mon Sep 17 00:00:00 2001
From: Mark Daoust
Date: Thu, 9 Aug 2018 07:03:39 -0700
Subject: Remove usage of magic-api-link syntax from source files.

Back-ticks are now converted to links in the api_docs generator.

With the new docs repo we're moving to simplify the docs pipeline, and
make everything more readable.

By doing this we no longer get test failures for symbols that don't
exist (`tf.does_not_exist` will not get a link).

There is also no longer any way to set custom link text. That's okay.

This is the result of the following regex replacement (plus a couple of
manual edits):

re: @\{([^$].*?)(\$.+?)?}
sub: `\1`

which performs the following replacements:

"@{tf.symbol}"           --> "`tf.symbol`"
"@{tf.symbol$link_text}" --> "`tf.symbol`"

PiperOrigin-RevId: 208042358
---
 tensorflow/contrib/mixed_precision/python/loss_scale_manager.py   | 4 ++--
 tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py | 6 +++---
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/tensorflow/contrib/mixed_precision/python/loss_scale_manager.py b/tensorflow/contrib/mixed_precision/python/loss_scale_manager.py
index be7377b151..eba505881f 100644
--- a/tensorflow/contrib/mixed_precision/python/loss_scale_manager.py
+++ b/tensorflow/contrib/mixed_precision/python/loss_scale_manager.py
@@ -41,12 +41,12 @@ class LossScaleManager(object):
   applied on variables.
 
   This class is used together with
-  @{tf.contrib.mixed_precision.LossScaleOptimizer} for mixed precision training
+  `tf.contrib.mixed_precision.LossScaleOptimizer` for mixed precision training
   (float32 variables and float16 ops) on Nvidia GPUs in order to achieve the
   same model quality as single precision training, with the benefits of
   potential higher throughput.
 
-  See @{tf.contrib.mixed_precision.LossScaleOptimizer} for more details.
+  See `tf.contrib.mixed_precision.LossScaleOptimizer` for more details.
   """
 
   @abc.abstractmethod
diff --git a/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py b/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
index 93050a3ae3..fcce52a07a 100644
--- a/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
+++ b/tensorflow/contrib/mixed_precision/python/loss_scale_optimizer.py
@@ -103,7 +103,7 @@ class LossScaleOptimizer(optimizer.Optimizer):
 
     Args:
       opt: The actual optimizer that will be used to compute and apply the
-        gradients. Must be an implementation of the @{tf.train.Optimizer}
+        gradients. Must be an implementation of the `tf.train.Optimizer`
         interface.
       loss_scale_manager: A LossScaleManager object.
     """
@@ -117,7 +117,7 @@ class LossScaleOptimizer(optimizer.Optimizer):
                         aggregation_method=None,
                         colocate_gradients_with_ops=False,
                         grad_loss=None):
-    """Compute gradients. See base class @{tf.train.Optimizer}."""
+    """Compute gradients. See base class `tf.train.Optimizer`."""
     loss_scale = self._loss_scale_manager.get_loss_scale()
 
     if context.executing_eagerly():
@@ -141,7 +141,7 @@ class LossScaleOptimizer(optimizer.Optimizer):
     return self._down_scale(grads_and_vars, loss_scale)
 
   def apply_gradients(self, grads_and_vars, global_step=None, name=None):
-    """Apply gradients. See base class @{tf.train.Optimizer}."""
+    """Apply gradients. See base class `tf.train.Optimizer`."""
     grads = [g for (g, _) in grads_and_vars]
     is_finite_grad = []
-- 
cgit v1.2.3
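
The regex replacement described in the commit message can be reproduced with
Python's re module. A minimal sketch using the exact pattern and substitution
quoted above; the sample strings and the strip_magic_links helper name are
illustrative, not part of the original change:

import re

# Pattern from the commit message: group 1 captures the symbol name; the
# optional group 2 captures a "$link_text" suffix, which is discarded.
MAGIC_LINK = re.compile(r"@\{([^$].*?)(\$.+?)?}")

def strip_magic_links(text):
  # Replace each "@{...}" reference with the back-ticked symbol name.
  return MAGIC_LINK.sub(r"`\1`", text)

assert strip_magic_links("@{tf.symbol}") == "`tf.symbol`"
assert strip_magic_links("@{tf.symbol$link_text}") == "`tf.symbol`"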
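
For context on the docstrings touched in loss_scale_manager.py, the two
classes are meant to be combined roughly as below. A minimal sketch against
the TF 1.x contrib API; the toy variable and loss, the constant scale of 128,
and the choice of FixedLossScaleManager are assumptions for illustration:

import tensorflow as tf  # TF 1.x, where tf.contrib is available

# Toy model: a single variable with a squared-error loss.
w = tf.Variable(1.0)
loss = tf.square(w - 3.0)

# Wrap a regular optimizer with loss scaling: the manager supplies the
# scale factor, the LossScaleOptimizer applies and later removes it.
manager = tf.contrib.mixed_precision.FixedLossScaleManager(128)  # assumed scale
opt = tf.train.GradientDescentOptimizer(0.1)
opt = tf.contrib.mixed_precision.LossScaleOptimizer(opt, manager)
train_op = opt.minimize(loss)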
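
The hunks in loss_scale_optimizer.py sit on top of the usual loss-scaling
recipe: scale the loss up before differentiation, scale the gradients back
down, and skip the update when a gradient overflowed. A standalone TF 1.x
sketch of that recipe under those assumptions; the function names are
illustrative, and the real class keeps this logic in compute_gradients,
_down_scale, and apply_gradients:

import tensorflow as tf  # TF 1.x graph mode

def scaled_compute_gradients(opt, loss, loss_scale):
  # Scale the loss up so small float16 gradients do not underflow to
  # zero, then undo the scaling on the resulting gradients.
  grads_and_vars = opt.compute_gradients(loss * loss_scale)
  return [(g / loss_scale if g is not None else None, v)
          for g, v in grads_and_vars]

def apply_gradients_if_finite(opt, grads_and_vars, global_step=None):
  # Counterpart of the is_finite_grad check in apply_gradients: if any
  # gradient overflowed to inf/nan, skip this step's update entirely.
  grads = [g for g, _ in grads_and_vars if g is not None]
  all_finite = tf.reduce_all(
      [tf.reduce_all(tf.is_finite(g)) for g in grads])
  return tf.cond(all_finite,
                 lambda: opt.apply_gradients(grads_and_vars, global_step),
                 lambda: tf.no_op())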