author Vahid Kazemi <vahid@google.com> 2016-09-17 20:55:13 -0800
committer TensorFlower Gardener <gardener@tensorflow.org> 2016-09-17 22:03:12 -0700
commit 139fefbe5edc203ed9a52847636d58026fe1bfd5 (patch)
tree 0af5207393cda77def2246e493ec39575f07cdec
parent c05dbaefe82ed1f1141ecc7edad530402e77acc5 (diff)
Remove deprecated argument moving_average_decay.
Change: 133501226
-rw-r--r--  tensorflow/contrib/layers/python/layers/optimizers.py | 16
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.layers.optimize_loss.md | 4
2 files changed, 1 insertion, 19 deletions
diff --git a/tensorflow/contrib/layers/python/layers/optimizers.py b/tensorflow/contrib/layers/python/layers/optimizers.py
index 21fe8c7341..18c1f313ce 100644
--- a/tensorflow/contrib/layers/python/layers/optimizers.py
+++ b/tensorflow/contrib/layers/python/layers/optimizers.py
@@ -30,7 +30,6 @@ from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
-from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train
@@ -58,7 +57,6 @@ def optimize_loss(loss,
gradient_noise_scale=None,
gradient_multipliers=None,
clip_gradients=None,
- moving_average_decay=None,
learning_rate_decay_fn=None,
update_ops=None,
variables=None,
@@ -99,8 +97,6 @@ def optimize_loss(loss,
If present, gradients for specified
variables will be multiplied by given constant.
clip_gradients: float or `None`, clips gradients by this value.
- moving_average_decay: Deprecated. float or None, takes into account previous
- loss to make learning smoother due to outliers.
learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
`Tensor`s, returns `Tensor`.
Can be used to implement any learning rate decay
@@ -130,18 +126,6 @@ def optimize_loss(loss,
    if update_ops:
      loss = control_flow_ops.with_dependencies(list(update_ops), loss)

-    # Moving average of the loss with decay.
-    # TODO(b/30439864): moving_average_decay should be removed.
-    if moving_average_decay is not None:
-      logging.warn("'moving_average_decay' is deprecated. Please use "
-                   "tensorboard's builtin averaging instead.")
-      # Generate moving averages of the loss.
-      loss_averages = train.ExponentialMovingAverage(moving_average_decay,
-                                                     name="avg")
-      loss_averages_op = loss_averages.apply([loss])
-      logging_ops.scalar_summary("loss/mean", loss_averages.average(loss))
-      loss = control_flow_ops.with_dependencies([loss_averages_op], loss)
-
    # Learning rate variable, with possible decay.
    lr = None
    if learning_rate is not None:
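The deleted block smoothed the reported loss with an exponential moving average and wrote it under the "loss/mean" summary tag. Callers that relied on moving_average_decay can reproduce that behaviour themselves before handing the loss to optimize_loss. The sketch below is only an illustration of that idea, not part of this change; it assumes the 2016-era public API (tf.train.ExponentialMovingAverage, tf.scalar_summary), and the smooth_loss helper name is hypothetical.

import tensorflow as tf

def smooth_loss(loss, decay=0.9):
  # Hypothetical helper mirroring the removed moving_average_decay logic.
  # Track an exponential moving average of the scalar loss tensor.
  ema = tf.train.ExponentialMovingAverage(decay, name="avg")
  ema_op = ema.apply([loss])
  # Same summary tag the removed code wrote; in TF 1.x this call would be
  # tf.summary.scalar instead of tf.scalar_summary.
  tf.scalar_summary("loss/mean", ema.average(loss))
  # Run the averaging op whenever the returned loss is evaluated, as
  # control_flow_ops.with_dependencies did in the removed block.
  with tf.control_dependencies([ema_op]):
    return tf.identity(loss)

As the removed warning notes, TensorBoard's built-in smoothing gives much the same visual effect without adding any ops to the graph.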
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.layers.optimize_loss.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.layers.optimize_loss.md
index 3213b3e8ff..c4ca2727e0 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.layers.optimize_loss.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard2/tf.contrib.layers.optimize_loss.md
@@ -1,4 +1,4 @@
-### `tf.contrib.layers.optimize_loss(loss, global_step, learning_rate, optimizer, gradient_noise_scale=None, gradient_multipliers=None, clip_gradients=None, moving_average_decay=None, learning_rate_decay_fn=None, update_ops=None, variables=None, name=None, summaries=None)` {#optimize_loss}
+### `tf.contrib.layers.optimize_loss(loss, global_step, learning_rate, optimizer, gradient_noise_scale=None, gradient_multipliers=None, clip_gradients=None, learning_rate_decay_fn=None, update_ops=None, variables=None, name=None, summaries=None)` {#optimize_loss}
Given loss and parameters for optimizer, returns a training op.
@@ -37,8 +37,6 @@ Various ways of passing optimizers, include:
If present, gradients for specified
variables will be multiplied by given constant.
* <b>`clip_gradients`</b>: float or `None`, clips gradients by this value.
-* <b>`moving_average_decay`</b>: Deprecated. float or None, takes into account previous
- loss to make learning smoother due to outliers.
* <b>`learning_rate_decay_fn`</b>: function, takes `learning_rate` and `global_step`
`Tensor`s, returns `Tensor`.
Can be used to implement any learning rate decay
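For reference, a call against the trimmed signature might look like the sketch below. The toy regression model, the exponential decay schedule, and all hyperparameter values are illustrative assumptions, not anything prescribed by this change.

import tensorflow as tf
from tensorflow.contrib import layers

# Toy model so the snippet is self-contained; names are illustrative.
x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])
w = tf.Variable([[0.0]])
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - y))

def lr_decay_fn(learning_rate, global_step):
  # learning_rate_decay_fn only has to map (learning_rate, global_step)
  # Tensors to a Tensor; exponential decay is just one possible choice.
  return tf.train.exponential_decay(
      learning_rate, global_step, decay_steps=10000, decay_rate=0.96)

global_step = tf.contrib.framework.get_or_create_global_step()
train_op = layers.optimize_loss(
    loss=loss,
    global_step=global_step,
    learning_rate=0.1,
    optimizer="SGD",
    clip_gradients=5.0,
    learning_rate_decay_fn=lr_decay_fn)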