author     A. Unique TensorFlower <gardener@tensorflow.org>   2018-07-13 13:09:46 -0700
committer  TensorFlower Gardener <gardener@tensorflow.org>    2018-07-13 13:13:54 -0700
commit     bb7541b96c49b06b5c13775f3666ae2b8450a457 (patch)
tree       c62b1c22878eb0166a370da3dd1d9ec338415040
parent     63e6b9bf43049472b33393df74de271b6aa33863 (diff)
Automated rollback of commit 57527f7e47e3e67966b432065f510a601a4d8647
PiperOrigin-RevId: 204516578
-rw-r--r--  tensorflow/python/keras/callbacks.py       28
-rw-r--r--  tensorflow/python/keras/callbacks_test.py  76
2 files changed, 4 insertions, 100 deletions
diff --git a/tensorflow/python/keras/callbacks.py b/tensorflow/python/keras/callbacks.py
index 5d66db232a..53d907a2cc 100644
--- a/tensorflow/python/keras/callbacks.py
+++ b/tensorflow/python/keras/callbacks.py
@@ -32,10 +32,8 @@ import numpy as np
 import six
 
 from tensorflow.python.keras import backend as K
-from tensorflow.python.keras import optimizers
 from tensorflow.python.keras.utils.generic_utils import Progbar
 from tensorflow.python.ops import array_ops
-from tensorflow.python.ops.resource_variable_ops import ResourceVariable as Variable
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.summary import summary as tf_summary
 from tensorflow.python.util.tf_export import tf_export
@@ -644,35 +642,17 @@ class LearningRateScheduler(Callback):
     self.verbose = verbose
 
   def on_epoch_begin(self, epoch, logs=None):
-    # TODO(yashkatariya): Change the property checking when the learning
-    # rate attribute is unified across all TF Optimizers.
-    if isinstance(self.model.optimizer, optimizers.TFOptimizer):
-      if not hasattr(self.model.optimizer.optimizer, '_lr') and not hasattr(
-          self.model.optimizer.optimizer, '_learning_rate'):
-        raise ValueError(
-            'TF Optimizer must have a "_lr" or "_learning_rate" attribute.')
-      else:
-        opt = self.model.optimizer.optimizer
-        if hasattr(opt, '_lr'):
-          opt_lr = Variable(opt._lr)  # pylint: disable=protected-access
-        elif hasattr(opt, '_learning_rate'):
-          opt_lr = Variable(opt._learning_rate)  # pylint: disable=protected-access
-    else:
-      if not hasattr(self.model.optimizer, 'lr'):
-        raise ValueError('Optimizer must have a "lr" attribute.')
-      else:
-        opt = self.model.optimizer
-        opt_lr = opt.lr
-
+    if not hasattr(self.model.optimizer, 'lr'):
+      raise ValueError('Optimizer must have a "lr" attribute.')
     try:  # new API
-      lr = float(K.get_value(opt_lr))
+      lr = float(K.get_value(self.model.optimizer.lr))
       lr = self.schedule(epoch, lr)
     except TypeError:  # Support for old API for backward compatibility
       lr = self.schedule(epoch)
     if not isinstance(lr, (float, np.float32, np.float64)):
       raise ValueError('The output of the "schedule" function '
                        'should be float.')
-    K.set_value(opt_lr, lr)
+    K.set_value(self.model.optimizer.lr, lr)
     if self.verbose > 0:
       print('\nEpoch %05d: LearningRateScheduler reducing learning '
            'rate to %s.' % (epoch + 1, lr))
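
After this rollback, LearningRateScheduler reads and writes only `model.optimizer.lr`, so it works with Keras optimizers that expose an `lr` variable and no longer special-cases optimizers wrapped in `optimizers.TFOptimizer` (e.g. AdamOptimizer, GradientDescentOptimizer), which is why the corresponding tests are removed below. The following is a minimal usage sketch of the surviving behavior, not part of the commit; the model shape, data, and schedule are illustrative assumptions only.

# Sketch: using LearningRateScheduler with a Keras optimizer that exposes `lr`.
import numpy as np
from tensorflow.python import keras

model = keras.models.Sequential([
    keras.layers.Dense(16, input_dim=20, activation='relu'),
    keras.layers.Dense(2, activation='softmax'),
])
model.compile(loss='categorical_crossentropy',
              optimizer=keras.optimizers.SGD(lr=0.1),  # exposes an `lr` variable
              metrics=['accuracy'])

# The schedule may take (epoch, lr); the callback falls back to the old
# single-argument form schedule(epoch) if calling it raises a TypeError.
def schedule(epoch, lr):
  return lr * 0.5 if epoch > 0 else lr

cbks = [keras.callbacks.LearningRateScheduler(schedule, verbose=1)]
x = np.random.random((32, 20)).astype('float32')
y = keras.utils.to_categorical(np.random.randint(2, size=(32,)), 2)
model.fit(x, y, epochs=3, callbacks=cbks, verbose=0)
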
diff --git a/tensorflow/python/keras/callbacks_test.py b/tensorflow/python/keras/callbacks_test.py
index 244d48591c..45598cafd3 100644
--- a/tensorflow/python/keras/callbacks_test.py
+++ b/tensorflow/python/keras/callbacks_test.py
@@ -29,16 +29,10 @@ import numpy as np
 
 from tensorflow.core.framework import summary_pb2
 from tensorflow.python import keras
-from tensorflow.python.eager import context
-from tensorflow.python.framework import test_util
 from tensorflow.python.keras import testing_utils
-from tensorflow.python.ops.resource_variable_ops import ResourceVariable as Variable
 from tensorflow.python.platform import test
 from tensorflow.python.platform import tf_logging as logging
 from tensorflow.python.summary.writer import writer_cache
-from tensorflow.python.training.adam import AdamOptimizer
-from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
-
 
 try:
   import h5py  # pylint:disable=g-import-not-at-top
@@ -376,76 +370,6 @@ class KerasCallbacksTest(test.TestCase):
           float(keras.backend.get_value(
               model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
 
-  @test_util.run_in_graph_and_eager_modes
-  def test_TF_LearningRateScheduler_Adam(self):
-    with self.test_session():
-      with context.eager_mode():
-        np.random.seed(1337)
-        (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
-            train_samples=TRAIN_SAMPLES,
-            test_samples=TEST_SAMPLES,
-            input_shape=(INPUT_DIM,),
-            num_classes=NUM_CLASSES)
-        y_test = keras.utils.to_categorical(y_test)
-        y_train = keras.utils.to_categorical(y_train)
-        model = keras.models.Sequential()
-        model.add(
-            keras.layers.Dense(
-                NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
-        model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
-        model.compile(
-            loss='categorical_crossentropy',
-            optimizer=AdamOptimizer(),
-            metrics=['accuracy'])
-        cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
-        model.fit(
-            x_train,
-            y_train,
-            batch_size=BATCH_SIZE,
-            validation_data=(x_test, y_test),
-            callbacks=cbks,
-            epochs=5,
-            verbose=0)
-        opt_lr = model.optimizer.optimizer._lr
-        self.assertLess(
-            float(keras.backend.get_value(
-                Variable(opt_lr))) - 0.2, keras.backend.epsilon())
-
-  @test_util.run_in_graph_and_eager_modes
-  def test_TF_LearningRateScheduler_GradientDescent(self):
-    with self.test_session():
-      with context.eager_mode():
-        np.random.seed(1337)
-        (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
-            train_samples=TRAIN_SAMPLES,
-            test_samples=TEST_SAMPLES,
-            input_shape=(INPUT_DIM,),
-            num_classes=NUM_CLASSES)
-        y_test = keras.utils.to_categorical(y_test)
-        y_train = keras.utils.to_categorical(y_train)
-        model = keras.models.Sequential()
-        model.add(
-            keras.layers.Dense(
-                NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
-        model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
-        model.compile(
-            loss='categorical_crossentropy',
-            optimizer=GradientDescentOptimizer(1e-3),
-            metrics=['accuracy'])
-        cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
-        model.fit(
-            x_train,
-            y_train,
-            batch_size=BATCH_SIZE,
-            validation_data=(x_test, y_test),
-            callbacks=cbks,
-            epochs=5,
-            verbose=0)
-        opt_lr = model.optimizer.optimizer._learning_rate
-        self.assertLess(
-            float(keras.backend.get_value(
-                Variable(opt_lr))) - 0.2, keras.backend.epsilon())
-
   def test_ReduceLROnPlateau(self):
     with self.test_session():
       np.random.seed(1337)