author      A. Unique TensorFlower <gardener@tensorflow.org>    2018-10-05 12:44:45 -0700
committer   TensorFlower Gardener <gardener@tensorflow.org>     2018-10-05 12:49:14 -0700
commit      ef838969b95de39353a3ba495c335cbb14a0c9b5 (patch)
tree        800857a506c3d3695a7b3da2fd269a9fec85d93b /tensorflow/contrib/distribute
parent      6919ab5787e6384d709adf051dc1ce99236b76bc (diff)
Brings V2 Optimizers into Keras w/ Keras signatures
PiperOrigin-RevId: 215950207
Diffstat (limited to 'tensorflow/contrib/distribute')
-rw-r--r--  tensorflow/contrib/distribute/python/combinations.py       | 16
-rw-r--r--  tensorflow/contrib/distribute/python/minimize_loss_test.py | 5
2 files changed, 8 insertions, 13 deletions
diff --git a/tensorflow/contrib/distribute/python/combinations.py b/tensorflow/contrib/distribute/python/combinations.py
index cff4b0a463..63a163e76c 100644
--- a/tensorflow/contrib/distribute/python/combinations.py
+++ b/tensorflow/contrib/distribute/python/combinations.py
@@ -349,26 +349,26 @@ mirrored_strategy_with_two_gpus = NamedDistribution(
     required_gpus=2)
 
-adam_optimizer_v1_fn = NamedObject(
-    "AdamV1", lambda: adam.AdamOptimizer(0.001, epsilon=1))
 gradient_descent_optimizer_v1_fn = NamedObject(
     "GradientDescentV1",
     lambda: gradient_descent.GradientDescentOptimizer(0.2))
 adagrad_optimizer_v1_fn = NamedObject(
     "AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
+adam_optimizer_v1_fn = NamedObject("AdamV1",
+                                   lambda: adam.AdamOptimizer(0.001, epsilon=1))
 rmsprop_optimizer_v1_fn = NamedObject(
     "RmsPropV1", lambda: rmsprop.RMSPropOptimizer(0.001))
 
-optimizers_v1 = [adam_optimizer_v1_fn, gradient_descent_optimizer_v1_fn,
-                 adagrad_optimizer_v1_fn]
-adam_optimizer_v2_fn = NamedObject(
-    "AdamV2", lambda: adam_v2.AdamOptimizer(0.001, epsilon=1))
+optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn]
+
 gradient_descent_optimizer_v2_fn = NamedObject(
     "GradientDescentV2",
     lambda: gradient_descent_v2.GradientDescentOptimizer(0.2))
 adagrad_optimizer_v2_fn = NamedObject(
     "AdagradV2", lambda: adagrad_v2.AdagradOptimizer(0.001))
 
-optimizers_v2 = [adam_optimizer_v2_fn, gradient_descent_optimizer_v2_fn,
-                 adagrad_optimizer_v2_fn]
+adam_optimizer_v2_fn = NamedObject(
+    "AdamV2", lambda: adam_v2.AdamOptimizer(0.001, epsilon=1))
+
+optimizers_v2 = [gradient_descent_optimizer_v2_fn, adagrad_optimizer_v2_fn]
 
 graph_and_eager_modes = ["graph", "eager"]
diff --git a/tensorflow/contrib/distribute/python/minimize_loss_test.py b/tensorflow/contrib/distribute/python/minimize_loss_test.py
index ba147e7824..60e134055f 100644
--- a/tensorflow/contrib/distribute/python/minimize_loss_test.py
+++ b/tensorflow/contrib/distribute/python/minimize_loss_test.py
@@ -179,11 +179,6 @@ class MinimizeLossStepTest(test.TestCase, parameterized.TestCase):
     def get_expected_variables(optimizer_fn, num_parameter_devices):
       variables_map = {
           "GradientDescent": ["dense/kernel", "dense/bias"],
-          "Adam": [
-              "dense/kernel", "dense/bias", "beta1_power", "beta2_power",
-              "dense/kernel/Adam", "dense/kernel/Adam_1", "dense/bias/Adam",
-              "dense/bias/Adam_1"
-          ],
           "Adagrad": [
               "dense/kernel/Adagrad", "dense/kernel", "dense/bias/Adagrad",
               "dense/bias"
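
Note: a hedged sketch of how the NamedObject optimizer lists touched in combinations.py are typically consumed. The test below is hypothetical (not part of this commit), but the generate/combine decorator pattern mirrors minimize_loss_test.py in the same package; each NamedObject's name (e.g. "AdagradV1") becomes part of the generated test case name.

    from absl.testing import parameterized

    from tensorflow.contrib.distribute.python import combinations
    from tensorflow.python.eager import test


    class OptimizerCombinationsTest(test.TestCase, parameterized.TestCase):

      @combinations.generate(
          combinations.combine(
              optimizer_fn=combinations.optimizers_v1 + combinations.optimizers_v2,
              mode=["graph"]))
      def testCreateOptimizer(self, optimizer_fn):
        # NamedObject delegates calls to the wrapped lambda, so invoking the
        # named entry yields a fresh optimizer instance per generated case.
        optimizer = optimizer_fn()
        self.assertIsNotNone(optimizer)


    if __name__ == "__main__":
      test.main()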
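
For context on the minimize_loss_test.py deletion: the removed "Adam" entry in get_expected_variables listed exactly the variables a TF 1.x AdamOptimizer creates, namely the two beta-power accumulators plus two slot variables per trainable variable. A minimal sketch, assuming TF 1.x graph mode; the model and loss below are illustrative, not taken from the test:

    import tensorflow as tf

    # One dense layer gives the trainable variables dense/kernel and dense/bias.
    logits = tf.layers.dense(tf.ones([1, 2]), units=1, name="dense")
    loss = tf.reduce_sum(logits)
    train_op = tf.train.AdamOptimizer(0.001, epsilon=1).minimize(loss)

    # Adam adds beta1_power/beta2_power plus an "Adam" (m) and "Adam_1" (v)
    # slot for each variable, the same names the deleted mapping expected.
    print(sorted(v.op.name for v in tf.global_variables()))
    # ['beta1_power', 'beta2_power', 'dense/bias', 'dense/bias/Adam',
    #  'dense/bias/Adam_1', 'dense/kernel', 'dense/kernel/Adam',
    #  'dense/kernel/Adam_1']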