path: root/tensorflow/contrib/tpu
author	A. Unique TensorFlower <gardener@tensorflow.org>	2018-10-02 18:10:46 -0700
committer	TensorFlower Gardener <gardener@tensorflow.org>	2018-10-02 18:14:15 -0700
commit	05bc6c6762d5a58bacd585e9243133bf0378515f (patch)
tree	f57bdb78e9cf488cdc04ed94d04e26e507f8ad8b /tensorflow/contrib/tpu
parent	9f7a138640408cea58698a432fd1596cf436b484 (diff)
Remove initial accumulator (and other auxiliary parameter) values from
optimization parameter protos and remove uses of that functionality in tests.

PiperOrigin-RevId: 215494433
Diffstat (limited to 'tensorflow/contrib/tpu')
-rw-r--r--	tensorflow/contrib/tpu/proto/optimization_parameters.proto	17 -----------------
1 file changed, 0 insertions(+), 17 deletions(-)
diff --git a/tensorflow/contrib/tpu/proto/optimization_parameters.proto b/tensorflow/contrib/tpu/proto/optimization_parameters.proto
index 8529b48c15..b9e0747fa4 100644
--- a/tensorflow/contrib/tpu/proto/optimization_parameters.proto
+++ b/tensorflow/contrib/tpu/proto/optimization_parameters.proto
@@ -28,7 +28,6 @@ message LearningRate {
// https://www.tensorflow.org/api_docs/python/tf/train/AdagradOptimizer
// https://github.com/tensorflow/tensorflow/blob/c19e29306ce1777456b2dbb3a14f511edf7883a8/tensorflow/core/kernels/training_ops.cc#L151
message AdagradParameters {
- float initial_accumulator = 1;
}
// https://www.tensorflow.org/api_docs/python/tf/train/GradientDescentOptimizer
@@ -42,8 +41,6 @@ message FtrlParameters {
float l1 = 1;
float l2 = 2;
float lr_power = 3;
- float initial_accum = 4;
- float initial_linear = 5;
}
// The Adam optimizer does not implement hyper-parameter update; use the dynamic
@@ -70,8 +67,6 @@ message AdamParameters {
float beta1 = 3;
float beta2 = 4;
float epsilon = 5;
- float initial_m = 6;
- float initial_v = 7;
bool use_non_lazy_adam = 8;
bool use_max_with_epsilon = 9;
}
@@ -81,7 +76,6 @@ message AdamParameters {
message MomentumParameters {
float momentum = 1;
bool use_nesterov = 2;
- float initial_accum = 3;
}
// https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer
@@ -90,8 +84,6 @@ message RmsPropParameters {
float rho = 1;
float momentum = 2;
float epsilon = 3;
- float initial_ms = 4;
- float initial_mom = 5;
}
// https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer
@@ -100,9 +92,6 @@ message CenteredRmsPropParameters {
float rho = 1;
float momentum = 2;
float epsilon = 3;
- float initial_ms = 4;
- float initial_mom = 5;
- float initial_mg = 6;
}
// Variant of algorithm in http://proceedings.mlr.press/v44/shamir15.pdf
@@ -119,9 +108,6 @@ message MdlAdagradLightParameters {
float mdl_hard_limit = 10;
bool hard_limit_min_benefit = 11;
bool mdl_regularize = 12;
- float initial_accumulator = 13;
- float initial_weight = 14;
- float initial_benefit = 15;
}
// https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer
@@ -129,8 +115,6 @@ message MdlAdagradLightParameters {
message AdadeltaParameters {
float rho = 1;
float epsilon = 2;
- float initial_accumulator = 3;
- float initial_update = 4;
}
// https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer
@@ -138,7 +122,6 @@ message AdadeltaParameters {
message ProximalAdagradParameters {
float l1 = 1;
float l2 = 2;
- float initial_accumulator = 3;
}
message OptimizationParameters {
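
For context, a minimal sketch of how the trimmed messages might be populated from Python after this change. It assumes the conventional protoc-generated module path tensorflow.contrib.tpu.proto.optimization_parameters_pb2 (not shown in this patch); only fields visible in the diff above are used, and the initial accumulator/slot values removed here would now have to be supplied by whatever initializes the embedding variables rather than by the proto.

    # Hedged sketch, not part of this commit: construct the trimmed optimizer
    # messages via the protoc-generated Python module (assumed import path;
    # verify against your build).
    from tensorflow.contrib.tpu.proto import optimization_parameters_pb2 as opt_pb

    # AdagradParameters now carries no fields; the accumulator's starting
    # value is no longer expressed in the proto.
    adagrad = opt_pb.AdagradParameters()

    # FtrlParameters keeps only regularization and learning-rate power; the
    # initial_accum / initial_linear fields are gone.
    ftrl = opt_pb.FtrlParameters(l1=0.0, l2=0.01, lr_power=-0.5)

    # AdamParameters keeps its hyper-parameters but no longer carries
    # initial_m / initial_v slot values.
    adam = opt_pb.AdamParameters(beta1=0.9, beta2=0.999, epsilon=1e-8,
                                 use_non_lazy_adam=False,
                                 use_max_with_epsilon=False)

The field numbers of the removed fields are left unused rather than reassigned, so existing serialized protos that still carry them will simply drop those values on parse.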