author    A. Unique TensorFlower <gardener@tensorflow.org>  2016-12-13 09:54:24 -0800
committer TensorFlower Gardener <gardener@tensorflow.org>   2016-12-13 10:04:25 -0800
commit c0076def01dae3c1774b4db156c0723b728704e4 (patch)
tree   48549a3d8271bce011eb6026cfedd8f898c6232b
parent 04f073a8c116a0aff2b5e01f7b26160d238fcde6 (diff)
Rename SyncReplicasOptimizerV2 to SyncReplicasOptimizer
Change: 141904790
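
For reference, a minimal before/after sketch of the rename (the wrapped GradientDescentOptimizer and the replica counts are illustrative, taken from the example in the updated docs below; surrounding training-loop code is omitted):

# Before this change (old class name):
opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
opt = tf.train.SyncReplicasOptimizerV2(opt, replicas_to_aggregate=50,
                                       total_num_replicas=50)

# After this change (new name; the V2 symbol is kept as an alias until
# all users are migrated, per the TODO in training.py):
opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
opt = tf.train.SyncReplicasOptimizer(opt, replicas_to_aggregate=50,
                                     total_num_replicas=50)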
-rw-r--r--  tensorflow/contrib/slim/python/slim/learning.py                                                    |  8
-rw-r--r--  tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.train.SyncReplicasOptimizerV2.md  | 18
-rw-r--r--  tensorflow/g3doc/api_docs/python/index.md                                                          |  2
-rw-r--r--  tensorflow/g3doc/api_docs/python/train.md                                                          | 20
-rw-r--r--  tensorflow/python/training/sync_replicas_optimizer_test.py                                         |  4
-rw-r--r--  tensorflow/python/training/training.py                                                             |  4
-rw-r--r--  tensorflow/tools/dist_test/python/mnist_replica.py                                                 |  2
7 files changed, 29 insertions(+), 29 deletions(-)
diff --git a/tensorflow/contrib/slim/python/slim/learning.py b/tensorflow/contrib/slim/python/slim/learning.py
index ca131cfb7c..fc2a6d0fe6 100644
--- a/tensorflow/contrib/slim/python/slim/learning.py
+++ b/tensorflow/contrib/slim/python/slim/learning.py
@@ -699,7 +699,7 @@ def train(train_op,
data_flow_ops.initialize_all_tables())
if sync_optimizer is not None and isinstance(
- sync_optimizer, sync_replicas_optimizer.SyncReplicasOptimizerV2):
+ sync_optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):
with ops.control_dependencies([local_init_op] if local_init_op is
not None else []):
if is_chief:
@@ -720,11 +720,9 @@ def train(train_op,
if is_chief and sync_optimizer is not None:
if not isinstance(sync_optimizer,
- (sync_replicas_optimizer.SyncReplicasOptimizer,
- sync_replicas_optimizer.SyncReplicasOptimizerV2)):
+ (sync_replicas_optimizer.SyncReplicasOptimizer)):
raise ValueError(
- '`sync_optimizer` must be a tf.train.SyncReplicasOptimizer or '
- 'tf.train.SyncReplicasOptimizerV2.')
+ '`sync_optimizer` must be a tf.train.SyncReplicasOptimizer.')
# Need to create these BEFORE the supervisor finalizes the graph:
init_tokens_op = sync_optimizer.get_init_tokens_op()
diff --git a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.train.SyncReplicasOptimizerV2.md b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.train.SyncReplicasOptimizerV2.md
index 10660f6bcf..d8ff888955 100644
--- a/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.train.SyncReplicasOptimizerV2.md
+++ b/tensorflow/g3doc/api_docs/python/functions_and_classes/shard9/tf.train.SyncReplicasOptimizerV2.md
@@ -58,8 +58,8 @@ opt = GradientDescentOptimizer(learning_rate=0.1)
# Note that if you want to have 2 backup replicas, you can change
# total_num_replicas=52 and make sure this number matches how many physical
# replicas you started in your job.
-opt = tf.SyncReplicasOptimizerV2(opt, replicas_to_aggregate=50,
- total_num_replicas=50)
+opt = tf.SyncReplicasOptimizer(opt, replicas_to_aggregate=50,
+ total_num_replicas=50)
# Some models have startup_delays to help stabilize the model but when using
# sync_replicas training, set it to 0.
@@ -107,7 +107,7 @@ if is_chief and FLAGS.sync_replicas:
- - -
-#### `tf.train.SyncReplicasOptimizerV2.__init__(opt, replicas_to_aggregate, total_num_replicas=None, variable_averages=None, variables_to_average=None, use_locking=False, name='sync_replicas')` {#SyncReplicasOptimizerV2.__init__}
+#### `tf.train.SyncReplicasOptimizer.__init__(opt, replicas_to_aggregate, total_num_replicas=None, variable_averages=None, variables_to_average=None, use_locking=False, name='sync_replicas')` {#SyncReplicasOptimizer.__init__}
Construct a sync_replicas optimizer.
@@ -135,7 +135,7 @@ Construct a sync_replicas optimizer.
- - -
-#### `tf.train.SyncReplicasOptimizerV2.compute_gradients(*args, **kwargs)` {#SyncReplicasOptimizerV2.compute_gradients}
+#### `tf.train.SyncReplicasOptimizer.compute_gradients(*args, **kwargs)` {#SyncReplicasOptimizer.compute_gradients}
Compute gradients of "loss" for the variables in "var_list".
@@ -158,7 +158,7 @@ gradients can hurt the gradients from other replicas.
- - -
-#### `tf.train.SyncReplicasOptimizerV2.apply_gradients(grads_and_vars, global_step=None, name=None)` {#SyncReplicasOptimizerV2.apply_gradients}
+#### `tf.train.SyncReplicasOptimizer.apply_gradients(grads_and_vars, global_step=None, name=None)` {#SyncReplicasOptimizer.apply_gradients}
Apply gradients to variables.
@@ -191,7 +191,7 @@ apply_gradients() from the real optimizer.
- - -
-#### `tf.train.SyncReplicasOptimizerV2.get_chief_queue_runner()` {#SyncReplicasOptimizerV2.get_chief_queue_runner}
+#### `tf.train.SyncReplicasOptimizer.get_chief_queue_runner()` {#SyncReplicasOptimizer.get_chief_queue_runner}
Returns the QueueRunner for the chief to execute.
@@ -213,7 +213,7 @@ actually generates this queuerunner.
- - -
-#### `tf.train.SyncReplicasOptimizerV2.get_init_tokens_op(num_tokens=-1)` {#SyncReplicasOptimizerV2.get_init_tokens_op}
+#### `tf.train.SyncReplicasOptimizer.get_init_tokens_op(num_tokens=-1)` {#SyncReplicasOptimizer.get_init_tokens_op}
Returns the op to fill the sync_token_queue with the tokens.
@@ -244,7 +244,7 @@ variable update. Make sure:
#### Other Methods
- - -
-#### `tf.train.SyncReplicasOptimizerV2.get_slot(*args, **kwargs)` {#SyncReplicasOptimizerV2.get_slot}
+#### `tf.train.SyncReplicasOptimizer.get_slot(*args, **kwargs)` {#SyncReplicasOptimizer.get_slot}
Return a slot named "name" created for "var" by the Optimizer.
@@ -263,7 +263,7 @@ This simply wraps the get_slot() from the actual optimizer.
- - -
-#### `tf.train.SyncReplicasOptimizerV2.get_slot_names(*args, **kwargs)` {#SyncReplicasOptimizerV2.get_slot_names}
+#### `tf.train.SyncReplicasOptimizer.get_slot_names(*args, **kwargs)` {#SyncReplicasOptimizer.get_slot_names}
Return a list of the names of slots created by the `Optimizer`.
diff --git a/tensorflow/g3doc/api_docs/python/index.md b/tensorflow/g3doc/api_docs/python/index.md
index a474460723..5065da8be5 100644
--- a/tensorflow/g3doc/api_docs/python/index.md
+++ b/tensorflow/g3doc/api_docs/python/index.md
@@ -660,7 +660,7 @@
* [`summary_iterator`](../../api_docs/python/train.md#summary_iterator)
* [`SummarySaverHook`](../../api_docs/python/train.md#SummarySaverHook)
* [`Supervisor`](../../api_docs/python/train.md#Supervisor)
- * [`SyncReplicasOptimizerV2`](../../api_docs/python/train.md#SyncReplicasOptimizerV2)
+ * [`SyncReplicasOptimizer`](../../api_docs/python/train.md#SyncReplicasOptimizer)
* [`WorkerSessionCreator`](../../api_docs/python/train.md#WorkerSessionCreator)
* [`write_graph`](../../api_docs/python/train.md#write_graph)
diff --git a/tensorflow/g3doc/api_docs/python/train.md b/tensorflow/g3doc/api_docs/python/train.md
index 88ae6e3f26..19e6116e08 100644
--- a/tensorflow/g3doc/api_docs/python/train.md
+++ b/tensorflow/g3doc/api_docs/python/train.md
@@ -5350,7 +5350,7 @@ Called when the thread stops.
## Other Functions and Classes
- - -
-### `class tf.train.SyncReplicasOptimizerV2` {#SyncReplicasOptimizerV2}
+### `class tf.train.SyncReplicasOptimizer` {#SyncReplicasOptimizer}
Class to synchronize, aggregate gradients and pass them to the optimizer.
@@ -5412,8 +5412,8 @@ opt = GradientDescentOptimizer(learning_rate=0.1)
# Note that if you want to have 2 backup replicas, you can change
# total_num_replicas=52 and make sure this number matches how many physical
# replicas you started in your job.
-opt = tf.SyncReplicasOptimizerV2(opt, replicas_to_aggregate=50,
- total_num_replicas=50)
+opt = tf.SyncReplicasOptimizer(opt, replicas_to_aggregate=50,
+ total_num_replicas=50)
# Some models have startup_delays to help stabilize the model but when using
# sync_replicas training, set it to 0.
@@ -5461,7 +5461,7 @@ if is_chief and FLAGS.sync_replicas:
- - -
-#### `tf.train.SyncReplicasOptimizerV2.__init__(opt, replicas_to_aggregate, total_num_replicas=None, variable_averages=None, variables_to_average=None, use_locking=False, name='sync_replicas')` {#SyncReplicasOptimizerV2.__init__}
+#### `tf.train.SyncReplicasOptimizer.__init__(opt, replicas_to_aggregate, total_num_replicas=None, variable_averages=None, variables_to_average=None, use_locking=False, name='sync_replicas')` {#SyncReplicasOptimizer.__init__}
Construct a sync_replicas optimizer.
@@ -5489,7 +5489,7 @@ Construct a sync_replicas optimizer.
- - -
-#### `tf.train.SyncReplicasOptimizerV2.compute_gradients(*args, **kwargs)` {#SyncReplicasOptimizerV2.compute_gradients}
+#### `tf.train.SyncReplicasOptimizer.compute_gradients(*args, **kwargs)` {#SyncReplicasOptimizer.compute_gradients}
Compute gradients of "loss" for the variables in "var_list".
@@ -5512,7 +5512,7 @@ gradients can hurt the gradients from other replicas.
- - -
-#### `tf.train.SyncReplicasOptimizerV2.apply_gradients(grads_and_vars, global_step=None, name=None)` {#SyncReplicasOptimizerV2.apply_gradients}
+#### `tf.train.SyncReplicasOptimizer.apply_gradients(grads_and_vars, global_step=None, name=None)` {#SyncReplicasOptimizer.apply_gradients}
Apply gradients to variables.
@@ -5545,7 +5545,7 @@ apply_gradients() from the real optimizer.
- - -
-#### `tf.train.SyncReplicasOptimizerV2.get_chief_queue_runner()` {#SyncReplicasOptimizerV2.get_chief_queue_runner}
+#### `tf.train.SyncReplicasOptimizer.get_chief_queue_runner()` {#SyncReplicasOptimizer.get_chief_queue_runner}
Returns the QueueRunner for the chief to execute.
@@ -5567,7 +5567,7 @@ actually generates this queuerunner.
- - -
-#### `tf.train.SyncReplicasOptimizerV2.get_init_tokens_op(num_tokens=-1)` {#SyncReplicasOptimizerV2.get_init_tokens_op}
+#### `tf.train.SyncReplicasOptimizer.get_init_tokens_op(num_tokens=-1)` {#SyncReplicasOptimizer.get_init_tokens_op}
Returns the op to fill the sync_token_queue with the tokens.
@@ -5598,7 +5598,7 @@ variable update. Make sure:
#### Other Methods
- - -
-#### `tf.train.SyncReplicasOptimizerV2.get_slot(*args, **kwargs)` {#SyncReplicasOptimizerV2.get_slot}
+#### `tf.train.SyncReplicasOptimizer.get_slot(*args, **kwargs)` {#SyncReplicasOptimizer.get_slot}
Return a slot named "name" created for "var" by the Optimizer.
@@ -5617,7 +5617,7 @@ This simply wraps the get_slot() from the actual optimizer.
- - -
-#### `tf.train.SyncReplicasOptimizerV2.get_slot_names(*args, **kwargs)` {#SyncReplicasOptimizerV2.get_slot_names}
+#### `tf.train.SyncReplicasOptimizer.get_slot_names(*args, **kwargs)` {#SyncReplicasOptimizer.get_slot_names}
Return a list of the names of slots created by the `Optimizer`.
diff --git a/tensorflow/python/training/sync_replicas_optimizer_test.py b/tensorflow/python/training/sync_replicas_optimizer_test.py
index b9e612bb71..4ffe3a3ee9 100644
--- a/tensorflow/python/training/sync_replicas_optimizer_test.py
+++ b/tensorflow/python/training/sync_replicas_optimizer_test.py
@@ -70,7 +70,7 @@ def get_workers(num_workers, replicas_to_aggregate, workers):
tf.constant([1]),
tf.constant([2, 1]))
sgd_opt = tf.train.GradientDescentOptimizer(2.0)
- sync_rep_opt = tf.train.SyncReplicasOptimizerV2(
+ sync_rep_opt = tf.train.SyncReplicasOptimizer(
sgd_opt, replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=num_workers)
train_op = [sync_rep_opt.apply_gradients(
@@ -111,7 +111,7 @@ def get_workers(num_workers, replicas_to_aggregate, workers):
return sessions, graphs, train_ops
-class SyncReplicasOptimizerV2Test(tf.test.TestCase):
+class SyncReplicasOptimizerTest(tf.test.TestCase):
def _run(self, train_op, sess):
sess.run(train_op)
diff --git a/tensorflow/python/training/training.py b/tensorflow/python/training/training.py
index b84d248fd3..63063ab361 100644
--- a/tensorflow/python/training/training.py
+++ b/tensorflow/python/training/training.py
@@ -167,7 +167,9 @@ from tensorflow.python.training.optimizer import Optimizer
from tensorflow.python.training.rmsprop import RMSPropOptimizer
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
from tensorflow.python.training.proximal_gradient_descent import ProximalGradientDescentOptimizer
+# TODO(xpan): Remove SyncReplicasOptimizerV2 once all users are migrated.
from tensorflow.python.training.sync_replicas_optimizer import SyncReplicasOptimizerV2
+from tensorflow.python.training.sync_replicas_optimizer import SyncReplicasOptimizerV2 as SyncReplicasOptimizer
# Utility classes for training.
from tensorflow.python.training.coordinator import Coordinator
@@ -255,7 +257,7 @@ _allowed_symbols = [
# TODO(drpng): document these. The reference in howtos/distributed does
# not link.
"SyncReplicasOptimizerV2",
-
+ "SyncReplicasOptimizer",
# Protobufs:
"BytesList", # from example_pb2.
"ClusterDef",
diff --git a/tensorflow/tools/dist_test/python/mnist_replica.py b/tensorflow/tools/dist_test/python/mnist_replica.py
index fb51705fe0..7e68258b0a 100644
--- a/tensorflow/tools/dist_test/python/mnist_replica.py
+++ b/tensorflow/tools/dist_test/python/mnist_replica.py
@@ -177,7 +177,7 @@ def main(unused_argv):
else:
replicas_to_aggregate = FLAGS.replicas_to_aggregate
- opt = tf.train.SyncReplicasOptimizerV2(
+ opt = tf.train.SyncReplicasOptimizer(
opt,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=num_workers,