author | 2018-06-13 15:49:22 -0700
---|---
committer | 2018-06-13 15:59:03 -0700
commit | 4d48d1dc5a1a6010132988e4afe1e70e1f01be03 (patch)
tree | b229b029534be77128be4f407c7c5ad0ed552086 /tensorflow/contrib/opt
parent | 31ea26d15004a3b5ac5b87e598cd6dfdc71f6012 (diff)
Uses a resource variable by default for the global step.
PiperOrigin-RevId: 200467580
Diffstat (limited to 'tensorflow/contrib/opt')
-rw-r--r-- | tensorflow/contrib/opt/python/training/drop_stale_gradient_optimizer.py | 7
1 file changed, 4 insertions(+), 3 deletions(-)
```diff
diff --git a/tensorflow/contrib/opt/python/training/drop_stale_gradient_optimizer.py b/tensorflow/contrib/opt/python/training/drop_stale_gradient_optimizer.py
index 4a905b1b2a..918165bc6a 100644
--- a/tensorflow/contrib/opt/python/training/drop_stale_gradient_optimizer.py
+++ b/tensorflow/contrib/opt/python/training/drop_stale_gradient_optimizer.py
@@ -63,7 +63,7 @@ class DropStaleGradientOptimizer(optimizer.Optimizer):
   def compute_gradients(self, loss, *args, **kwargs):
     # Record current global step for worker.
     with ops.colocate_with(loss):
-      self._local_step = training_util.get_global_step() + 0
+      self._local_step = training_util.get_global_step().read_value() + 0
 
     with ops.control_dependencies([self._local_step]):
       loss = gen_array_ops.identity(loss)
@@ -102,7 +102,7 @@ class DropStaleGradientOptimizer(optimizer.Optimizer):
 
       with ops.control_dependencies(gradients), ops.colocate_with(global_step):
         staleness = gen_array_ops.reshape(
-            global_step - self._local_step, shape=())
+            global_step.read_value() - self._local_step, shape=())
 
       conditional_update = stale_counter.assign_add(control_flow_ops.cond(
           gen_math_ops.less_equal(staleness, self._staleness),
@@ -110,5 +110,6 @@ class DropStaleGradientOptimizer(optimizer.Optimizer):
 
       summary.scalar(
           "Gradient staleness percentage",
-          stale_counter / (math_ops.cast(global_step + 1, dtypes.float32)))
+          stale_counter / (math_ops.cast(global_step.read_value() + 1,
+                                         dtypes.float32)))
       return conditional_update
```
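For context on why the patch switches to explicit `.read_value()` calls: once the global step is a resource variable, an explicit read op pins exactly where in the graph the value is snapshotted, so the surrounding `colocate_with` and `control_dependencies` scopes apply to the read itself rather than to the variable handle. Below is a minimal sketch of the same pattern using the public TF 1.x graph-mode API; the manual `tf.get_variable` call and the `local_step`/`staleness` names are illustrative stand-ins for what `training_util` and the optimizer do internally, not the library's actual code.

```python
import tensorflow as tf  # TF 1.x graph-mode API assumed

# Create a resource-variable global step by hand; after this change,
# the global step created by TensorFlow's training utilities is a
# resource variable by default.
global_step = tf.get_variable(
    "global_step", shape=[], dtype=tf.int64,
    initializer=tf.zeros_initializer(), trainable=False,
    use_resource=True,
    collections=[tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.GLOBAL_STEP])

# .read_value() inserts an explicit read op, snapshotting the variable's
# value at this point in the graph; "+ 0" additionally forces the result
# to be an ordinary tensor, mirroring the pattern in the patch above.
local_step = global_step.read_value() + 0

with tf.control_dependencies([local_step]):
    # Ops created here run only after the global step has been read,
    # so the staleness below compares two well-ordered snapshots.
    staleness = tf.reshape(global_step.read_value() - local_step, shape=[])
```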