path: root/tensorflow/python/training/adagrad.py
author A. Unique TensorFlower <gardener@tensorflow.org> 2017-01-27 13:10:15 -0800
committer TensorFlower Gardener <gardener@tensorflow.org> 2017-01-27 13:30:03 -0800
commit 43c2f2f3058d56fad95aa32d0efb30b06ab24758 (patch)
tree b038b3a908af4cd5b08c67fb6b88a26619e712f5 /tensorflow/python/training/adagrad.py
parent 5480f326da105f466d12d33333c0482b8e5a631d (diff)
Enables all optimizers for dense resource variables.
Also fixes a small bug with saving partitioned resource variables.
Change: 145828125
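For context, a minimal sketch of the path this commit enables, written against the TF 1.x API of the time; it is not part of the commit, and the variable name, loss, and learning rate are illustrative. When the variable is a resource variable, the dense gradient update produced by minimize() is routed to the new _resource_apply_dense hook rather than the ref-variable _apply_dense path.

import tensorflow as tf

# Illustrative only: a dense resource variable trained with Adagrad.
# Assumes tf.get_variable(..., use_resource=True) is available (TF 1.x).
w = tf.get_variable("w", initializer=[1.0, 2.0], use_resource=True)
loss = tf.reduce_sum(tf.square(w))

opt = tf.train.AdagradOptimizer(learning_rate=0.1)
# For a resource variable, this dense update goes through
# AdagradOptimizer._resource_apply_dense -> training_ops.resource_apply_adagrad.
train_op = opt.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)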
Diffstat (limited to 'tensorflow/python/training/adagrad.py')
-rw-r--r-- tensorflow/python/training/adagrad.py | 9
1 file changed, 9 insertions, 0 deletions
diff --git a/tensorflow/python/training/adagrad.py b/tensorflow/python/training/adagrad.py
index 9646c5c228..e23b0d4381 100644
--- a/tensorflow/python/training/adagrad.py
+++ b/tensorflow/python/training/adagrad.py
@@ -78,6 +78,15 @@ class AdagradOptimizer(optimizer.Optimizer):
         grad,
         use_locking=self._use_locking)
 
+  def _resource_apply_dense(self, grad, var):
+    acc = self.get_slot(var, "accumulator")
+    return training_ops.resource_apply_adagrad(
+        var,
+        acc.handle,
+        math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
+        grad,
+        use_locking=self._use_locking)
+
   def _apply_sparse(self, grad, var):
     acc = self.get_slot(var, "accumulator")
     return training_ops.sparse_apply_adagrad(