author    Asim Shankar <ashankar@google.com>    2018-04-10 10:43:14 -0700
committer TensorFlower Gardener <gardener@tensorflow.org>    2018-04-10 10:45:31 -0700
commit 36a07c59954b8ace54879b8732b6a7ae2dce6450 (patch)
tree   0489f1adb914dfc732271a9758461abf49760f30 /tensorflow/contrib/optimizer_v2
parent bd718c410478d066ed1c41d5ffe31970075b808a (diff)
Simplify test_util.run_in_graph_and_eager_modes
- Get rid of unnecessary options.
- Update various resource variable tests so that they correctly exercise the cases where the variables are placed on GPU (the "with tf.device('/cpu:0')" blocks that were added for eager execution are no longer necessary).

PiperOrigin-RevId: 192309109
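The placement discrepancy described above can be reproduced in isolation. Below is a minimal sketch of the workaround pattern this commit applies (not part of the commit itself; it assumes TensorFlow 1.x, circa 1.7, with eager execution enabled, and uses the public tf.Variable/tf.device APIs rather than the test's resource_variable_ops helpers):

    import tensorflow as tf

    tf.enable_eager_execution()

    # Under eager execution a newly created variable is placed on the GPU
    # whenever one is available. An op with no registered GPU kernel (such
    # as ResourceSparseApplyMomentum as of April 2018) would then hit a
    # device mismatch, so the variable is explicitly pinned to host memory.
    with tf.device("/cpu:0"):
        var0 = tf.Variable([[1.0, 2.0]])  # a resource variable in eager mode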
Diffstat (limited to 'tensorflow/contrib/optimizer_v2')
-rw-r--r-- tensorflow/contrib/optimizer_v2/momentum_test.py | 24
1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/tensorflow/contrib/optimizer_v2/momentum_test.py b/tensorflow/contrib/optimizer_v2/momentum_test.py
index f37eb48181..26724f66c2 100644
--- a/tensorflow/contrib/optimizer_v2/momentum_test.py
+++ b/tensorflow/contrib/optimizer_v2/momentum_test.py
@@ -237,7 +237,17 @@ class MomentumOptimizerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
- var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
+ # This test invokes the ResourceSparseApplyMomentum operation, which
+ # did not have a registered GPU kernel as of April 2018. With graph
+ # execution, the placement algorithm notices this and automatically
+ # places the variable in CPU (host) memory. With eager execution,
+ # the variable would be placed in GPU memory if available, which
+ # would then conflict with the future invocation of the
+ # ResourceSparseApplyMomentum operation.
+ # To work around this discrepancy, for now we force the variable
+ # to be placed on CPU.
+ with ops.device("/cpu:0"):
+ var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
# pylint: disable=cell-var-from-loop
def loss():
@@ -256,7 +266,17 @@ class MomentumOptimizerTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testMinimizeWith2DIndiciesForEmbeddingLookup(self):
- var0 = resource_variable_ops.ResourceVariable(array_ops.ones([2, 2]))
+ # This test invokes the ResourceSparseApplyMomentum operation, which
+ # did not have a registered GPU kernel as of April 2018. With graph
+ # execution, the placement algorithm notices this and automatically
+ # places the variable in CPU (host) memory. With eager execution,
+ # the variable would be placed in GPU memory if available, which
+ # would then conflict with the future invocation of the
+ # ResourceSparseApplyMomentum operation.
+ # To work around this discrepancy, for now we force the variable
+ # to be placed on CPU.
+ with ops.device("/cpu:0"):
+ var0 = resource_variable_ops.ResourceVariable(array_ops.ones([2, 2]))
def loss():
return math_ops.reduce_sum(embedding_ops.embedding_lookup(var0, [[1]]))
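For reference, a hedged end-to-end approximation of the sparse update path the patched tests exercise. This sketch swaps in the public tf.train.MomentumOptimizer for the contrib optimizer_v2 variant under test, and the values are illustrative rather than taken from the test:

    import tensorflow as tf

    tf.enable_eager_execution()

    with tf.device("/cpu:0"):
        var0 = tf.Variable([[1.0, 2.0]])

    def loss():
        # Reading rows via embedding_lookup makes the gradient an
        # IndexedSlices, which routes minimize() through the sparse
        # (ResourceSparseApplyMomentum) update path.
        x = tf.constant([[4.0], [5.0]])
        pred = tf.matmul(tf.nn.embedding_lookup(var0, [0]), x)
        return pred * pred

    opt = tf.train.MomentumOptimizer(learning_rate=1.0, momentum=0.9)
    opt.minimize(loss)  # with eager execution enabled, minimize takes a callable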