aboutsummaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
authorGravatar A. Unique TensorFlower <gardener@tensorflow.org>2018-07-16 14:02:31 -0700
committerGravatar TensorFlower Gardener <gardener@tensorflow.org>2018-07-16 14:07:16 -0700
commit646c179b2ca8e3366d2b0d27f5f14839aa49e658 (patch)
treeedc2d4395595b6d1af369dc3f9a6269e57f4ea9e
parent1d38580d6db3dc48b916453c2ce6cce691a00fe6 (diff)
Re-enable some flaky tests that were affected by low
precision of fused winograd implementation of convolution backprop. PiperOrigin-RevId: 204803121
-rw-r--r--tensorflow/python/ops/parallel_for/BUILD1
-rw-r--r--tensorflow/python/ops/parallel_for/gradients_test.py11
2 files changed, 11 insertions, 1 deletion
diff --git a/tensorflow/python/ops/parallel_for/BUILD b/tensorflow/python/ops/parallel_for/BUILD
index 065c2caedc..6c804a50e7 100644
--- a/tensorflow/python/ops/parallel_for/BUILD
+++ b/tensorflow/python/ops/parallel_for/BUILD
@@ -125,5 +125,4 @@ cuda_py_test(
"//tensorflow/python:random_ops",
"//tensorflow/python/ops/losses",
],
- tags = ["no_gpu"], # TODO(b/80127739): test is flaky
)
diff --git a/tensorflow/python/ops/parallel_for/gradients_test.py b/tensorflow/python/ops/parallel_for/gradients_test.py
index 310a2154f7..3a6d9149ad 100644
--- a/tensorflow/python/ops/parallel_for/gradients_test.py
+++ b/tensorflow/python/ops/parallel_for/gradients_test.py
@@ -19,6 +19,7 @@ from __future__ import division
from __future__ import print_function
import functools
+import os
import time
import numpy as np
@@ -444,6 +445,10 @@ class GradientsTest(test.TestCase):
self.run_and_assert_equal(pfor_outputs, while_outputs)
def test_mnist_per_eg_grad(self):
+ # It looks like CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED
+ # configuration of Winograd can cause low precision output resulting in
+ # tests failing. So we disable that here.
+ os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "0"
data_format = ("channels_first"
if test.is_gpu_available() else "channels_last")
# Note that we are setting training=False here so that dropout produces
@@ -451,8 +456,13 @@ class GradientsTest(test.TestCase):
pfor_outputs, while_outputs = create_mnist_per_eg_grad(
4, data_format, training=False)
self.run_and_assert_equal(pfor_outputs, while_outputs, rtol=1e-3)
+ os.environ.pop("TF_ENABLE_WINOGRAD_NONFUSED", None)
def test_mnist_per_eg_jacobian(self):
+ # It looks like CUDNN_CONVOLUTION_BWD_DATA_ALGO_WINOGRAD_NONFUSED
+ # configuration of Winograd can cause low precision output resulting in
+ # tests failing. So we disable that here.
+ os.environ["TF_ENABLE_WINOGRAD_NONFUSED"] = "0"
data_format = ("channels_first"
if test.is_gpu_available() else "channels_last")
# Note that we are setting training=False here so that dropout produces
@@ -460,6 +470,7 @@ class GradientsTest(test.TestCase):
pfor_outputs, while_outputs = create_mnist_per_eg_jacobian(
2, data_format, training=False)
self.run_and_assert_equal(pfor_outputs, while_outputs, rtol=1e-3)
+ os.environ.pop("TF_ENABLE_WINOGRAD_NONFUSED", None)
def test_fc_jacobian(self):
jacobians, per_eg_jacobians_pfor, per_eg_jacobians_while = (