about | summary | refs | log | tree | commit | diff | homepage
path: root/tensorflow/compiler/tests
diff options
context:
space:
mode:
author: Adrian Kuegel <akuegel@google.com> 2018-09-19 05:20:57 -0700
committer: TensorFlower Gardener <gardener@tensorflow.org> 2018-09-19 05:26:15 -0700
commit 27ea406e6d43ffc0d63f61782c413fe4d8483193 (patch)
tree b6120e9e47251d092e16c50811a8b8809accf6cc /tensorflow/compiler/tests
parent 7b936cb6c4ca47c8d3a63b42364998c86d87f2cf (diff)
Enable tests for CPU and GPU backends that involve XlaSort.
PiperOrigin-RevId: 213611371
Diffstat (limited to 'tensorflow/compiler/tests')
-rw-r--r--  tensorflow/compiler/tests/image_ops_test.py   12
-rw-r--r--  tensorflow/compiler/tests/random_ops_test.py   3
-rw-r--r--  tensorflow/compiler/tests/sort_ops_test.py    20
3 files changed, 0 insertions, 35 deletions
diff --git a/tensorflow/compiler/tests/image_ops_test.py b/tensorflow/compiler/tests/image_ops_test.py
index 6fe5a66e0e..bbe746e28f 100644
--- a/tensorflow/compiler/tests/image_ops_test.py
+++ b/tensorflow/compiler/tests/image_ops_test.py
@@ -605,10 +605,6 @@ class ResizeBilinearTest(xla_test.XLATestCase):
class NonMaxSuppressionTest(xla_test.XLATestCase):
def testNMS128From1024(self):
- # TODO(b/26783907): The Sort HLO is not implemented on CPU or GPU.
- if self.device in ["XLA_CPU", "XLA_GPU"]:
- return
-
with compat.forward_compatibility_horizon(2018, 8, 8):
num_boxes = 1024
boxes_np = np.random.normal(50, 10, (num_boxes, 4)).astype("f4")
@@ -644,10 +640,6 @@ class NonMaxSuppressionTest(xla_test.XLATestCase):
self.assertEqual(indices_tf.size, max_output_size)
def testNMS3From6Boxes(self):
- # TODO(b/26783907): The Sort HLO is not implemented on CPU or GPU.
- if self.device in ["XLA_CPU", "XLA_GPU"]:
- return
-
with compat.forward_compatibility_horizon(2018, 8, 8):
# Three boxes are selected based on IOU.
boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
@@ -693,10 +685,6 @@ class NonMaxSuppressionTest(xla_test.XLATestCase):
# Three boxes are selected based on IOU.
# One is filtered out by score threshold.
- # TODO(b/26783907): The Sort HLO is not implemented on CPU or GPU.
- if self.device in ["XLA_CPU", "XLA_GPU"]:
- return
-
with compat.forward_compatibility_horizon(2018, 8, 8):
boxes_data = [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [0, -0.1, 1, 0.9],
[0, 10, 1, 11], [0, 10.1, 1, 11.1], [0, 100, 1, 101]]
diff --git a/tensorflow/compiler/tests/random_ops_test.py b/tensorflow/compiler/tests/random_ops_test.py
index 41fe42a26b..4932819585 100644
--- a/tensorflow/compiler/tests/random_ops_test.py
+++ b/tensorflow/compiler/tests/random_ops_test.py
@@ -145,9 +145,6 @@ class RandomOpsTest(xla_test.XLATestCase):
self.assertAllClose(actual_variance, expected_variance, rtol=2*1e-3)
def testShuffle1d(self):
- # TODO(b/26783907): this test requires the CPU backend to implement sort.
- if self.device in ["XLA_CPU"]:
- return
with self.cached_session() as sess:
with self.test_scope():
x = math_ops.range(1 << 16)
diff --git a/tensorflow/compiler/tests/sort_ops_test.py b/tensorflow/compiler/tests/sort_ops_test.py
index 51c04b5c47..dbf4beb693 100644
--- a/tensorflow/compiler/tests/sort_ops_test.py
+++ b/tensorflow/compiler/tests/sort_ops_test.py
@@ -48,10 +48,6 @@ class XlaSortOpTest(xla_test.XLATestCase):
self.assertAllClose(v, result, rtol=1e-3)
def testSort(self):
- # TODO(b/26783907): The Sort HLO is not implemented on CPU or GPU.
- if self.device in ["XLA_CPU", "XLA_GPU"]:
- return
-
supported_types = set([dtypes.bfloat16.as_numpy_dtype, np.float32])
for dtype in supported_types.intersection(self.numeric_types):
x = np.arange(101, dtype=dtype)
@@ -60,10 +56,6 @@ class XlaSortOpTest(xla_test.XLATestCase):
xla.sort, [x], expected=[np.arange(101, dtype=dtype)])
def testTopK(self):
- # TODO(b/26783907): The Sort HLO is not implemented on CPU or GPU.
- if self.device in ["XLA_CPU", "XLA_GPU"]:
- return
-
supported_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
for dtype in supported_types.intersection(self.numeric_types):
@@ -89,10 +81,6 @@ class XlaSortOpTest(xla_test.XLATestCase):
expected=[x[indices].astype(dtype), indices])
def testTopK2D(self):
- # TODO(b/26783907): The Sort HLO is not implemented on CPU or GPU.
- if self.device in ["XLA_CPU", "XLA_GPU"]:
- return
-
supported_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
for dtype in supported_types.intersection(self.numeric_types):
@@ -122,10 +110,6 @@ class XlaSortOpTest(xla_test.XLATestCase):
def testTopKZeros(self):
"""Tests that positive and negative zeros sort correctly."""
- # TODO(b/26783907): The Sort HLO is not implemented on CPU or GPU.
- if self.device in ["XLA_CPU", "XLA_GPU"]:
- return
-
# Only bfloat16 is implemented.
bfloat16 = dtypes.bfloat16.as_numpy_dtype
if bfloat16 not in self.numeric_types:
@@ -144,10 +128,6 @@ class XlaSortOpTest(xla_test.XLATestCase):
def testTopKInfinities(self):
"""Tests that positive and negative infinity sort correctly."""
- # TODO(b/26783907): The Sort HLO is not implemented on CPU or GPU.
- if self.device in ["XLA_CPU", "XLA_GPU"]:
- return
-
# Only bfloat16 is implemented.
bfloat16 = dtypes.bfloat16.as_numpy_dtype
if bfloat16 not in self.numeric_types: