diff options
author | 2018-08-06 14:28:33 -0700 | |
---|---|---|
committer | 2018-08-06 14:33:30 -0700 | |
commit | 765f12c65edca85cfc8e951f89f12e7af1a76743 (patch) | |
tree | fdb19b23324fb3a35adefe81a40a13b60235f452 | |
parent | a7596e7df45a474f8e2f803c1282dadb43037f6c (diff) |
Disable local test for collective ops.
PiperOrigin-RevId: 207608466
-rw-r--r-- | tensorflow/contrib/distribute/python/cross_tower_ops_test.py | 9 |
1 file changed, 1 insertion, 8 deletions
diff --git a/tensorflow/contrib/distribute/python/cross_tower_ops_test.py b/tensorflow/contrib/distribute/python/cross_tower_ops_test.py index 7b6c1843eb..aec53b01d7 100644 --- a/tensorflow/contrib/distribute/python/cross_tower_ops_test.py +++ b/tensorflow/contrib/distribute/python/cross_tower_ops_test.py @@ -383,7 +383,7 @@ class MultiWorkerCrossTowerOpsTest(multi_worker_test_base.MultiWorkerTestBase, class MultiWorkerCollectiveAllReduceTest( multi_worker_test_base.MultiWorkerTestBase, parameterized.TestCase): - collective_key_base = 10000 + collective_key_base = 100000 @classmethod def setUpClass(cls): @@ -540,13 +540,6 @@ class MultiWorkerCollectiveAllReduceTest( self._run_between_graph_clients(self._test_reduction, self._cluster_spec, num_gpus) - # Collective ops doesn't support strategy with one device. - def testReductionLocal(self, num_gpus=2): - if context.num_gpus() < num_gpus: - return - self._run_between_graph_clients( - self._test_reduction, self._cluster_spec, num_gpus, local_mode=True) - if __name__ == "__main__": test.main() |