From bfa666eb0deebac21b03486e26642fd70d66e478 Mon Sep 17 00:00:00 2001
From: Teng Li
Date: Thu, 6 Dec 2018 17:22:04 -0800
Subject: [PATCH] Skipping two c10d tests only if there are multi-GPUs (#14860)

Summary:
Otherwise, these tests will fail, even though they were never meant to run on single-GPU machines.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/14860

Differential Revision: D13369060

Pulled By: teng-li

fbshipit-source-id: 8a637a6d57335491ba8602cd09927700b2bbf8a0
---
 test/test_c10d.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/test/test_c10d.py b/test/test_c10d.py
index 5e715d8..9f477c9 100644
--- a/test/test_c10d.py
+++ b/test/test_c10d.py
@@ -1565,6 +1565,7 @@ class DistributedDataParallelTest(MultiProcessTestCase):
         )
 
     @skip_if_not_nccl
+    @skip_if_not_multigpu
     def test_queue_reduction(self):
         # Set up process group.
         store = c10d.FileStore(self.file.name, self.world_size)
@@ -1592,6 +1593,7 @@ class DistributedDataParallelTest(MultiProcessTestCase):
                          torch.ones(10) * (self.world_size + 1) * len(devices) / 2.0)
 
     @skip_if_not_nccl
+    @skip_if_not_multigpu
     def test_sync_reduction(self):
         # Set up process group.
         store = c10d.FileStore(self.file.name, self.world_size)
-- 
2.7.4
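
For reference, a minimal sketch of what a multi-GPU skip decorator such as `skip_if_not_multigpu` typically does, shown below. This is an illustrative approximation only; the actual helper in test/test_c10d.py may differ, for example in how it signals the skip to the multiprocess test harness.

    import sys
    from functools import wraps

    import torch


    def skip_if_not_multigpu(func):
        """Skip the wrapped test unless at least two CUDA devices are visible."""
        @wraps(func)
        def wrapper(*args, **kwargs):
            if torch.cuda.is_available() and torch.cuda.device_count() >= 2:
                return func(*args, **kwargs)
            # Assumption: exit cleanly so the test process counts as skipped;
            # the real helper may use a dedicated skip exit code instead.
            sys.exit(0)
        return wrapper

With such a guard in place, test_queue_reduction and test_sync_reduction return early on single-GPU machines instead of failing when they try to use a second device.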