From: Teng Li
Date: Fri, 7 Dec 2018 01:22:04 +0000 (-0800)
Subject: Skipping two c10d tests only if there are multi-GPUs (#14860)
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~2409
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=bfa666eb0deebac21b03486e26642fd70d66e478;p=platform%2Fupstream%2Fpytorch.git

Skipping two c10d tests only if there are multi-GPUs (#14860)

Summary:
Otherwise, these tests will fail, even though they were never meant to run on single-GPU machines.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/14860

Differential Revision: D13369060

Pulled By: teng-li

fbshipit-source-id: 8a637a6d57335491ba8602cd09927700b2bbf8a0
---

diff --git a/test/test_c10d.py b/test/test_c10d.py
index 5e715d8..9f477c9 100644
--- a/test/test_c10d.py
+++ b/test/test_c10d.py
@@ -1565,6 +1565,7 @@ class DistributedDataParallelTest(MultiProcessTestCase):
         )
 
     @skip_if_not_nccl
+    @skip_if_not_multigpu
     def test_queue_reduction(self):
         # Set up process group.
         store = c10d.FileStore(self.file.name, self.world_size)
@@ -1592,6 +1593,7 @@ class DistributedDataParallelTest(MultiProcessTestCase):
                 torch.ones(10) * (self.world_size + 1) * len(devices) / 2.0)
 
     @skip_if_not_nccl
+    @skip_if_not_multigpu
     def test_sync_reduction(self):
         # Set up process group.
         store = c10d.FileStore(self.file.name, self.world_size)
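
Note: for readers following along, the sketch below shows how a guard like
skip_if_not_multigpu can work. It is an illustration only, not the exact
helper from test/test_c10d.py (the multi-process harness in that file may
report skips differently, for example via per-process exit codes); the
decorator name is taken from the diff above, everything else is assumed.

    import functools
    import unittest

    import torch


    def skip_if_not_multigpu(func):
        """Skip the decorated test unless at least two CUDA devices are visible."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # The collective tests above spread work across multiple devices,
            # so they are meaningless (and would fail) on a single-GPU machine.
            if not (torch.cuda.is_available() and torch.cuda.device_count() >= 2):
                raise unittest.SkipTest("Need at least 2 CUDA devices")
            return func(*args, **kwargs)
        return wrapper

Stacking it under @skip_if_not_nccl keeps the NCCL check first, so a machine
without NCCL support still skips for that reason before the GPU count is
consulted.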