[BE] Enable functional optim tests for windows (#63462)
authorRohan Varma <rvarm1@fb.com>
Tue, 24 Aug 2021 00:45:39 +0000 (17:45 -0700)
committerFacebook GitHub Bot <facebook-github-bot@users.noreply.github.com>
Tue, 24 Aug 2021 00:49:01 +0000 (17:49 -0700)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/63462

Now that `torch.distributed.optim` gates `DistributedOptimizer` on RPC availability, these tests can be run on Windows.
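
For context, a minimal sketch of the kind of gating referred to above, assuming `torch.distributed.rpc.is_available()` is the availability check (the exact guard in `torch/distributed/optim/__init__.py` may differ):

```python
# Illustrative sketch only -- shows the idea of gating the RPC-based
# DistributedOptimizer on RPC availability so the rest of the package
# (including the functional optimizers) imports cleanly on Windows.
import torch.distributed.rpc as rpc

# Functional optimizers have no RPC dependency, so they are always importable.
from torch.distributed.optim.functional_sgd import _FunctionalSGD  # noqa: F401

if rpc.is_available():
    # DistributedOptimizer drives remote workers over RPC, which is not
    # supported on Windows, so it is only exposed when RPC is built in.
    from torch.distributed.optim import DistributedOptimizer  # noqa: F401
```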
ghstack-source-id: 136437635

Test Plan: CI

Reviewed By: SciPioneer

Differential Revision: D30358923

fbshipit-source-id: 36739bdfe7214789f17de652d30c62c2bc124c73

test/distributed/test_c10d_nccl.py
test/test_functional_optim.py
torch/testing/_internal/distributed/distributed_test.py

diff --git a/test/distributed/test_c10d_nccl.py b/test/distributed/test_c10d_nccl.py
index e42c5c6..1378aa0 100644
--- a/test/distributed/test_c10d_nccl.py
+++ b/test/distributed/test_c10d_nccl.py
@@ -39,7 +39,6 @@ from torch.testing._internal.common_distributed import (
     with_nccl_blocking_wait,
 )
 from torch.testing._internal.common_utils import (
-    IS_WINDOWS,
     TestCase,
     run_tests,
     retry_on_connect_failures,
@@ -51,10 +50,9 @@ from torch.testing._internal.common_utils import (
 from torch.utils.checkpoint import checkpoint
 from torch.distributed.optim import functional_optim_map
 
-if not IS_WINDOWS:
-    from torch.distributed.optim.functional_sgd import _FunctionalSGD
-    from torch.distributed.optim.functional_adam import _FunctionalAdam
-    from torch.distributed.optim.functional_adamw import _FunctionalAdamW
+from torch.distributed.optim.functional_sgd import _FunctionalSGD
+from torch.distributed.optim.functional_adam import _FunctionalAdam
+from torch.distributed.optim.functional_adamw import _FunctionalAdamW
 
 if TEST_WITH_DEV_DBG_ASAN:
     print(
diff --git a/test/test_functional_optim.py b/test/test_functional_optim.py
index 98a3f06..accc720 100644
--- a/test/test_functional_optim.py
+++ b/test/test_functional_optim.py
@@ -1,10 +1,8 @@
-import unittest
-
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from torch.optim import SGD, Adam, AdamW
-from torch.testing._internal.common_utils import TestCase, run_tests, IS_WINDOWS
+from torch.testing._internal.common_utils import TestCase, run_tests
 from torch.distributed.optim import functional_optim_map
 
 class MyModule(torch.nn.Module):
@@ -80,24 +78,12 @@ class TestFunctionalOptimParity(TestCase):
                 self.assertNotEqual(old_module_optim_params[i], optim_param)
                 self.assertNotEqual(old_module_functional_params[i], functional_param)
 
-    @unittest.skipIf(
-        IS_WINDOWS,
-        "Functional optimizer not support on windows, see https://github.com/pytorch/pytorch/issues/62137",
-    )
     def test_functional_optim_parity_sgd(self):
         self._test_functional_optim_parity(SGD, 1e-2, momentum=0.9, weight_decay=0.01)
 
-    @unittest.skipIf(
-        IS_WINDOWS,
-        "Functional optimizer not support on windows, see https://github.com/pytorch/pytorch/issues/62137",
-    )
     def test_functional_optim_parity_adam(self):
         self._test_functional_optim_parity(Adam, 1e-2, betas=(0.9, 0.999), eps=1e-6)
 
-    @unittest.skipIf(
-        IS_WINDOWS,
-        "Functional optimizer not support on windows, see https://github.com/pytorch/pytorch/issues/62137",
-    )
     def test_functional_optim_parity_adam_w(self):
         self._test_functional_optim_parity(AdamW, 1e-2, betas=(0.9, 0.999), eps=1e-6)
 
diff --git a/torch/testing/_internal/distributed/distributed_test.py b/torch/testing/_internal/distributed/distributed_test.py
index 096b718..aa8841d 100644
--- a/torch/testing/_internal/distributed/distributed_test.py
+++ b/torch/testing/_internal/distributed/distributed_test.py
@@ -68,11 +68,12 @@ from torch.testing._internal.common_utils import (
 
 from torch.distributed.optim import functional_optim_map
 
+from torch.distributed.optim.functional_sgd import _FunctionalSGD
+from torch.distributed.optim.functional_adam import _FunctionalAdam
+from torch.distributed.optim.functional_adamw import _FunctionalAdamW
+
 if not IS_WINDOWS:
     import torch.distributed.optim.post_localSGD_optimizer as post_localSGD_optimizer
-    from torch.distributed.optim.functional_sgd import _FunctionalSGD
-    from torch.distributed.optim.functional_adam import _FunctionalAdam
-    from torch.distributed.optim.functional_adamw import _FunctionalAdamW
 
 from torch.utils.data.distributed import DistributedSampler
 
@@ -4003,10 +4004,6 @@ class DistributedTest:
             BACKEND != "nccl" and BACKEND != "gloo",
             "Only Nccl & Gloo backend support DistributedDataParallel",
         )
-        @sandcastle_skip_if(
-            IS_WINDOWS,
-            "FunctionalAdam not yet supported with Windows, see https://github.com/pytorch/pytorch/issues/62137"
-        )
         @skip_if_lt_x_gpu(2)
         @skip_if_rocm
         def test_ddp_hook_with_optimizer_parity_adamw(self):
@@ -4029,10 +4026,6 @@ class DistributedTest:
             BACKEND != "nccl" and BACKEND != "gloo",
             "Only Nccl & Gloo backend support DistributedDataParallel",
         )
-        @sandcastle_skip_if(
-            IS_WINDOWS,
-            "FunctionalAdam not yet supported with Windows, see https://github.com/pytorch/pytorch/issues/62137"
-        )
         @skip_if_lt_x_gpu(2)
         @skip_if_rocm
         def test_ddp_hook_with_optimizer_parity_adam(self):
@@ -4055,10 +4048,6 @@ class DistributedTest:
             BACKEND != "nccl" and BACKEND != "gloo",
             "Only Nccl & Gloo backend support DistributedDataParallel",
         )
-        @sandcastle_skip_if(
-            IS_WINDOWS,
-            "FunctionalSGD not yet supported with Windows, see https://github.com/pytorch/pytorch/issues/62137"
-        )
         @skip_if_lt_x_gpu(2)
         @skip_if_rocm
         def test_ddp_hook_with_optimizer_parity_sgd(self):