enable more unit tests (#18537)
author jithunnair-amd <37884920+jithunnair-amd@users.noreply.github.com>
Wed, 27 Mar 2019 21:16:01 +0000 (14:16 -0700)
committer Facebook Github Bot <facebook-github-bot@users.noreply.github.com>
Wed, 27 Mar 2019 21:27:23 +0000 (14:27 -0700)
Summary:
Enable unit tests that now work with ROCm 2.3. In particular, these are unit tests that were previously skipped for double data types, as well as some tests for multi-GPU setups.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18537

Differential Revision: D14651822

Pulled By: ezyang

fbshipit-source-id: 7dd575504ebe235a91489866c91000e9754b1235

test/common_nn.py
test/test_cuda.py
test/test_nn.py

index afac197..08903bf 100644 (file)
@@ -923,7 +923,6 @@ new_module_tests = [
         cudnn=True,
         check_eval=True,
         desc='affine',
-        skip_double=TEST_WITH_ROCM,
         test_cuda=(not TEST_WITH_ROCM),
     ),
     dict(
@@ -933,7 +932,6 @@ new_module_tests = [
         cudnn=True,
         check_eval=True,
         desc='3d_input',
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         module_name='BatchNorm1d',
@@ -942,7 +940,6 @@ new_module_tests = [
         cudnn=True,
         check_eval=True,
         desc='affine_simple_average',
-        skip_double=TEST_WITH_ROCM,
         test_cuda=(not TEST_WITH_ROCM),
     ),
     dict(
@@ -952,7 +949,6 @@ new_module_tests = [
         cudnn=True,
         check_eval=True,
         desc='not_affine',
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         module_name='BatchNorm1d',
@@ -961,7 +957,6 @@ new_module_tests = [
         cudnn=True,
         check_eval=True,
         desc='not_tracking_stats',
-        skip_double=TEST_WITH_ROCM,
         test_cuda=(not TEST_WITH_ROCM),
     ),
     dict(
@@ -971,7 +966,6 @@ new_module_tests = [
         cudnn=True,
         check_eval=True,
         desc='3d_input_not_affine',
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         module_name='BatchNorm2d',
@@ -979,7 +973,6 @@ new_module_tests = [
         input_size=(2, 3, 6, 6),
         cudnn=True,
         check_eval=True,
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         module_name='BatchNorm2d',
@@ -988,7 +981,6 @@ new_module_tests = [
         cudnn=True,
         check_eval=True,
         desc='2d_simple_average',
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         module_name='BatchNorm2d',
@@ -997,7 +989,6 @@ new_module_tests = [
         cudnn=True,
         check_eval=True,
         desc='momentum',
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         module_name='BatchNorm2d',
@@ -1006,7 +997,6 @@ new_module_tests = [
         cudnn=True,
         check_eval=True,
         desc='not_affine',
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         module_name='BatchNorm2d',
@@ -1015,7 +1005,6 @@ new_module_tests = [
         cudnn=True,
         check_eval=True,
         desc='not_tracking_stats',
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         module_name='BatchNorm3d',
@@ -1186,7 +1175,6 @@ new_module_tests = [
         constructor_args=(4, 5, 3),
         input_size=(2, 4, 10),
         cudnn=True,
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         module_name='Conv1d',
@@ -1194,7 +1182,6 @@ new_module_tests = [
         input_size=(2, 4, 10),
         cudnn=True,
         desc='stride',
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         module_name='Conv1d',
@@ -1202,7 +1189,6 @@ new_module_tests = [
         input_size=(2, 4, 10),
         cudnn=True,
         desc='pad1',
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         module_name='Conv1d',
@@ -1210,7 +1196,6 @@ new_module_tests = [
         input_size=(2, 4, 10),
         cudnn=True,
         desc='pad2',
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         module_name='Conv1d',
@@ -1218,7 +1203,6 @@ new_module_tests = [
         input_size=(1, 4, 1),
         cudnn=True,
         desc='pad1size1',
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         module_name='Conv1d',
@@ -1226,13 +1210,11 @@ new_module_tests = [
         input_size=(1, 4, 1),
         cudnn=True,
         desc='pad2size1',
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         fullname='Conv1d_dilated',
         constructor=lambda: nn.Conv1d(4, 5, kernel_size=3, dilation=2),
         input_size=(2, 4, 10),
-        skip_double=TEST_WITH_ROCM,
     ),
     dict(
         fullname='Conv1d_groups',
index 93fcd7d..72a3157 100644 (file)
@@ -1134,7 +1134,6 @@ class TestCuda(TestCase):
 
     @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
     # Note: fails sometimes on the CI, passes on dual gfx906
-    @skipIfRocm
     def test_broadcast_coalesced(self):
         numel = 5
         num_bytes = numel * 8
index b93f850..09dceee 100644 (file)
@@ -1795,7 +1795,6 @@ class TestNN(NNTestCase):
         m = pickle.loads(pickle.dumps(m))
         self.assertIsInstance(m, nn.Linear)
 
-    @skipIfRocm
     def test_spectral_norm(self):
         input = torch.randn(3, 5)
         m = nn.Linear(5, 7)
@@ -3275,7 +3274,6 @@ class TestNN(NNTestCase):
             _ = dp.gather(inputs, target_device=0)
 
     @unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
-    @skipIfRocm
     def test_broadcast_double_backwards_gpu(self):
         tensors = (torch.randn(4, 4, device='cuda', requires_grad=True),
                    torch.randn(4, 4, device='cuda', requires_grad=True),
@@ -3370,7 +3368,6 @@ class TestNN(NNTestCase):
                 self.assertEqual(out.data, expected)
 
     @unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
-    @skipIfRocm
     def test_data_parallel_multiple_input(self):
         class TestModule(nn.Module):