From 9f1d8f2eeb8baa98cdb85336bddb0c0ab492afd9 Mon Sep 17 00:00:00 2001
From: Chaitanya Sri Krishna Lolla
Date: Fri, 14 Dec 2018 14:18:00 -0800
Subject: [PATCH] enabled tests in test_nn, test_cuda and test_sparse (#15232)

Summary:
tests work on ROCm 1.9.2 as present on CI (fp16 bringup, hipMemset and sparse improvements)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/15232

Differential Revision: D13470991

Pulled By: bddppq

fbshipit-source-id: 45acc4f9ea5baaaf7672b86eb022948055779925
---
 test/test_cuda.py   | 29 +++++++++--------------------
 test/test_nn.py     | 18 ------------------
 test/test_sparse.py | 18 ------------------
 3 files changed, 9 insertions(+), 56 deletions(-)

diff --git a/test/test_cuda.py b/test/test_cuda.py
index ffb6281..d00d4e2 100644
--- a/test/test_cuda.py
+++ b/test/test_cuda.py
@@ -281,7 +281,7 @@ tests = [
     ('addbmm', small_2d, lambda t: [small_3d(t), small_3d(t)], None, float_types),
     ('addbmm', small_2d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar'),
     ('addbmm', small_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), small_3d(t), small_3d(t)], 'two_scalars'),
-    ('baddbmm', small_3d, lambda t: [small_3d(t), small_3d(t)], '', types, False, "skipIfRocm:HalfTensor"),
+    ('baddbmm', small_3d, lambda t: [small_3d(t), small_3d(t)],),
     ('baddbmm', small_3d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar'),
     ('baddbmm', small_3d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), small_3d(t), small_3d(t)], 'two_scalars'),
     ('bmm', small_3d, lambda t: [small_3d(t)], '', float_types_no_half),
@@ -293,16 +293,14 @@ tests = [
     ('addmm', medium_2d, lambda t: [medium_2d(t), medium_2d(t)]),
     ('addmm', medium_2d, lambda t: [number(0.4, 2, t), medium_2d(t), medium_2d(t)], 'scalar'),
     ('addmm', medium_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_2d(t), medium_2d(t)], 'two_scalars'),
-    ('addmv', medium_1d, lambda t: [medium_2d(t), medium_1d(t)], '', types, False, "skipIfRocm:HalfTensor"),
-    ('addmv', medium_1d, lambda t: [number(0.4, 2, t), medium_2d(t), medium_1d(t)], 'scalar',
-        types, False, "skipIfRocm:HalfTensor"),
-    ('addmv', medium_1d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_2d(t), medium_1d(t)], 'two_scalars',
-        types, False, "skipIfRocm:HalfTensor"),
+    ('addmv', medium_1d, lambda t: [medium_2d(t), medium_1d(t)],),
+    ('addmv', medium_1d, lambda t: [number(0.4, 2, t), medium_2d(t), medium_1d(t)], 'scalar'),
+    ('addmv', medium_1d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_2d(t), medium_1d(t)], 'two_scalars'),
     ('addr', medium_2d, lambda t: [medium_1d(t), medium_1d(t)]),
     ('addr', medium_2d, lambda t: [number(0.4, 2, t), medium_1d(t), medium_1d(t)], 'scalar'),
     ('addr', medium_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_1d(t), medium_1d(t)], 'two_scalars'),
     ('atan2', medium_2d, lambda t: [medium_2d(t)], None, float_types + [torch.HalfTensor]),
-    ('fmod', small_3d, lambda t: [3], 'value', types, False, "skipIfRocm:HalfTensor"),
+    ('fmod', small_3d, lambda t: [3], 'value',),
     ('fmod', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
     ('chunk', medium_2d, lambda t: [4],),
     ('chunk', medium_2d, lambda t: [4, 1], 'dim'),
@@ -361,7 +359,7 @@ tests = [
     ('mode', small_3d, lambda t: [-1], 'neg_dim'),
     ('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.1, 10), lambda t: [1], '2d_p=1', float_types_no_half),
     ('mvlgamma', lambda t: tensor_clamp(small_2d(t), 0.6, 10), lambda t: [2], '2d_p=2', float_types_no_half),
-    ('remainder', small_3d, lambda t: [3], 'value', types, False, "skipIfRocm:HalfTensor"),
+    ('remainder', small_3d, lambda t: [3], 'value',),
     ('remainder', small_3d, lambda t: [-3], 'negative_value', signed_types),
     ('remainder', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
     ('remainder', small_3d, lambda t: [constant_tensor_sub(0, small_3d_positive(t))], 'negative_tensor', signed_types),
@@ -415,9 +413,9 @@ tests = [
     ('transpose', new_t(1, 2, 3, 4), lambda t: [1, 2],),
     ('transpose', new_t(1, 2, 3, 4), lambda t: [-1, -2], 'neg_dim'),
     ('to_list', small_3d, lambda t: [],),
-    ('topk', small_3d_unique, lambda t: [2, 1, False, True], 'dim_sort', types, False, "skipIfRocm:HalfTensor"),
-    ('topk', small_3d_unique, lambda t: [2, -1, False, True], 'neg_dim_sort', types, False, "skipIfRocm:HalfTensor"),
-    ('topk', small_3d_unique, lambda t: [2, 1, True, True], 'dim_desc_sort', types, False, "skipIfRocm:HalfTensor"),
+    ('topk', small_3d_unique, lambda t: [2, 1, False, True], 'dim_sort',),
+    ('topk', small_3d_unique, lambda t: [2, -1, False, True], 'neg_dim_sort',),
+    ('topk', small_3d_unique, lambda t: [2, 1, True, True], 'dim_desc_sort',),
     ('trace', medium_2d, lambda t: []),
     ('tril', medium_2d, lambda t: [],),
     ('tril', medium_2d_expanded, lambda t: [], 'zero_stride', types, True),
@@ -811,7 +809,6 @@ class TestCuda(TestCase):
                 end1 = advance(gen1, end1)
             t += 1
 
-    @skipIfRocm
     def test_out_of_memory(self):
         tensor = torch.zeros(1024, device='cuda')
 
@@ -971,11 +968,9 @@ class TestCuda(TestCase):
             if input.is_cuda and input.get_device() == i:
                 self.assertEqual(t.data_ptr(), input.data_ptr())
 
-    @skipIfRocm
     def test_broadcast_cpu(self):
         self._test_broadcast(torch.randn(5, 5))
 
-    @skipIfRocm
     def test_broadcast_gpu(self):
         self._test_broadcast(torch.randn(5, 5).cuda())
 
@@ -1185,11 +1180,9 @@ class TestCuda(TestCase):
         index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
         self.assertEqual(result[tuple(index)], y)
 
-    @skipIfRocm
     def test_gather(self):
         self._test_gather(0)
 
-    @skipIfRocm
     def test_gather_dim(self):
         self._test_gather(1)
 
@@ -1546,7 +1539,6 @@ class TestCuda(TestCase):
         self.assertEqual(x.sum(dim=(-1, -2)).cpu(), y.sum(dim=(-1, -2)))
         self.assertEqual(x.sum(dim=(1, 3)).cpu(), y.sum(dim=(1, 3)))
 
-    @skipIfRocm
     def test_sum_fp16(self):
         x = torch.zeros(10, device='cuda', dtype=torch.float16)
         self.assertEqual(x.sum(), 0)
@@ -1823,7 +1815,6 @@ class TestCuda(TestCase):
     def test_tensor_scatter(self):
         _TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_', test_bounds=False)
 
-    @skipIfRocm
     def test_tensor_scatterAdd(self):
         _TestTorchMixin._test_scatter_base(self, lambda t: t.cuda(), 'scatter_add_', test_bounds=False)
 
@@ -1992,7 +1983,6 @@ class TestCuda(TestCase):
     def test_norm(self):
         _TestTorchMixin._test_norm(self, device='cuda')
 
-    @skipIfRocm
     def test_dist(self):
         _TestTorchMixin._test_dist(self, device='cuda')
 
@@ -2056,7 +2046,6 @@ class TestCuda(TestCase):
     def test_random_neg_values(self):
         _TestTorchMixin._test_random_neg_values(self, use_cuda=True)
 
-    @skipIfRocm
     def test_bincount_cuda(self):
         _TestTorchMixin._test_bincount(self, device='cuda')
         # ensure CUDA code coverage
diff --git a/test/test_nn.py b/test/test_nn.py
index f0efd48..1237440 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -2150,7 +2150,6 @@ class TestNN(NNTestCase):
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
     @repeat_test_for_types(ALL_TENSORTYPES)
-    @skipIfRocm
     def test_gumbel_softmax_st_cuda(self, dtype=torch.float):
         self._test_gumbel_softmax_st(True, dtype=dtype)
 
@@ -2345,7 +2344,6 @@ class TestNN(NNTestCase):
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
     @repeat_test_for_types(ALL_TENSORTYPES)
-    @skipIfRocm
     def test_embedding_bag_cuda(self, dtype=torch.float):
         self._test_EmbeddingBag(True, 'sum', False, dtype)
         self._test_EmbeddingBag(True, 'mean', False, dtype)
@@ -2514,7 +2512,6 @@ class TestNN(NNTestCase):
         self._test_InstanceNorm_general(nn.InstanceNorm1d, input, dtype=torch.float)
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @skipIfRocm
     def test_InstanceNorm1d_general_cuda(self):
         b = random.randint(3, 5)
         c = random.randint(3, 5)
@@ -2621,7 +2618,6 @@ class TestNN(NNTestCase):
         self._test_LayerNorm_general()
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @skipIfRocm
     def test_LayerNorm_general_cuda(self):
         self._test_LayerNorm_general("cuda")
         self._test_LayerNorm_cuda_half()
@@ -2686,7 +2682,6 @@ class TestNN(NNTestCase):
         self._test_GroupNorm_general(dtype=torch.float)
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @skipIfRocm
     def test_GroupNorm_general_cuda(self):
         self._test_GroupNorm_general("cuda", torch.float)
         self._test_GroupNorm_cuda_half()
@@ -2867,7 +2862,6 @@ class TestNN(NNTestCase):
         self._test_batchnorm_eval()
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @skipIfRocm
     def test_batchnorm_eval_cuda(self, dtype=torch.float):
         self._test_batchnorm_eval("cuda", dtype)
         if TEST_CUDNN:
@@ -2878,7 +2872,6 @@ class TestNN(NNTestCase):
         self._test_batchnorm_simple_average()
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @skipIfRocm
     def test_batchnorm_simple_average_cuda(self):
         self._test_batchnorm_simple_average(torch.cuda.FloatTensor)
         if TEST_CUDNN:
@@ -2906,7 +2899,6 @@ class TestNN(NNTestCase):
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
     @repeat_test_for_types(ALL_TENSORTYPES)
-    @skipIfRocm
     def test_MaxPool3d_indices_cuda(self, dtype=torch.float):
         self._test_maxpool_indices(3, device="cuda", dtype=dtype)
 
@@ -4545,7 +4537,6 @@ class TestNN(NNTestCase):
             test(input_shape, hidden_shape, mode)
 
     @unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
-    @skipIfRocm
     def test_rnn_check_device(self):
         input_size = 3
         hidden_size = 5
@@ -5137,7 +5128,6 @@ class TestNN(NNTestCase):
         gradgradcheck(func, [v])
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @skipIfRocm
     def test_batchnorm_cudnn_half(self):
         # THNN
         input = torch.randint(1, 10, (2, 3, 2, 2), dtype=torch.half, device="cuda", requires_grad=True)
@@ -5185,7 +5175,6 @@ class TestNN(NNTestCase):
         self._test_batchnorm_update_stats()
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @skipIfRocm
     def test_batchnorm_update_stats_cuda(self):
         self._test_batchnorm_update_stats("cuda", torch.float)
         if TEST_CUDNN:
@@ -5348,7 +5337,6 @@ class TestNN(NNTestCase):
             for p in [0, 1, 2, 0.5, 1.5, 2.5, float('inf')]:
                 self.assertTrue(gradcheck(lambda x: F.pdist(x, p), (inp,)))
 
-    @skipIfRocm
     def test_pdist_empty_row(self):
         for device in device_():
             inp = torch.randn(1, 3, dtype=torch.double, device=device, requires_grad=True)
@@ -6290,7 +6278,6 @@ class TestNN(NNTestCase):
         self._test_conv_noncontig_weights(self, torch.device('cpu'))
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
-    @skipIfRocm
     def test_conv_noncontig_weights_cuda(self):
         self._test_conv_noncontig_weights(self, torch.device('cuda'))
 
@@ -7057,7 +7044,6 @@ new_criterion_tests = [
             loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)),
         check_sum_reduction=True,
         desc='2d',
-        test_cuda=(not TEST_WITH_ROCM),
     ),
     dict(
         module_name='NLLLoss',
@@ -7067,7 +7053,6 @@
         reference_fn=lambda i, t, m:
             loss_reference_fns['NLLLossNd'](i, t, weight=get_weight(m)),
         desc='2d_weights',
-        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='NLLLoss',
@@ -7077,7 +7062,6 @@
         reference_fn=lambda i, t, m:
            loss_reference_fns['NLLLossNd'](i, t, ignore_index=1),
         desc='2d_ignore_index',
-        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='NLLLoss',
@@ -7087,7 +7071,6 @@
            loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)),
         check_sum_reduction=True,
         desc='higher_dim',
-        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='NLLLoss',
@@ -7097,7 +7080,6 @@
            loss_reference_fns['NLLLossNd'](i, t, reduction=get_reduction(m)),
         check_sum_reduction=True,
         desc='dim_is_3',
-        test_cuda=(not TEST_WITH_ROCM)
     ),
     dict(
         module_name='PoissonNLLLoss',
diff --git a/test/test_sparse.py b/test/test_sparse.py
index c48c299..ee014d7 100644
--- a/test/test_sparse.py
+++ b/test/test_sparse.py
@@ -135,7 +135,6 @@ class TestSparse(TestCase):
             printed.append('')
         self.assertExpected('\n'.join(printed))
 
-    @skipIfRocm
     def test_basic(self):
         def test_shape(sparse_dims, nnz, with_size):
             if isinstance(with_size, Number):
@@ -173,7 +172,6 @@ class TestSparse(TestCase):
         self.assertEqual(x._indices().numel(), 0)
         self.assertEqual(x._values().numel(), 0)
 
-    @skipIfRocm
     def test_coalecce(self):
         for empty_i, empty_v, empty_nnz in itertools.product([True, False], repeat=3):
             sparse_size = [] if empty_i else [2, 1]
@@ -206,7 +204,6 @@ class TestSparse(TestCase):
             RuntimeError,
             lambda: self.SparseTensor(indices, values, torch.Size([2, 4, 2, 1])))
 
-    @skipIfRocm
     def test_to_dense(self):
         def test_tensor(x, res):
             x.to_dense()  # Tests triple to_dense for memory corruption
@@ -271,7 +268,6 @@ class TestSparse(TestCase):
         sp, _, _ = self._gen_sparse(2, 10, [3, 3, 3])
         self.assertRaises(RuntimeError, lambda: sp.to_sparse())
 
-    @skipIfRocm
     def test_shared(self):
         i = self.IndexTensor([[2]])
         v = self.ValueTensor([5])
@@ -287,7 +283,6 @@ class TestSparse(TestCase):
         i[0][0] = 0
         self.assertEqual(self.ValueTensor(3, 0), self.safeToDense(x))
 
-    @skipIfRocm
     def test_to_dense_hybrid(self):
         def test_tensor(x, res):
             x.to_dense()  # Tests double to_dense for memory corruption
@@ -332,7 +327,6 @@ class TestSparse(TestCase):
         res = self.ValueTensor(3, 4, 2, 0)
         test_tensor(x, res)
 
-    @skipIfRocm
     def test_contig(self):
         def test_tensor(x, exp_i, exp_v):
             x = self.safeCoalesce(x)
@@ -413,7 +407,6 @@ class TestSparse(TestCase):
         exp_v = self.ValueTensor(2, 0)
         test_tensor(x, exp_i, exp_v)
 
-    @skipIfRocm
     def test_contig_hybrid(self):
         def test_tensor(x, exp_i, exp_v):
             x = self.safeCoalesce(x)
@@ -516,7 +509,6 @@ class TestSparse(TestCase):
         test_shape(3, 10, [100, 100, 100, 5, 5, 5, 0])
         test_shape(3, 0, [0, 0, 100, 5, 5, 5, 0])
 
-    @skipIfRocm
     def test_Sparse_to_Sparse_copy_(self):
         # This is for testing torch.copy_(SparseTensor, SparseTensor)
         sparse_dims = 3
@@ -602,7 +594,6 @@ class TestSparse(TestCase):
         x = torch.sparse.FloatTensor(2, 3, 4, 0)
         test_tensor(x)
 
-    @skipIfRocm
     def test_transpose(self):
         def test_shape(sparse_dims, nnz, with_size):
             x = self._gen_sparse(sparse_dims, nnz, with_size)[0]
@@ -671,7 +662,6 @@ class TestSparse(TestCase):
         test_in_place(x)
         test_not_in_place(x)
 
-    @skipIfRocm
     def test_add_zeros(self):
         def test_shape(sparse_dims, nnz, sizes):
             x, _, _ = self._gen_sparse(sparse_dims, nnz, sizes)
@@ -686,7 +676,6 @@ class TestSparse(TestCase):
         test_shape(2, 20, [3, 17, 19, 5])
         test_shape(2, 20, [3, 17, 19, 0])
 
-    @skipIfRocm
     def test_cat(self):
         # shapes: list of tuples (sparse_dims, nnz, sizes)
         def test_shapes(shapes, dim, fail_message=None):
@@ -927,7 +916,6 @@ class TestSparse(TestCase):
             expected = y + r * self.safeToDense(x_)
             self.assertEqual(res, expected)
 
-    @skipIfRocm
     def test_spadd(self):
         self._test_spadd_shape(10, [5, 6])
         self._test_spadd_shape(10, [10, 10, 10])
@@ -937,7 +925,6 @@ class TestSparse(TestCase):
         self._test_spadd_shape(0, [50, 0, 20])
         self._test_spadd_shape(0, [50, 30, 0])
 
-    @skipIfRocm
     def test_spadd_hybrid(self):
         self._test_spadd_shape(10, [5, 6], [2, 3])
         self._test_spadd_shape(10, [10, 10, 10], [3])
@@ -1115,7 +1102,6 @@ class TestSparse(TestCase):
         self._test_basic_ops_shape(0, 0, [10, 10, 10], [2, 0])
         self._test_basic_ops_shape(0, 0, [10, 10, 0], [2, 0])
 
-    @skipIfRocm
     def test_add_dense_sparse_mismatch(self):
         def test_shape(dense_size, sparse_dims_shape, dense_dims_shape, sparse_size):
             x = torch.zeros(dense_size, dtype=self.value_dtype, device=self.device)
@@ -1294,7 +1280,6 @@ class TestSparse(TestCase):
                 for length in range(dim_sz - start):
                     yield [dim, start, length]
 
-    @skipIfRocm
     def test_narrow(self):
         shape = [3, 3, 4, 2]
         input, _, _ = self._gen_sparse(4, 19, shape)
@@ -1443,7 +1428,6 @@ class TestSparse(TestCase):
         self._test_new_device((30, 20, 10), 1)
         self._test_new_device((30, 20, 10, 0), 1)
 
-    @skipIfRocm
     def test_new(self):
         def test_shape(sparse_dims, nnz, with_size):
             x, indices, values = self._gen_sparse(sparse_dims, nnz, with_size)
@@ -1595,7 +1579,6 @@ class TestSparse(TestCase):
         test_shape([3, 0], [0, 2, 4, 0], [0, 0, 0, 2, 4, 0], [0, 0, 0, 2, 4, 0])
         test_shape([3, 0], [0, 2, 4, 0], [1, 2, 3, 2, 4, 0], [1, 2, 3, 2, 4, 0])
 
-    @skipIfRocm
     def test_factory_dense_dim(self):
         indices = self.IndexTensor([[0]])
         values = self.ValueTensor([[[1, 1, 1], [1, 1, 1]]])
@@ -1750,7 +1733,6 @@ class TestSparse(TestCase):
         x = self.SparseTensor(1, 0)
         self.assertTrue(x.is_sparse)
 
-    @skipIfRocm
     def test_resize_as(self):
         def do_test(t):
             y = t.new().resize_as_(t).zero_()
-- 
2.7.4
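
Every change in this patch does one of three things: it deletes a @skipIfRocm decorator from a test method, drops a "skipIfRocm:HalfTensor" marker from an entry in the tests table of test_cuda.py, or removes a test_cuda=(not TEST_WITH_ROCM) gate from an NLLLoss entry in new_criterion_tests, so the affected tests now also run on ROCm builds. As a rough illustration of what @skipIfRocm does, the sketch below models it as an environment-driven unittest skip; the flag name and helper here are illustrative stand-ins for the real definitions in test/common_utils.py, not the upstream implementation.

    import os
    import unittest
    from functools import wraps

    # Illustrative only: PyTorch's actual flag and decorator live in test/common_utils.py.
    TEST_WITH_ROCM = os.environ.get('PYTORCH_TEST_WITH_ROCM', '0') == '1'

    def skipIfRocm(fn):
        """Skip the wrapped test whenever the suite runs against a ROCm build."""
        @wraps(fn)
        def wrapper(*args, **kwargs):
            if TEST_WITH_ROCM:
                raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
            return fn(*args, **kwargs)
        return wrapper

    class ExampleTest(unittest.TestCase):
        @skipIfRocm  # deleting this line, as the patch does, re-enables the test on ROCm
        def test_example(self):
            self.assertEqual(1 + 1, 2)

    if __name__ == '__main__':
        unittest.main()

The per-type strings such as "skipIfRocm:HalfTensor" in the tests table request the same skip for only the named tensor type, so removing them re-enables just the fp16 variants that now pass on ROCm 1.9.2; likewise, dropping test_cuda=(not TEST_WITH_ROCM) re-enables the CUDA half of each NLLLoss criterion test.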