From 3f7ddd269c996d790266f3e5119b4f26b67b8cf1 Mon Sep 17 00:00:00 2001 From: Xiang Gao Date: Fri, 12 Apr 2019 12:34:29 -0700 Subject: [PATCH] Step 2: Rename _unique_dim2_temporary_will_remove_soon to unique_dim (#18649) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/18649 ghimport-source-id: 3411d240a6af5fe299a889667964730184e30645 Differential Revision: D14888292 Pulled By: VitalyFedyunin fbshipit-source-id: 80da83c264598f74ab8decb165da4a1ce2b352bb --- aten/src/ATen/native/Unique.cpp | 2 +- aten/src/ATen/native/cuda/Unique.cu | 2 +- aten/src/ATen/native/native_functions.yaml | 7 ++++--- test/test_torch.py | 15 +++++++++------ torch/functional.py | 3 +++ 5 files changed, 18 insertions(+), 11 deletions(-) diff --git a/aten/src/ATen/native/Unique.cpp b/aten/src/ATen/native/Unique.cpp index 2888097..354072c 100644 --- a/aten/src/ATen/native/Unique.cpp +++ b/aten/src/ATen/native/Unique.cpp @@ -230,7 +230,7 @@ _unique_dim_cpu(const Tensor& self, const int64_t dim, const bool sorted, const } std::tuple<Tensor, Tensor, Tensor> -_unique_dim2_cpu(const Tensor& self, const int64_t dim, const bool sorted, const bool return_inverse, const bool return_counts) { +unique_dim_cpu(const Tensor& self, const int64_t dim, const bool sorted, const bool return_inverse, const bool return_counts) { return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "unique_dim", [&] { // The current implementation using `dim` always sorts due to unhashable tensors return _unique_dim_cpu_template(self, dim, false, return_inverse, return_counts); diff --git a/aten/src/ATen/native/cuda/Unique.cu b/aten/src/ATen/native/cuda/Unique.cu index 734fa66..166a0e4 100644 --- a/aten/src/ATen/native/cuda/Unique.cu +++ b/aten/src/ATen/native/cuda/Unique.cu @@ -228,7 +228,7 @@ _unique_dim_cuda(const Tensor& self, const int64_t dim, const bool sorted, const } std::tuple<Tensor, Tensor, Tensor> -_unique_dim2_cuda(const Tensor& self, const int64_t dim, const bool sorted, const bool return_inverse, const bool return_counts) { +unique_dim_cuda(const 
Tensor& self, const int64_t dim, const bool sorted, const bool return_inverse, const bool return_counts) { return AT_DISPATCH_ALL_TYPES(self.scalar_type(), "unique_dim", [&] { return unique_dim_cuda_template(self, dim, false, return_inverse, return_counts); }); diff --git a/aten/src/ATen/native/native_functions.yaml b/aten/src/ATen/native/native_functions.yaml index 47c6d23..a4b6469 100644 --- a/aten/src/ATen/native/native_functions.yaml +++ b/aten/src/ATen/native/native_functions.yaml @@ -1926,11 +1926,12 @@ CPU: _unique2_cpu CUDA: _unique2_cuda -- func: _unique_dim2_temporary_will_remove_soon(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) +- func: unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor) + matches_jit_signature: True variants: function dispatch: - CPU: _unique_dim2_cpu - CUDA: _unique_dim2_cuda + CPU: unique_dim_cpu + CUDA: unique_dim_cuda - func: _unsafe_view(Tensor self, int[] size) -> Tensor diff --git a/test/test_torch.py b/test/test_torch.py index 6d7651f..de52cb7 100644 --- a/test/test_torch.py +++ b/test/test_torch.py @@ -10659,6 +10659,9 @@ tensor([[[1., 1., 1., ..., 1., 1., 1.], run_test(torch.device('cuda')) def test_unique_dim(self): + self.assertFalse(hasattr(torch, 'unique_dim')) + torch.unique_dim = torch._C._VariableFunctions.unique_dim + def run_test(dtype=torch.float, device=torch.device('cpu')): x = torch.tensor([[[1., 1.], [0., 1.], @@ -10712,7 +10715,7 @@ tensor([[[1., 1., 1., ..., 1., 1., 1.], self.assertEqual(expected_unique_dim0, x_unique) self.assertEqual(expected_inverse_dim0, x_inverse) - x_unique, _, x_counts = torch._unique_dim2_temporary_will_remove_soon( + x_unique, _, x_counts = torch.unique_dim( x, return_inverse=False, return_counts=True, @@ -10720,7 +10723,7 @@ tensor([[[1., 1., 1., ..., 1., 1., 1.], self.assertEqual(expected_unique_dim0, x_unique) 
self.assertEqual(expected_counts_dim0, x_counts) - x_unique, x_inverse, x_counts = torch._unique_dim2_temporary_will_remove_soon( + x_unique, x_inverse, x_counts = torch.unique_dim( x, return_inverse=True, return_counts=True, @@ -10740,7 +10743,7 @@ tensor([[[1., 1., 1., ..., 1., 1., 1.], self.assertEqual(expected_unique_dim1, x_unique) self.assertEqual(expected_inverse_dim1, x_inverse) - x_unique, _, x_counts = torch._unique_dim2_temporary_will_remove_soon( + x_unique, _, x_counts = torch.unique_dim( x, return_inverse=False, return_counts=True, @@ -10748,7 +10751,7 @@ tensor([[[1., 1., 1., ..., 1., 1., 1.], self.assertEqual(expected_unique_dim1, x_unique) self.assertEqual(expected_counts_dim1, x_counts) - x_unique, x_inverse, x_counts = torch._unique_dim2_temporary_will_remove_soon( + x_unique, x_inverse, x_counts = torch.unique_dim( x, return_inverse=True, return_counts=True, @@ -10768,7 +10771,7 @@ tensor([[[1., 1., 1., ..., 1., 1., 1.], self.assertEqual(expected_unique_dim2, x_unique) self.assertEqual(expected_inverse_dim2, x_inverse) - x_unique, _, x_counts = torch._unique_dim2_temporary_will_remove_soon( + x_unique, _, x_counts = torch.unique_dim( x, return_inverse=False, return_counts=True, @@ -10776,7 +10779,7 @@ tensor([[[1., 1., 1., ..., 1., 1., 1.], self.assertEqual(expected_unique_dim2, x_unique) self.assertEqual(expected_counts_dim2, x_counts) - x_unique, x_inverse, x_counts = torch._unique_dim2_temporary_will_remove_soon( + x_unique, x_inverse, x_counts = torch.unique_dim( x, return_inverse=True, return_counts=True, diff --git a/torch/functional.py b/torch/functional.py index fd0e47e..5f0f525 100644 --- a/torch/functional.py +++ b/torch/functional.py @@ -387,6 +387,9 @@ def stft(input, n_fft, hop_length=None, win_length=None, window=None, return torch._C._VariableFunctions.stft(input, n_fft, hop_length, win_length, window, normalized, onesided) +del torch.unique_dim + + def unique(input, sorted=True, return_inverse=False, dim=None): r"""Returns the 
unique scalar elements of the input tensor as a 1-D tensor. -- 2.7.4