From: Kshiteej K
Date: Wed, 15 Sep 2021 01:17:53 +0000 (-0700)
Subject: [fix] don't expose unique_dim in torch (#63080)
X-Git-Tag: accepted/tizen/8.0/unified/20231005.095509~207
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ff6b475d4a7e35d71eabfbd7bcbcada3a449ddce;p=platform%2Fupstream%2Fpytorch.git

[fix] don't expose unique_dim in torch (#63080)

Summary:
Fixes https://github.com/pytorch/pytorch/issues/62793

This is mostly a quick fix. A more correct fix could be renaming `unique_dim` to `_unique_dim`, but that might be BC-breaking for C++ users (maybe), or there may be something else I am missing.

~~Not sure how to add a test for it.~~ We can add a test like the following. Tested this locally: it fails without the fix and passes with it.
```python
    def test_wildcard_import(self):
        exec('from torch import *')
```

Pull Request resolved: https://github.com/pytorch/pytorch/pull/63080

Reviewed By: gchanan

Differential Revision: D30738711

Pulled By: zou3519

fbshipit-source-id: b86d0190e45ba0b49fd2cffdcfd2e3a75cc2a35e
---

diff --git a/test/test_torch.py b/test/test_torch.py
index d166f96..3c47fd8 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -121,6 +121,9 @@ class AbstractTestCases:
         def test_dir(self):
             dir(torch)

+        def test_wildcard_import(self):
+            exec('from torch import *')
+
         @wrapDeterministicFlagAPITest
         def test_deterministic_flag(self):
             for deterministic in [True, False]:
diff --git a/torch/__init__.py b/torch/__init__.py
index a910882..5740b7a 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -624,8 +624,14 @@ if TYPE_CHECKING:
     # PR #43339 for details.
     from torch._C._VariableFunctions import *  # type: ignore[misc] # noqa: F403

+# Ops not to be exposed in `torch` namespace,
+# mostly helper ops.
+PRIVATE_OPS = (
+    'unique_dim',
+)
+
 for name in dir(_C._VariableFunctions):
-    if name.startswith('__'):
+    if name.startswith('__') or name in PRIVATE_OPS:
         continue
     globals()[name] = getattr(_C._VariableFunctions, name)
     __all__.append(name)
diff --git a/torch/functional.py b/torch/functional.py
index 63470cf..a773333 100644
--- a/torch/functional.py
+++ b/torch/functional.py
@@ -629,9 +629,6 @@ def istft(input: Tensor, n_fft: int, hop_length: Optional[int] = None,
                      normalized, onesided, length, return_complex)


-del torch.unique_dim
-
-
 if TYPE_CHECKING:
     # These _impl functions return a variable number of tensors as output with
     # __torch_function__; tuple unpacking is done already rather than being
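
Beyond the unit test added above, a minimal sanity check of the change (a sketch, assuming a local PyTorch build that includes this patch) could look like the following; the `__all__` check mirrors the `PRIVATE_OPS` filtering introduced in `torch/__init__.py`, and `torch.unique(..., dim=...)` is the documented public API that covers the dim-wise case handled by the `unique_dim` helper:

```python
import torch

# The wildcard import is what failed before this patch (gh-62793):
# `unique_dim` was listed in `torch.__all__` but had been deleted from the
# module by `torch/functional.py`. With the patch it succeeds.
exec('from torch import *')

# `unique_dim` is now skipped by the PRIVATE_OPS filter, so it is not
# re-exported in the public `torch` namespace.
assert 'unique_dim' not in torch.__all__

# The public, documented entry point for dim-wise uniqueness is unchanged.
rows = torch.tensor([[1, 1], [2, 1], [1, 1]])
print(torch.unique(rows, dim=0))
# tensor([[1, 1],
#         [2, 1]])
```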