From: Edward Yang
Date: Fri, 22 Mar 2019 14:43:40 +0000 (-0700)
Subject: Correctly call superclass setUp in TestCase subclasses. (#18291)
X-Git-Tag: accepted/tizen/6.5/unified/20211028.231830~686
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=2934153f3508ac1710f358677d83de72789bb275;p=platform%2Fupstream%2Fpytorch.git

Correctly call superclass setUp in TestCase subclasses. (#18291)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/18291
ghimport-source-id: d6e95e899bd320407967df41435801e54864ba62

Stack from [ghstack](https://github.com/ezyang/ghstack):
* #18292 Add test for #17271 (torch.exp incorrect for 2**31 size tensor)
* **#18291 Correctly call superclass setUp in TestCase subclasses.**

This makes PYTORCH_TEST_SKIP_FAST work correctly for more tests, reducing
the wasted testing effort on our slow_test job.

Signed-off-by: Edward Z. Yang

Differential Revision: D14567643

fbshipit-source-id: 40cf1d6556e0dd0a0550ff3d9ffed8b6000f8191
---
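Why the super() call matters: the skip logic behind PYTORCH_TEST_SKIP_FAST runs from the shared TestCase.setUp, so a subclass that overrides setUp without chaining to the superclass silently bypasses it. The sketch below is illustrative only and is not part of this commit; BaseTestCase, BrokenTest and FixedTest are hypothetical names, and the real PyTorch check is more selective about which tests it skips.

# Illustrative sketch, not part of this commit. BaseTestCase stands in for a
# shared test base whose setUp honors a PYTORCH_TEST_SKIP_FAST-style switch;
# the actual check in PyTorch is more selective about which tests it skips.
import os
import unittest


class BaseTestCase(unittest.TestCase):
    def setUp(self):
        # Skip logic lives here; it only runs if subclasses chain to it.
        if os.environ.get('PYTORCH_TEST_SKIP_FAST'):
            self.skipTest('fast test skipped because PYTORCH_TEST_SKIP_FAST is set')


class BrokenTest(BaseTestCase):
    def setUp(self):
        # Overrides setUp without calling super(): the skip check never runs.
        self.data = [1, 2, 3]

    def test_sum(self):
        self.assertEqual(sum(self.data), 6)


class FixedTest(BaseTestCase):
    def setUp(self):
        # Chains to the superclass first, as this commit does in each subclass.
        super(FixedTest, self).setUp()
        self.data = [1, 2, 3]

    def test_sum(self):
        self.assertEqual(sum(self.data), 6)


if __name__ == '__main__':
    unittest.main()

Run with PYTORCH_TEST_SKIP_FAST=1, FixedTest.test_sum is reported as skipped while BrokenTest.test_sum still executes, which is the wasted effort on the slow_test job that the commit message refers to.
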
diff --git a/test/test_c10d.py b/test/test_c10d.py
index 04b6a5f..4e4a2e5 100644
--- a/test/test_c10d.py
+++ b/test/test_c10d.py
@@ -170,11 +170,9 @@ class StoreTestBase(object):
 
 class FileStoreTest(TestCase, StoreTestBase):
     def setUp(self):
+        super(FileStoreTest, self).setUp()
         self.file = tempfile.NamedTemporaryFile(delete=False)
 
-    def tearDown(self):
-        pass
-
     def _create_store(self):
         store = c10d.FileStore(self.file.name, 1)
         store.set_timeout(timedelta(seconds=300))
@@ -183,14 +181,12 @@ class FileStoreTest(TestCase, StoreTestBase):
 
 class PrefixFileStoreTest(TestCase, StoreTestBase):
     def setUp(self):
+        super(PrefixFileStoreTest, self).setUp()
         self.file = tempfile.NamedTemporaryFile(delete=False)
         self.filestore = c10d.FileStore(self.file.name, 1)
         self.prefix = "test_prefix"
         self.filestore.set_timeout(timedelta(seconds=300))
 
-    def tearDown(self):
-        pass
-
     def _create_store(self):
         return c10d.PrefixStore(self.prefix, self.filestore)
 
@@ -232,6 +228,7 @@ class TCPStoreTest(TestCase, StoreTestBase):
 
 class PrefixTCPStoreTest(TestCase, StoreTestBase):
     def setUp(self):
+        super(PrefixTCPStoreTest, self).setUp()
         self.tcpstore = create_tcp_store('localhost')
         self.prefix = "test_prefix"
         self.tcpstore.set_timeout(timedelta(seconds=300))
@@ -449,11 +446,13 @@ class MultiProcessTestCase(TestCase):
                 setattr(cls, attr, cls.join_or_run(fn))
 
     def setUp(self):
+        super(MultiProcessTestCase, self).setUp()
         self.rank = self.MAIN_PROCESS_RANK
         self.file = tempfile.NamedTemporaryFile(delete=False)
         self.processes = [self._spawn_process(rank) for rank in range(int(self.world_size))]
 
     def tearDown(self):
+        super(MultiProcessTestCase, self).tearDown()
         for p in self.processes:
             p.terminate()
 
diff --git a/test/test_dataloader.py b/test/test_dataloader.py
index 96b2a6a..0de04f6 100644
--- a/test/test_dataloader.py
+++ b/test/test_dataloader.py
@@ -411,6 +411,7 @@ def init_fn(worker_id):
 
 class TestDataLoader(TestCase):
     def setUp(self):
+        super(TestDataLoader, self).setUp()
         self.data = torch.randn(100, 2, 3, 5)
         self.labels = torch.randperm(50).repeat(2)
         self.dataset = TensorDataset(self.data, self.labels)
@@ -926,6 +927,7 @@ class StringDataset(Dataset):
 
 class TestStringDataLoader(TestCase):
     def setUp(self):
+        super(TestStringDataLoader, self).setUp()
         self.dataset = StringDataset()
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@@ -951,6 +953,7 @@ class DictDataset(Dataset):
 
 class TestDictDataLoader(TestCase):
     def setUp(self):
+        super(TestDictDataLoader, self).setUp()
         self.dataset = DictDataset()
 
     def test_sequential_batch(self):
@@ -994,6 +997,7 @@ class NamedTupleDataset(Dataset):
 
 class TestNamedTupleDataLoader(TestCase):
     def setUp(self):
+        super(TestNamedTupleDataLoader, self).setUp()
         self.dataset = NamedTupleDataset()
 
     @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@@ -1039,6 +1043,7 @@ def collate_into_packed_sequence_batch_first(batch):
 
 class TestCustomPinFn(TestCase):
     def setUp(self):
+        super(TestCustomPinFn, self).setUp()
         inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
         tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
         self.dataset = TensorDataset(inps, tgts)
@@ -1091,6 +1096,7 @@ class TestWorkerQueueDataset(Dataset):
 
 class TestIndividualWorkerQueue(TestCase):
     def setUp(self):
+        super(TestIndividualWorkerQueue, self).setUp()
         self.dataset = TestWorkerQueueDataset([i for i in range(128)])
 
     def _run_ind_worker_queue_test(self, batch_size, num_workers):
diff --git a/test/test_distributed.py b/test/test_distributed.py
index 152499d..879b927 100644
--- a/test/test_distributed.py
+++ b/test/test_distributed.py
@@ -1461,6 +1461,7 @@ if BACKEND == "gloo" or BACKEND == "nccl":
                     setattr(cls, attr, cls.manager_join(fn))
 
         def setUp(self):
+            super(TestDistBackend, self).setUp()
             # Adding this hack until we fix the FileStore to delete its
             # content at the end
             global INIT_METHOD
@@ -1475,6 +1476,7 @@ if BACKEND == "gloo" or BACKEND == "nccl":
                 self.processes.append(self._spawn_process(rank))
 
         def tearDown(self):
+            super(TestDistBackend, self).tearDown()
             for p in self.processes:
                 p.terminate()
 
diff --git a/test/test_distributions.py b/test/test_distributions.py
index b854e6b..75ca520 100644
--- a/test/test_distributions.py
+++ b/test/test_distributions.py
@@ -2694,14 +2694,14 @@ class TestRsample(TestCase):
 
 class TestDistributionShapes(TestCase):
     def setUp(self):
-        super(TestCase, self).setUp()
+        super(TestDistributionShapes, self).setUp()
         self.scalar_sample = 1
         self.tensor_sample_1 = torch.ones(3, 2)
         self.tensor_sample_2 = torch.ones(3, 2, 3)
         Distribution.set_default_validate_args(True)
 
     def tearDown(self):
-        super(TestCase, self).tearDown()
+        super(TestDistributionShapes, self).tearDown()
         Distribution.set_default_validate_args(False)
 
     def test_entropy_shape(self):
@@ -3082,6 +3082,7 @@ class TestDistributionShapes(TestCase):
 
 class TestKL(TestCase):
     def setUp(self):
+        super(TestKL, self).setUp()
 
         class Binomial30(Binomial):
             def __init__(self, probs):
@@ -3596,6 +3597,7 @@ class TestNumericalStability(TestCase):
 
 class TestLazyLogitsInitialization(TestCase):
     def setUp(self):
+        super(TestLazyLogitsInitialization, self).setUp()
         self.examples = [e for e in EXAMPLES
                          if e.Dist in (Categorical, OneHotCategorical, Bernoulli,
                                        Binomial, Multinomial)]
@@ -3638,7 +3640,7 @@ class TestLazyLogitsInitialization(TestCase):
 @unittest.skipIf(not TEST_NUMPY, "NumPy not found")
 class TestAgainstScipy(TestCase):
     def setUp(self):
-        set_rng_seed(0)
+        super(TestAgainstScipy, self).setUp()
         positive_var = torch.randn(20).exp()
         positive_var2 = torch.randn(20).exp()
         random_var = torch.randn(20)
@@ -3789,6 +3791,7 @@ class TestAgainstScipy(TestCase):
 
 class TestTransforms(TestCase):
     def setUp(self):
+        super(TestTransforms, self).setUp()
         self.transforms = []
         transforms_by_cache_size = {}
         for cache_size in [0, 1]:
@@ -4197,7 +4200,7 @@ class TestValidation(TestCase):
                 raise AssertionError(fail_string.format(Dist.__name__, i + 1, len(params)))
 
     def tearDown(self):
-        super(TestCase, self).tearDown()
+        super(TestValidation, self).tearDown()
         Distribution.set_default_validate_args(False)
 
 
diff --git a/test/test_jit.py b/test/test_jit.py
index 0a194bc..fad5307 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -256,6 +256,7 @@ class JitTestCase(TestCase):
     _restored_warnings = False
 
     def setUp(self):
+        super(JitTestCase, self).setUp()
         # unittest overrides all warning filters and forces all of them to show up
         # after we install our own to silence those coming from inside PyTorch.
         # This will ensure that our filter still takes precedence.
@@ -265,6 +266,7 @@ class JitTestCase(TestCase):
         torch._C._jit_set_emit_module_hook(self.emitModuleHook)
 
     def tearDown(self):
+        super(JitTestCase, self).tearDown()
         # needs to be cleared because python might be unloaded before
         # the callback gets destucted
         torch._C._jit_set_emit_module_hook(None)
diff --git a/test/test_optim.py b/test/test_optim.py
index cc833af..82fee18 100644
--- a/test/test_optim.py
+++ b/test/test_optim.py
@@ -509,6 +509,7 @@ class LegacyCosineAnnealingLR(CosineAnnealingLR):
 
 class TestLRScheduler(TestCase):
     def setUp(self):
+        super(TestLRScheduler, self).setUp()
         self.net = SchedulerTestNet()
         self.opt = SGD(
             [{'params': self.net.conv1.parameters()}, {'params': self.net.conv2.parameters(), 'lr': 0.5}],
diff --git a/test/test_thd_distributed.py b/test/test_thd_distributed.py
index 0caf521..9c3c500 100644
--- a/test/test_thd_distributed.py
+++ b/test/test_thd_distributed.py
@@ -1083,6 +1083,7 @@ if BACKEND == "tcp" or BACKEND == "gloo" or BACKEND == "nccl":
                     setattr(cls, attr, cls.manager_join(fn))
 
         def setUp(self):
+            super(TestDistBackend, self).setUp()
             self.processes = []
             self.rank = self.MANAGER_PROCESS_RANK
             Barrier.init()
@@ -1090,6 +1091,7 @@ if BACKEND == "tcp" or BACKEND == "gloo" or BACKEND == "nccl":
             self.processes.append(self._spawn_process(rank))
 
         def tearDown(self):
+            super(TestDistBackend, self).tearDown()
             for p in self.processes:
                 p.terminate()
 
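A note beyond the diff above, not something this commit does: once every subclass is expected to chain to the shared setUp/tearDown, one lightweight way to keep the pattern from regressing is to have the base class record that its setUp ran and assert on that flag from a test. The BaseTestCase and SomeTest names below are hypothetical.

# Illustrative follow-up sketch, not part of this commit: a pattern for
# catching subclasses that override setUp() without calling super().
import unittest


class BaseTestCase(unittest.TestCase):
    def setUp(self):
        # Record that the shared setUp actually ran.
        self._base_setup_ran = True


class SomeTest(BaseTestCase):
    def setUp(self):
        super(SomeTest, self).setUp()
        self.value = 41

    def test_base_setup_was_called(self):
        # Fails loudly if setUp() is overridden and the super() call is dropped.
        self.assertTrue(getattr(self, '_base_setup_ran', False))

    def test_value(self):
        self.assertEqual(self.value + 1, 42)


if __name__ == '__main__':
    unittest.main()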