class FileStoreTest(TestCase, StoreTestBase):
def setUp(self):
+ super(FileStoreTest, self).setUp()
self.file = tempfile.NamedTemporaryFile(delete=False)
- def tearDown(self):
- pass
-
def _create_store(self):
store = c10d.FileStore(self.file.name, 1)
store.set_timeout(timedelta(seconds=300))
class PrefixFileStoreTest(TestCase, StoreTestBase):
def setUp(self):
+ super(PrefixFileStoreTest, self).setUp()
self.file = tempfile.NamedTemporaryFile(delete=False)
self.filestore = c10d.FileStore(self.file.name, 1)
self.prefix = "test_prefix"
self.filestore.set_timeout(timedelta(seconds=300))
- def tearDown(self):
- pass
-
def _create_store(self):
return c10d.PrefixStore(self.prefix, self.filestore)
class PrefixTCPStoreTest(TestCase, StoreTestBase):
def setUp(self):
+ super(PrefixTCPStoreTest, self).setUp()
self.tcpstore = create_tcp_store('localhost')
self.prefix = "test_prefix"
self.tcpstore.set_timeout(timedelta(seconds=300))
setattr(cls, attr, cls.join_or_run(fn))
def setUp(self):
+ super(MultiProcessTestCase, self).setUp()
self.rank = self.MAIN_PROCESS_RANK
self.file = tempfile.NamedTemporaryFile(delete=False)
self.processes = [self._spawn_process(rank) for rank in range(int(self.world_size))]
def tearDown(self):
+ super(MultiProcessTestCase, self).tearDown()
for p in self.processes:
p.terminate()
class TestDataLoader(TestCase):
def setUp(self):
+ super(TestDataLoader, self).setUp()
self.data = torch.randn(100, 2, 3, 5)
self.labels = torch.randperm(50).repeat(2)
self.dataset = TensorDataset(self.data, self.labels)
class TestStringDataLoader(TestCase):
def setUp(self):
+ super(TestStringDataLoader, self).setUp()
self.dataset = StringDataset()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
class TestDictDataLoader(TestCase):
def setUp(self):
+ super(TestDictDataLoader, self).setUp()
self.dataset = DictDataset()
def test_sequential_batch(self):
class TestNamedTupleDataLoader(TestCase):
def setUp(self):
+ super(TestNamedTupleDataLoader, self).setUp()
self.dataset = NamedTupleDataset()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
class TestCustomPinFn(TestCase):
def setUp(self):
+ super(TestCustomPinFn, self).setUp()
inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5)
self.dataset = TensorDataset(inps, tgts)
class TestIndividualWorkerQueue(TestCase):
def setUp(self):
+ super(TestIndividualWorkerQueue, self).setUp()
self.dataset = TestWorkerQueueDataset([i for i in range(128)])
def _run_ind_worker_queue_test(self, batch_size, num_workers):
setattr(cls, attr, cls.manager_join(fn))
def setUp(self):
+ super(TestDistBackend, self).setUp()
# Adding this hack until we fix the FileStore to delete its
# content at the end
global INIT_METHOD
self.processes.append(self._spawn_process(rank))
def tearDown(self):
+ super(TestDistBackend, self).tearDown()
for p in self.processes:
p.terminate()
class TestDistributionShapes(TestCase):
def setUp(self):
- super(TestCase, self).setUp()
+ super(TestDistributionShapes, self).setUp()
self.scalar_sample = 1
self.tensor_sample_1 = torch.ones(3, 2)
self.tensor_sample_2 = torch.ones(3, 2, 3)
Distribution.set_default_validate_args(True)
def tearDown(self):
- super(TestCase, self).tearDown()
+ super(TestDistributionShapes, self).tearDown()
Distribution.set_default_validate_args(False)
def test_entropy_shape(self):
class TestKL(TestCase):
def setUp(self):
+ super(TestKL, self).setUp()
class Binomial30(Binomial):
def __init__(self, probs):
class TestLazyLogitsInitialization(TestCase):
def setUp(self):
+ super(TestLazyLogitsInitialization, self).setUp()
self.examples = [e for e in EXAMPLES if e.Dist in
(Categorical, OneHotCategorical, Bernoulli, Binomial, Multinomial)]
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
class TestAgainstScipy(TestCase):
def setUp(self):
- set_rng_seed(0)
+ super(TestAgainstScipy, self).setUp()
positive_var = torch.randn(20).exp()
positive_var2 = torch.randn(20).exp()
random_var = torch.randn(20)
class TestTransforms(TestCase):
def setUp(self):
+ super(TestTransforms, self).setUp()
self.transforms = []
transforms_by_cache_size = {}
for cache_size in [0, 1]:
raise AssertionError(fail_string.format(Dist.__name__, i + 1, len(params)))
def tearDown(self):
- super(TestCase, self).tearDown()
+ super(TestValidation, self).tearDown()
Distribution.set_default_validate_args(False)
_restored_warnings = False
def setUp(self):
+ super(JitTestCase, self).setUp()
# unittest overrides all warning filters and forces all of them to show up
# after we install our own to silence those coming from inside PyTorch.
# This will ensure that our filter still takes precedence.
torch._C._jit_set_emit_module_hook(self.emitModuleHook)
def tearDown(self):
+ super(JitTestCase, self).tearDown()
# needs to be cleared because python might be unloaded before
    # the callback gets destructed
torch._C._jit_set_emit_module_hook(None)
class TestLRScheduler(TestCase):
def setUp(self):
+ super(TestLRScheduler, self).setUp()
self.net = SchedulerTestNet()
self.opt = SGD(
[{'params': self.net.conv1.parameters()}, {'params': self.net.conv2.parameters(), 'lr': 0.5}],
setattr(cls, attr, cls.manager_join(fn))
def setUp(self):
+ super(TestDistBackend, self).setUp()
self.processes = []
self.rank = self.MANAGER_PROCESS_RANK
Barrier.init()
self.processes.append(self._spawn_process(rank))
def tearDown(self):
+ super(TestDistBackend, self).tearDown()
for p in self.processes:
p.terminate()