# NOTE(review): this file is an extracted diff fragment — the "- @skipIfRocm"
# lines are diff removal markers (the patch enables these tests on ROCm),
# not executable Python. Confirm against the original test file.
@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
- @skipIfRocm
# Runs the shared graph-executor test battery on CUDA.
# Presumably the two booleans select optimization/device modes — TODO confirm
# against run_ge_tests's signature; not visible here.
def test_ge_cuda(self):
self.run_ge_tests(True, True)
@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "calls .cuda()")
- @skipIfRocm
def test_traced_module_cuda(self):
class Model(nn.Module):
def __init__(self, num_features, num_layers):
self.assertEqual(b.device, s(b, "t.to(dtype=torch.int32)").device)
@unittest.skipIf(not RUN_CUDA, "No CUDA")
- @skipIfRocm
# CUDA variant of the tensor/number math checks; the "- @skipIfRocm" diff
# marker above indicates the ROCm skip was removed (test now runs on ROCm).
def test_tensor_number_math_cuda(self):
self._test_tensor_number_math(device='cuda')
self._test_dcgan_models(self, device='cpu')
@unittest.skipIf(not RUN_CUDA, "no CUDA")
- @skipIfRocm
# CUDA variant of the DCGAN model trace tests ("- @skipIfRocm" is a diff
# removal marker, not code).
def test_dcgan_models_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
self._test_dcgan_models(self, device='cuda', check_export_import=False)
self._test_mnist(self, device='cpu')
@unittest.skipIf(not RUN_CUDA, "no CUDA")
- @skipIfRocm
# CUDA variant of the MNIST model trace test; export/import checking is
# disabled per the referenced upstream issue (#11480).
def test_mnist_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
self._test_mnist(self, device='cuda', check_export_import=False)
@unittest.skipIf(not RUN_CUDA, "no CUDA")
- @skipIfRocm
def test_mnist_training_leaks_no_memory_cuda(self):
net = MnistNet().cuda()
# MnistNet uses dropout, don't check its trace
self.checkTrace(SNLIClassifier(Config()).to(device), (premise, hypothesis),
inputs_require_grads=False, export_import=check_export_import)
- @skipIfRocm
# CPU run of the SNLI classifier trace test ("- @skipIfRocm" is a diff
# removal marker left over from the patch this fragment came from).
def test_snli(self):
self._test_snli(self, device='cpu')
if not TEST_WITH_UBSAN and torch.fbgemm_is_cpu_supported():
- @skipIfRocm
# Quantized CPU variant of the SNLI test. NOTE(review): in the original file
# this def appears to be guarded by an fbgemm/UBSAN availability check (see
# the stray `if` fragment above) — confirm placement against the real source.
def test_snli_quantized(self):
self._test_snli(self, device='cpu', quantized=True)
- @skipIfRocm
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_snli_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
self.checkTrace(net, (torch.rand(5, 1, 32, 32, device=device),),
export_import=check_export_import)
- @skipIfRocm
# CPU run of the super-resolution model trace test ("- @skipIfRocm" is a
# diff removal marker, not code).
def test_super_resolution(self):
self._test_super_resolution(self, device='cpu')
- @skipIfRocm
@unittest.skipIf(not RUN_CUDA, 'no CUDA')
def test_super_resolution_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
x = torch.ones(3)
torch.onnx._export(foo, (x,), f, example_outputs=(x, x))
- @skipIfRocm
@skipIfNoLapack
def test_aten_fallback(self):
class ModelWithAtenNotONNXOp(nn.Module):
# torch.fmod is using to test ONNX_ATEN.
# If you plan to remove fmod from aten, or found this test failed.
# please contact @Rui.
- @skipIfRocm
def test_onnx_aten(self):
class ModelWithAtenFmod(nn.Module):
def forward(self, x, y):
@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "fuser requires CUDA")
- @skipIfRocm
def test_arg_configurations_smoke_cuda(self):
# A smoke test to make sure we won't use the same kernel for contiguous
# and non-contiguous arguments.
self.checkScript(fn, [tensor])
@unittest.skipIf(IS_WINDOWS or IS_SANDCASTLE, "NYI: fuser support for Windows or Sandcastle")
- @skipIfRocm
@enable_cpu_fuser
# CPU-fuser chunk-correctness check; delegates to the shared helper with the
# 'cpu' device. (Returning the helper's result mirrors the original text.)
def test_chunk_correctness(self):
return self._test_chunk_correctness(self, 'cpu')
@unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
@unittest.skipIf(not RUN_CUDA, "No CUDA")
- @skipIfRocm
# CUDA variant of the chunk-correctness check; "- @skipIfRocm" is a diff
# removal marker indicating the ROCm skip was dropped.
def test_chunk_correctness_cuda(self):
return self._test_chunk_correctness(self, 'cuda')