self.assertEqual(res1, res2)
def test_logical_any(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
x = torch.zeros([2, 3, 400], dtype=torch.uint8, device=device)
self.assertEqual(
self.assertEqual(y, x.any(2, keepdim=True))
def test_logical_all(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
x = torch.ones([2, 3, 400], dtype=torch.uint8, device=device)
self.assertEqual(
('logsumexp', torch.logsumexp, -inf),
]
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
shape = (2, 0, 4)
- for device in devices:
+ for device in torch.testing.get_all_device_types():
x = torch.randn(shape, device=device)
for item in fns_to_test:
self.assertEqual(torch.ones((), device=device), xb.all())
def test_pairwise_distance_empty(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
shape = (2, 0)
x = torch.randn(shape, device=device)
y = torch.randn(shape, device=device)
self.assertEqual(torch.zeros((0, 1), device=device), torch.pairwise_distance(x, y, keepdim=True))
def test_pdist_empty(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
shape = (0, 2)
x = torch.randn(shape, device=device)
self.assertEqual(torch.empty(0, device=device), torch.pdist(x))
self.assertEqual(expected.shape, actual.shape)
self.assertTrue(torch.allclose(expected, actual))
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
for shape in [(4, 5), (3, 2), (2, 1)]:
for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
for trans in [False, True]:
test_pdist_single((1000, 2), device, 2, dtype, False)
def test_cdist_empty(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
x = torch.randn((0, 5), device=device)
y = torch.randn((4, 5), device=device)
self.assertEqual(torch.empty(0, 4, device=device), torch.cdist(x, y))
self.assertEqual(torch.empty(2, 0, device=device), torch.cdist(x, y))
def test_cdist_norm(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
for r1 in [3, 4, 5, 6]:
for m in [2, 3, 4, 10]:
for r2 in [4, 6, 7, 8]:
self.assertTrue(torch.allclose(expected, actual))
def test_cdist_large(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
x = torch.randn(1000, 10, device=device)
y = torch.randn(1000, 10, device=device)
actual = torch.cdist(x, y, p=2)
self.assertTrue(torch.allclose(expected, actual))
def test_cdist_non_contiguous(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
x = torch.randn(5, 7, device=device).t()
y = torch.randn(5, 3, device=device).t()
actual = torch.cdist(x, y, p=2)
self._test_neg(self, lambda t: t)
def test_threshold(self):
- for dtype in torch.testing.get_all_dtypes():
+ for dtype in torch.testing.get_all_math_dtypes('cpu'):
if dtype != torch.uint8 and dtype != torch.float16:
# 100 is wide enough to use AVX2 instructions for all types
x = torch.randn(100).sign().to(dtype=dtype)
self.assertEqual(res1, res2)
def test_floordiv(self):
- for dtype in torch.testing.get_all_dtypes():
+ for dtype in torch.testing.get_all_math_dtypes('cpu'):
if dtype is torch.float16:
continue
x = torch.randn(100).mul(10).to(dtype)
self.assertEqual(y, z)
def test_rdiv(self):
- for dtype in torch.testing.get_all_dtypes():
+ for dtype in torch.testing.get_all_math_dtypes('cpu'):
if dtype is torch.float16:
continue
x = torch.rand(100).add(1).mul(4).to(dtype)
# 'out' is favored over dtype, check error
self.assertRaises(RuntimeError, lambda: fn(x, out=out, dtype=other_dtype))
- for dtype in [dtype for dtype in torch.testing.get_all_dtypes() if dtype != torch.float16]:
+ for dtype in [dtype for dtype in torch.testing.get_all_math_dtypes('cpu') if dtype != torch.float16]:
x = torch.ones(shape, dtype=dtype)
expected_dtype = dtype if dtype.is_floating_point else torch.int64
self.assertIs(expected_dtype, fn(x).dtype)
self.assertEqual(b.device, a.to(b, non_blocking=non_blocking).device)
def test_empty_full(self):
- do_test_empty_full(self, torch.testing.get_all_dtypes(), torch.strided, torch.device('cpu'))
+ do_test_empty_full(self, torch.testing.get_all_math_dtypes('cpu'), torch.strided, torch.device('cpu'))
if torch.cuda.device_count() > 0:
- do_test_empty_full(self, torch.testing.get_all_dtypes(), torch.strided, None)
- do_test_empty_full(self, torch.testing.get_all_dtypes(), torch.strided, torch.device('cuda:0'))
+ do_test_empty_full(self, torch.testing.get_all_math_dtypes('cpu'), torch.strided, None)
+ do_test_empty_full(self, torch.testing.get_all_math_dtypes('cpu'), torch.strided, torch.device('cuda:0'))
def test_dtype_out_match(self):
d = torch.autograd.Variable(torch.DoubleTensor(2, 3))
self.assertTrue(x.is_cuda)
torch.set_default_tensor_type(saved_type)
- # This is a temporary test for a boolean tensors on CPU. Once the CUDA part
- # will be done, these test cases will be moved down to test_tensor_factories_empty test
- def test_tensor_factories_bool(self):
- expectedShape = (1, 2)
- test = torch.empty(expectedShape, dtype=torch.bool)
- self.assertEqual(expectedShape, test.shape)
-
- test2 = torch.empty_like(test, dtype=torch.bool)
- self.assertEqual(test.shape, test2.shape)
-
- test = torch.full(expectedShape, True, dtype=torch.bool)
- self.assertEqual(test, torch.tensor([[True, True]], dtype=torch.bool))
-
- test2 = torch.full_like(test, True, dtype=torch.bool)
- self.assertEqual(test, test2)
-
- test = torch.zeros(expectedShape, dtype=torch.bool)
- self.assertEqual(test, torch.tensor([[False, False]], dtype=torch.bool))
-
- test2 = torch.zeros_like(test, dtype=torch.bool)
- self.assertEqual(test, test2)
-
- test = torch.ones(expectedShape, dtype=torch.bool)
- self.assertEqual(test, torch.tensor([[True, True]], dtype=torch.bool))
-
- test2 = torch.ones_like(test, dtype=torch.bool)
- self.assertEqual(test, test2)
+ def test_unfold_all_devices_and_dtypes(self):
+ # Check that Tensor.unfold yields the expected output shape for every
+ # available device/dtype combination, including size-0 dimensions.
+ for device in torch.testing.get_all_device_types():
+ for dt in torch.testing.get_all_dtypes():
+ if dt == torch.half and device == 'cpu':
+ # fix once random is implemented for Half on CPU
+ # (randint on CPU half is expected to raise for now)
+ self.assertRaises(RuntimeError, lambda: torch.randint(5, (0, 1, 3, 0), dtype=dt, device=device))
+ else:
+ x = torch.randint(5, (0, 1, 3, 0), dtype=dt, device=device)
+ # unfold(dim=2, size=3, step=2) on shape (0, 1, 3, 0):
+ # dim 2 collapses to 1 window and a trailing size-3 axis is appended
+ self.assertEqual((0, 1, 1, 0, 3), x.unfold(2, 3, 2).shape)
- test = torch.randint(10, expectedShape, dtype=torch.bool)
- self.assertEqual(expectedShape, test.shape)
- self.assertEqual(torch.bool, test.dtype)
+ def test_copy_all_dtypes_and_devices(self):
+ # Verify that copy.copy of a tensor is shallow (shares storage) for
+ # every available device/dtype combination.
+ from copy import copy
+ for device in torch.testing.get_all_device_types():
+ for dt in torch.testing.get_all_dtypes():
+ x = torch.tensor([1, 2, 3, 4], dtype=dt, device=device)
+ # NOTE(review): x_clone is created but never asserted against —
+ # presumably intended as an untouched-data sanity check; verify.
+ x_clone = x.clone()
+
+ y = copy(x)
+ y.fill_(1)
+
+ # copy is a shallow copy, only copies the tensor view,
+ # not the data
+ self.assertEqual(x, y)
+
+
+ def test_resize_all_dtypes_and_devices(self):
+ # Check in-place resize_ reshapes a (3, 2)-element tensor to (2, 2)
+ # for every available device/dtype combination.
+ shape = (2, 2)
+ for device in torch.testing.get_all_device_types():
+ for dt in torch.testing.get_all_dtypes():
+ x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device)
+ x.resize_(shape)
+ self.assertEqual(shape, x.shape)
+
+
+ def test_fill_all_dtypes_and_devices(self):
+ # Check in-place fill_ sets every element and preserves the dtype
+ # for every available device/dtype combination.
+ for device in torch.testing.get_all_device_types():
+ for dt in torch.testing.get_all_dtypes():
+ x = torch.tensor((1, 1), dtype=dt, device=device)
+ x.fill_(1)
+
+ self.assertEqual(x, torch.tensor([1, 1], dtype=dt, device=device))
+ # fill_ must not silently change the tensor's dtype
+ self.assertEqual(dt, x.dtype)
+
+
+ def test_clone_all_dtypes_and_devices(self):
+ # Check Tensor.clone produces an equal tensor for every available
+ # device/dtype combination.
+ for device in torch.testing.get_all_device_types():
+ for dt in torch.testing.get_all_dtypes():
+ x = torch.tensor((1, 1), dtype=dt, device=device)
+ y = x.clone()
+ self.assertEqual(x, y)
+
+
+ def test_cat_all_dtypes_and_devices(self):
+ # Check torch.cat along dim 0 and dim 1 for every available
+ # device/dtype combination on a small 2x2 tensor.
+ for device in torch.testing.get_all_device_types():
+ for dt in torch.testing.get_all_dtypes():
+ x = torch.tensor([[1, 2], [3, 4]], dtype=dt, device=device)
+ # concatenation along rows (dim 0) stacks the tensor on itself
+ expected1 = torch.tensor([[1, 2], [3, 4], [1, 2], [3, 4]], dtype=dt, device=device)
+ self.assertEqual(torch.cat((x, x), 0), expected1)
+
+ # concatenation along columns (dim 1) widens each row
+ expected2 = torch.tensor([[1, 2, 1, 2], [3, 4, 3, 4]], dtype=dt, device=device)
+ self.assertEqual(torch.cat((x, x), 1), expected2)
def test_tensor_factories_empty(self):
# ensure we can create empty tensors from each factory function
shapes = [(5, 0, 1), (0,), (0, 0, 1, 0, 2, 0, 0)]
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
for shape in shapes:
- self.assertEqual(shape, torch.zeros(shape, device=device).shape)
- self.assertEqual(shape, torch.zeros_like(torch.zeros(shape, device=device)).shape)
- self.assertEqual(shape, torch.empty(shape, device=device).shape)
- self.assertEqual(shape, torch.empty_like(torch.zeros(shape, device=device)).shape)
- self.assertEqual(shape, torch.empty_strided(shape, (0,) * len(shape), device=device).shape)
- self.assertEqual(shape, torch.full(shape, 3, device=device).shape)
- self.assertEqual(shape, torch.full_like(torch.zeros(shape, device=device), 3).shape)
- self.assertEqual(shape, torch.ones(shape, device=device).shape)
- self.assertEqual(shape, torch.ones_like(torch.zeros(shape, device=device)).shape)
- self.assertEqual(shape, torch.rand(shape, device=device).shape)
- self.assertEqual(shape, torch.rand_like(torch.zeros(shape, device=device)).shape)
- self.assertEqual(shape, torch.randn(shape, device=device).shape)
- self.assertEqual(shape, torch.randn_like(torch.zeros(shape, device=device)).shape)
- self.assertEqual(shape, torch.randint(6, shape, device=device).shape)
- self.assertEqual(shape, torch.randint_like(torch.zeros(shape, device=device), 6).shape)
+ for dt in torch.testing.get_all_dtypes():
+ self.assertEqual(shape, torch.zeros(shape, device=device, dtype=dt).shape)
+ self.assertEqual(shape, torch.zeros_like(torch.zeros(shape, device=device, dtype=dt)).shape)
+ self.assertEqual(shape, torch.full(shape, 3, device=device, dtype=dt).shape)
+ self.assertEqual(shape, torch.full_like(torch.zeros(shape, device=device, dtype=dt), 3).shape)
+ self.assertEqual(shape, torch.ones(shape, device=device, dtype=dt).shape)
+ self.assertEqual(shape, torch.ones_like(torch.zeros(shape, device=device, dtype=dt)).shape)
+ self.assertEqual(shape, torch.empty(shape, device=device, dtype=dt).shape)
+ self.assertEqual(shape, torch.empty_like(torch.zeros(shape, device=device, dtype=dt)).shape)
+ self.assertEqual(shape, torch.empty_strided(shape, (0,) * len(shape), device=device, dtype=dt).shape)
+
+ if dt == torch.half and device == "cpu":
+ # update once random is implemented for half on CPU
+ self.assertRaises(RuntimeError, lambda: torch.randint(6, shape, device=device, dtype=dt).shape)
+ else:
+ self.assertEqual(shape, torch.randint(6, shape, device=device, dtype=dt).shape)
+ self.assertEqual(shape, torch.randint_like(torch.zeros(shape, device=device, dtype=dt), 6).shape)
+
+ if dt != torch.double and dt != torch.float and dt != torch.half:
+ self.assertRaises(RuntimeError, lambda: torch.rand(shape, device=device, dtype=dt).shape)
+
+ if dt == torch.double or dt == torch.float:
+ self.assertEqual(shape, torch.randn(shape, device=device, dtype=dt).shape)
+ self.assertEqual(shape, torch.randn_like(torch.zeros(shape, device=device, dtype=dt)).shape)
self.assertEqual((0,), torch.arange(0, device=device).shape)
self.assertEqual((0, 0), torch.eye(0, device=device).shape)
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('inf')))
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('inf')))
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(-5, float('nan'), device=device))
# check with step size
self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('-inf'), -1, device=device))
def test_empty_tensor_props(self):
sizes = [(0,), (0, 3), (5, 0), (5, 0, 3, 0, 2), (0, 3, 0, 2), (0, 5, 0, 2, 0)]
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
for size in sizes:
- for device in devices:
+ for device in torch.testing.get_all_device_types():
x = torch.empty(tuple(size), device=device)
self.assertEqual(size, x.shape)
self.assertTrue(x.is_contiguous())
@unittest.skipIf(not TEST_NUMPY, 'Numpy not found')
def test_tensordot(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for d in devices:
+ for d in torch.testing.get_all_device_types():
a = torch.arange(60., device=d).reshape(3, 4, 5)
b = torch.arange(24., device=d).reshape(4, 3, 2)
c = torch.tensordot(a, b, dims=([1, 0], [0, 1])).cpu()
self.assertEqual(x.narrow(-2, -1, 1), torch.Tensor([[6, 7, 8]]))
def test_narrow_empty(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
x = torch.randn(2, 3, 4, device=device)
for d in range(x.dim()):
y = x.narrow(d, x.size(d), 0)
@skipIfRocm
def test_linspace(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
_from = random.random()
to = _from + random.random()
res1 = torch.linspace(_from, to, 137, device=device)
check(src.transpose(1, 2), idx)
def test_take_empty(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
for input_shape in [(0,), (0, 1, 2, 0), (1, 2, 3)]:
for indices_shape in [(0,), (0, 1, 2, 0)]:
input = torch.empty(input_shape, device=device)
self.assertEqual(dst.tolist(), [[5, 7], [1, 1]])
def test_put_empty(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
for dst_shape in [(0,), (0, 1, 2, 0), (1, 2, 3)]:
for indices_shape in [(0,), (0, 1, 2, 0)]:
for accumulate in [False, True]:
self.assertRaises(RuntimeError, lambda: x.reshape(0, -1))
def test_tensor_shape_empty(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
x = torch.randn((0, 1, 3, 0), device=device)
# flatten
self.assertEqual((0,), torch.flatten(x, 0, 3).shape)
# select
self.assertEqual((0, 1, 0), torch.select(x, 2, 2).shape)
- # unfold
- self.assertEqual((0, 1, 1, 0, 3), x.unfold(2, 3, 2).shape)
- y = torch.randn((0, 1, 3), device=device)
- self.assertEqual((1, 1, 3, 0), y.unfold(0, 0, 4).shape)
# repeat, permute
self.assertEqual((9, 0, 5, 6, 0), x.repeat(9, 7, 5, 2, 3).shape)
# functions that operate over a dimension but don't reduce.
@skipIfRocm
def test_dim_function_empty(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
shape = (0, 1, 2, 0)
x = torch.randn(shape, device=device)
@skipIfRocm
def test_blas_empty(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
def fn(torchfn, *args):
return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape
@skipIfRocm
def test_blas_alpha_beta_empty(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
# ensure beta is respected
value = 11
input = torch.full((2,), value, device=device)
# numpy/sci often has a direct wrapper (e.g. lu_factor) and a wrapper that "does the right thing"
# (e.g. lu). We often name our functions identically to the lapack function, so it will take work
# to name / migrate-to better wrappers.
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
# need to init cuda to check has_magma
empty = torch.randn((0, 0), device=device)
"Tensors with no storages should not appear to be set "
"to each other")
+ t1 = torch.tensor([True, True], dtype=torch.bool)
+ t2 = torch.tensor([0], dtype=torch.bool).set_(t1)
+ self.assertTrue(t1.is_set_to(t2))
+
def test_tensor_set(self):
t1 = torch.Tensor()
t2 = torch.Tensor(3, 4, 9, 10).uniform_()
self.assertEqual(t1.size(), size)
self.assertEqual(t1.stride(), stride)
+ t1 = torch.tensor([True, True], dtype=torch.bool)
+ t2 = torch.tensor([False, False], dtype=torch.bool)
+ t1.set_(t2)
+ self.assertEqual(t1.storage()._cdata, t2.storage()._cdata)
+
def test_equal(self):
# Contiguous, 1D
t1 = torch.Tensor((3, 4, 9, 10))
self._test_flip(self, use_cuda=False)
def test_roll(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
numbers = torch.arange(1, 9, device=device)
single_roll = numbers.roll(1, 0)
self.assertNotEqual(tensor[dst1[i, 0], dst1[i, 1], dst1[i, 2]].item(), 0)
def test_nonzero_empty(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
x = torch.randn(0, 2, 0, 5, 0, device=device)
y = torch.nonzero(x)
self.assertEqual(0, y.numel())
self.assertEqual(torch.nn.Parameter, type(s2['weight']))
self.assertEqual(torch.nn.Parameter, type(s2['bias']))
- def test_copy(self):
- from copy import copy
- a = torch.randn(5, 5)
- a_clone = a.clone()
- b = copy(a)
- b.fill_(1)
- # copy is a shallow copy, only copies the tensor view,
- # not the data
- self.assertEqual(a, b)
-
def test_pickle(self):
if sys.version_info[0] == 2:
import cPickle as pickle
self.assertEqual(torch.empty_like(a).type(), a.type())
def test_empty_strided(self):
- devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
- for device in devices:
+ for device in torch.testing.get_all_device_types():
for shape in [(2, 3, 4), (0, 2, 0)]:
# some of these cases are pretty strange, just verifying that if as_strided
# allows them then empty_strided can as well.