ss << "The shape of the mask " << mask.sizes() << " at index " << maskIdx;
ss << " does not match the shape of the indexed tensor " << self.sizes();
ss << " at index " << idx;
- AT_ERROR(ss.str());
+ AT_INDEX_ERROR(ss.str());
}
static void checkIndexTensorTypes(TensorList indices) {
for (auto& tensor : indices) {
if (tensor.defined()) {
auto& type = tensor.type();
auto scalarType = type.scalarType();
- AT_CHECK(scalarType == kLong || scalarType == kByte,
- "tensors used as indices must be long or byte tensors");
+ if (scalarType != kLong && scalarType != kByte) {
+ AT_INDEX_ERROR("tensors used as indices must be long or byte tensors");
+ }
}
}
}
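With the dtype check routed through AT_INDEX_ERROR, an invalid index tensor should surface on the Python side as an IndexError rather than a RuntimeError. A minimal sketch of the intended behavior (assuming AT_INDEX_ERROR maps to Python's IndexError, as the test changes below indicate):

import torch

a = torch.arange(5)
try:
    a[torch.tensor([0.5, 1.5])]  # float tensors are not valid indices
except IndexError as e:
    print(e)  # tensors used as indices must be long or byte tensors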
// restride_src with an unhelpful error message.
if (std::find(indexed_sizes.begin(), indexed_sizes.end(), 0) != indexed_sizes.end() &&
std::find(replacement_shape.begin(), replacement_shape.end(), 0) == replacement_shape.end()) {
- AT_ERROR("index is out of bounds for dim with size 0");
+ AT_INDEX_ERROR("index is out of bounds for dimension with size 0");
}
this->dims_before = dims_before;
try {
indices = expand_outplace(indices);
} catch (std::exception& e) {
- AT_ERROR("shape mismatch: indexing tensors could not be broadcast together"
- " with shapes ", shapes_as_str(indices));
+ AT_INDEX_ERROR("shape mismatch: indexing tensors could not be broadcast together"
+ " with shapes ", shapes_as_str(indices));
}
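For reference, expand_outplace applies NumPy-style broadcasting to the index tensors, so the error above only fires for genuinely incompatible shapes. A small illustrative example of index shapes that do broadcast together:

import torch

a = torch.zeros(5, 5)
rows = torch.tensor([[0], [1]])  # shape (2, 1)
cols = torch.tensor([0, 1, 2])   # shape (3,)
print(a[rows, cols].shape)       # broadcasts to torch.Size([2, 3])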
// add missing null Tensors so that it matches self.dim()
while (indices.size() < (size_t)self.dim()) {
Tensor select(const Tensor& self, int64_t dim, int64_t index) {
int64_t ndim = self.dim();
- AT_CHECK(ndim > 0, "select() cannot be applied to a 0-dim tensor.");
+ if (ndim == 0) {
+ AT_INDEX_ERROR("select() cannot be applied to a 0-dim tensor.");
+ }
dim = maybe_wrap_dim(dim, ndim);
auto size = self.size(dim);
- AT_CHECK(index >= -size && index < size,
- "select(): index ", index, " out of range for tensor of size ",
- self.sizes(), " at dimension ", dim);
+ if (index < -size || index >= size) {
+ AT_INDEX_ERROR("select(): index ", index, " out of range for tensor of size ",
+ self.sizes(), " at dimension ", dim);
+ }
if (index < 0) {
index += size;
}
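Assuming the macro change surfaces as Python's IndexError, an out-of-range select() becomes catchable the same way a NumPy indexing error is. A quick sketch:

import torch

t = torch.randn(3, 4)
try:
    t.select(0, 3)  # valid indices for dim 0 are -3..2
except IndexError as e:
    print(e)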
Tensor slice(const Tensor& self, int64_t dim, int64_t start, int64_t end, int64_t step) {
int64_t ndim = self.dim();
- AT_CHECK(ndim > 0, "slice() cannot be applied to a 0-dim tensor.");
+ if (ndim == 0) {
+ AT_INDEX_ERROR("slice() cannot be applied to a 0-dim tensor.");
+ }
dim = maybe_wrap_dim(dim, ndim);
auto sizes = self.sizes().vec();
auto strides = self.strides().vec();
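As with select(), slicing a 0-dim tensor should now raise IndexError; a sketch, assuming Python's r[:] routes through slice() here (the 0-dim tests below exercise exactly this):

import torch

r = torch.randn(())
try:
    r[:]  # slice() cannot be applied to a 0-dim tensor
except IndexError as e:
    print(e)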
x = torch.empty(10, 0)
self.assertEqual(x[[1, 2]].shape, (2, 0))
self.assertEqual(x[[], []].shape, (0,))
- with self.assertRaisesRegex(RuntimeError, 'for dim with size 0'):
+ with self.assertRaisesRegex(IndexError, 'for dimension with size 0'):
x[:, [0, 1]]
def test_empty_ndim_index_bool(self):
a[...] = neg_ones_expanded * 4
self.assertEqual(a, neg_ones * 4)
if a.dim() == 0:
- with self.assertRaises(RuntimeError):
+ with self.assertRaises(IndexError):
a[:] = neg_ones_expanded * 5
def test_setitem_expansion_error(self):
a = torch.randn(2, 3)
# check prefix with non-1s doesn't work
a_expanded = a.expand(torch.Size([5, 1]) + a.size())
+ # NumPy: ValueError
with self.assertRaises(RuntimeError):
a[True] = a_expanded
with self.assertRaises(RuntimeError):
# scalar indexed with scalar
r = torch.randn(())
- with self.assertRaises(RuntimeError):
+ with self.assertRaises(IndexError):
r[:]
with self.assertRaises(IndexError):
r[zero]
# scalar indexed with scalars
r = torch.randn(())
- with self.assertRaises(RuntimeError):
+ with self.assertRaises(IndexError):
r[:] = 8.8
with self.assertRaises(IndexError):
r[zero] = 8.8
self.assertEqual(a[[]], torch.tensor([], dtype=torch.long))
b = tensor([]).float()
- self.assertRaises(RuntimeError, lambda: a[b])
+ self.assertRaises(IndexError, lambda: a[b])
def test_ellipsis_index(self):
a = tensor([[1, 2, 3],
def test_boolean_shape_mismatch(self):
arr = torch.ones((5, 4, 3))
- # TODO: prefer IndexError
index = tensor([True])
- self.assertRaisesRegex(RuntimeError, 'mask', lambda: arr[index])
+ self.assertRaisesRegex(IndexError, 'mask', lambda: arr[index])
index = tensor([False] * 6)
- self.assertRaisesRegex(RuntimeError, 'mask', lambda: arr[index])
+ self.assertRaisesRegex(IndexError, 'mask', lambda: arr[index])
index = torch.ByteTensor(4, 4).zero_()
- self.assertRaisesRegex(RuntimeError, 'mask', lambda: arr[index])
+ self.assertRaisesRegex(IndexError, 'mask', lambda: arr[index])
- self.assertRaisesRegex(RuntimeError, 'mask', lambda: arr[(slice(None), index)])
+ self.assertRaisesRegex(IndexError, 'mask', lambda: arr[(slice(None), index)])
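For contrast, a mask whose shape matches the leading dimensions of the indexed tensor is accepted; a minimal sketch using the uint8 masks of this era (bool tensors came later):

import torch

arr = torch.ones((5, 4, 3))
mask = torch.ones(5, dtype=torch.uint8)  # matches dim 0 of arr
print(arr[mask].shape)                   # torch.Size([5, 4, 3])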
def test_boolean_indexing_weirdness(self):
# Weird boolean indexing things
a = torch.ones((2, 3, 4))
self.assertEqual((0, 2, 3, 4), a[False, True, ...].shape)
self.assertEqual(torch.ones(1, 2), a[True, [0, 1], True, True, [1], [[2]]])
- self.assertRaises(RuntimeError, lambda: a[False, [0, 1], ...])
+ self.assertRaises(IndexError, lambda: a[False, [0, 1], ...])
def test_boolean_indexing_weirdness_tensors(self):
# Weird boolean indexing things
false = torch.tensor(False)
true = torch.tensor(True)
a = torch.ones((2, 3, 4))
self.assertEqual((0, 2, 3, 4), a[False, True, ...].shape)
self.assertEqual(torch.ones(1, 2), a[true, [0, 1], true, true, [1], [[2]]])
- self.assertRaises(RuntimeError, lambda: a[false, [0, 1], ...])
+ self.assertRaises(IndexError, lambda: a[false, [0, 1], ...])
def test_boolean_indexing_alldims(self):
true = torch.tensor(True)
def test_broaderrors_indexing(self):
a = torch.zeros(5, 5)
- self.assertRaisesRegex(RuntimeError, 'shape mismatch', a.__getitem__, ([0, 1], [0, 1, 2]))
- self.assertRaisesRegex(RuntimeError, 'shape mismatch', a.__setitem__, ([0, 1], [0, 1, 2]), 0)
+ self.assertRaisesRegex(IndexError, 'shape mismatch', a.__getitem__, ([0, 1], [0, 1, 2]))
+ self.assertRaisesRegex(IndexError, 'shape mismatch', a.__setitem__, ([0, 1], [0, 1, 2]), 0)
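Since the shape-mismatch error is now an IndexError, user code can catch it the same way NumPy's broadcasting failures are caught. A short sketch:

import torch

a = torch.zeros(5, 5)
try:
    a[[0, 1], [0, 1, 2]]  # shapes (2,) and (3,) do not broadcast
except IndexError:
    print('caught as IndexError, matching NumPy')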
def test_trivial_fancy_out_of_bounds(self):
a = torch.zeros(5)
ind = torch.ones(20, dtype=torch.int64)
if a.is_cuda:
raise unittest.SkipTest('CUDA asserts instead of raising an exception')
ind[-1] = 10
- self.assertRaises(RuntimeError, a.__getitem__, ind)
- self.assertRaises(RuntimeError, a.__setitem__, ind, 0)
+ self.assertRaises(IndexError, a.__getitem__, ind)
+ self.assertRaises(IndexError, a.__setitem__, ind, 0)
ind = torch.ones(20, dtype=torch.int64)
ind[0] = 11
- self.assertRaises(RuntimeError, a.__getitem__, ind)
- self.assertRaises(RuntimeError, a.__setitem__, ind, 0)
+ self.assertRaises(IndexError, a.__getitem__, ind)
+ self.assertRaises(IndexError, a.__setitem__, ind, 0)
def test_index_is_larger(self):
# Simple case of fancy index broadcasting of the index.