auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
+ // We use double precision for (end - start) / step
+ // to compute size_d for consistency across devices.
+ // The problem with using accscalar_t is that accscalar_t might be
+ // float32 on gpu for a float32 scalar_t, but double on cpu for the same,
+ // and the effective output size would start differing on CPU vs GPU
+ // because of precision issues, which we don't want.
+ // The corner case we do want to take into account is int64_t, which has
+ // higher precision than double.
+ double size_d;
+ if (std::is_same<scalar_t, int64_t>::value) {
+ size_d = std::ceil(static_cast<double>(end.to<accscalar_t>() - start.to<accscalar_t>())
+ / step.to<accscalar_t>());
+ } else {
+ size_d = std::ceil(static_cast<double>(end.to<double>() - start.to<double>())
+ / step.to<double>());
+ }
+
AT_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
AT_CHECK(std::isfinite(static_cast<double>(xstart)) &&
         std::isfinite(static_cast<double>(xend)),
         "unsupported range: ", xstart, " -> ", xend);
AT_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
         "upper bound and lower bound inconsistent with step sign");
- double size_d = std::ceil(static_cast<double>(xend - xstart) / xstep);
AT_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
"invalid size, possible overflow?");
int64_t size = static_cast<int64_t>(size_d);
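For context (not part of the patch), here is a minimal standalone sketch of the two precision effects the comment above describes. The inputs — 0.9 and 0.03 for the floating-point case, and endpoints near 2^62 for the integer case — are illustrative values chosen here, not taken from the patch:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  // (1) float (GPU accscalar_t) vs double (CPU accscalar_t): the exact
  // quotient for arange(0, 0.9, 0.03) is 30, but rounding error lands the
  // double quotient just above 30 (ceil -> 31), while the float quotient
  // rounds to exactly 30.0f (ceil -> 30), so the two devices disagreed.
  float  qf = (0.9f - 0.0f) / 0.03f;  // 30.0f
  double qd = (0.9  - 0.0 ) / 0.03;   // 30.000000000000004
  std::printf("float size: %.0f, double size: %.0f\n",
              std::ceil(qf), std::ceil(qd));

  // (2) int64_t corner case: integers above 2^53 are not exactly
  // representable in double, so the subtraction must happen in int64_t
  // before the cast to double.
  int64_t start = int64_t(1) << 62;
  int64_t end   = start + 5;   // both endpoints round to 2^62 as doubles
  double cast_first = std::ceil(
      (static_cast<double>(end) - static_cast<double>(start)) / 1.0);  // 0
  double sub_first  = std::ceil(static_cast<double>(end - start) / 1.0);  // 5
  std::printf("cast-first: %.0f, subtract-first: %.0f\n", cast_first, sub_first);
  return 0;
}
```

With the patch, both devices take the double path for floating-point types (which would give 31 in the first example on CPU and GPU alike), and the int64_t branch preserves the exact difference that a premature cast to double would destroy.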
RuntimeError, "overflow",
lambda: torch.arange(1.175494351e-38, 3.402823466e+38, device=device))
+ # check that the output shape stays consistent across devices on precision-sensitive step sizes
+ d = torch.arange(-4.0, 4.0, 0.01, dtype=torch.float32, device=device)
+ self.assertEqual(d.shape[0], 800)
+
def test_arange_inference(self):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(torch.float32)