From b6648c1bbc8572e0432e883eb6974a5597391f64 Mon Sep 17 00:00:00 2001 From: Michael Antonov Date: Thu, 7 Feb 2019 12:42:38 -0800 Subject: [PATCH] Update ATen internals to use int64_t for dimension indexing (#16739) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/16739 Some ATen code locations seemed to use int, etc. incorrectly where either int64_t or size_t was required. Update them to use int64_t for dimension indexing where necessary. Reviewed By: ezyang Differential Revision: D13950124 fbshipit-source-id: aaf1cef783bf3c657aa03490f2616c35c816679f --- aten/src/ATen/ExpandUtils.cpp | 19 ++++++++++--------- aten/src/ATen/ExpandUtils.h | 6 +++--- aten/src/ATen/SparseTensorImpl.h | 4 ++-- aten/src/ATen/core/type.cpp | 2 +- aten/src/ATen/native/Indexing.cpp | 2 +- aten/src/ATen/native/ReduceOps.cpp | 6 +++--- aten/src/ATen/native/TensorIterator.cpp | 6 +++--- 7 files changed, 23 insertions(+), 22 deletions(-) diff --git a/aten/src/ATen/ExpandUtils.cpp b/aten/src/ATen/ExpandUtils.cpp index 81fc414..f0e854c 100644 --- a/aten/src/ATen/ExpandUtils.cpp +++ b/aten/src/ATen/ExpandUtils.cpp @@ -3,17 +3,18 @@ namespace at { std::vector infer_size(IntArrayRef a, IntArrayRef b) { - auto dimsA = a.size(); - auto dimsB = b.size(); - ptrdiff_t ndim = dimsA > dimsB ? dimsA : dimsB; + size_t dimsA = a.size(); + size_t dimsB = b.size(); + size_t ndim = dimsA > dimsB ? dimsA : dimsB; std::vector expandedSizes(ndim); - for (long i = ndim - 1; i >= 0; --i) { - long offset = ndim - 1 - i; - long dimA = dimsA - 1 - offset; - long dimB = dimsB - 1 - offset; - long sizeA = (dimA >= 0) ? a[dimA] : 1; - long sizeB = (dimB >= 0) ? b[dimB] : 1; + // Use ptrdiff_t to ensure signed comparison. + for (ptrdiff_t i = (ptrdiff_t)ndim - 1; i >= 0; --i) { + ptrdiff_t offset = ndim - 1 - i; + ptrdiff_t dimA = dimsA - 1 - offset; + ptrdiff_t dimB = dimsB - 1 - offset; + int64_t sizeA = (dimA >= 0) ? a[dimA] : 1; + int64_t sizeB = (dimB >= 0) ? 
b[dimB] : 1; AT_CHECK( sizeA == sizeB || sizeA == 1 || sizeB == 1, diff --git a/aten/src/ATen/ExpandUtils.h b/aten/src/ATen/ExpandUtils.h index 7fe76a5..39d2ae5 100644 --- a/aten/src/ATen/ExpandUtils.h +++ b/aten/src/ATen/ExpandUtils.h @@ -159,12 +159,12 @@ static inline Tensor sum_to(Tensor tensor, const IntArrayRef shape) { // True if `shape` can be broadcasted to `desired` static inline bool is_expandable_to(IntArrayRef shape, IntArrayRef desired) { - int ndim = shape.size(); - int target_dim = desired.size(); + size_t ndim = shape.size(); + size_t target_dim = desired.size(); if (ndim > target_dim) { return false; } - for (int i = 0; i < ndim; i++) { + for (size_t i = 0; i < ndim; i++) { int64_t size = shape[ndim - i - 1]; int64_t target = desired[target_dim - i - 1]; if (size != target && size != 1) { diff --git a/aten/src/ATen/SparseTensorImpl.h b/aten/src/ATen/SparseTensorImpl.h index 2ee64ec..9da294f 100644 --- a/aten/src/ATen/SparseTensorImpl.h +++ b/aten/src/ATen/SparseTensorImpl.h @@ -106,7 +106,7 @@ public: bool shrinking_dense_dim = false; auto sparse_size_original = sizes().slice(0, sparse_dim); auto sparse_size_new = size.slice(0, sparse_dim); - for (int i = 0; i < sparse_dim; i++) { + for (int64_t i = 0; i < sparse_dim; i++) { if (sparse_size_new[i] < sparse_size_original[i]) { shrinking_sparse_dims = true; break; @@ -114,7 +114,7 @@ public: } auto dense_size_original = sizes().slice(sparse_dim); auto dense_size_new = size.slice(sparse_dim); - for (int i = 0; i < dense_dim; i++) { + for (int64_t i = 0; i < dense_dim; i++) { if (dense_size_new[i] < dense_size_original[i]) { shrinking_dense_dim = true; break; diff --git a/aten/src/ATen/core/type.cpp b/aten/src/ATen/core/type.cpp index 1b9c1f4..a83dbdd 100644 --- a/aten/src/ATen/core/type.cpp +++ b/aten/src/ATen/core/type.cpp @@ -25,7 +25,7 @@ std::ostream& operator<<(std::ostream & out, const Type & t) { out << ")"; } else if (auto value = t.cast()) { out << toString(value->scalarType()) << "("; - for 
(int i = 0; i < value->dim(); ++i) { + for (int64_t i = 0; i < value->dim(); ++i) { if (i > 0) { out << ", "; } diff --git a/aten/src/ATen/native/Indexing.cpp b/aten/src/ATen/native/Indexing.cpp index 5e25696..ef5c124 100644 --- a/aten/src/ATen/native/Indexing.cpp +++ b/aten/src/ATen/native/Indexing.cpp @@ -326,7 +326,7 @@ static Tensor reshape_indexer(const Tensor& index, int64_t dims_before, int64_t AdvancedIndex::AdvancedIndex(const Tensor& src, TensorList indices_list) { int64_t element_size_bytes = src.type().elementSizeInBytes(); - int dims_before = 0, dims_after = 0, dims_indexed = 0; + int64_t dims_before = 0, dims_after = 0, dims_indexed = 0; IntArrayRef replacement_shape; for (size_t dim = 0; dim < indices_list.size(); dim++) { if (!indices_list[dim].defined()) { diff --git a/aten/src/ATen/native/ReduceOps.cpp b/aten/src/ATen/native/ReduceOps.cpp index 587f3ad..27c2566 100644 --- a/aten/src/ATen/native/ReduceOps.cpp +++ b/aten/src/ATen/native/ReduceOps.cpp @@ -38,12 +38,12 @@ static inline Tensor integer_upcast(const Tensor& self, optional dty using DimMask = TensorIterator::DimMask; -static DimMask make_dim_mask(IntArrayRef dims, int ndim) { +static DimMask make_dim_mask(IntArrayRef dims, int64_t ndim) { auto mask = DimMask(); if (dims.empty()) { mask.flip(); } else { - for (int dim : dims) { + for (int64_t dim : dims) { mask.set(maybe_wrap_dim(dim, ndim)); } } @@ -98,7 +98,7 @@ static std::unique_ptr make_reduction( " and ", toString(dtype), "."); - int ndim = self.dim(); + int64_t ndim = self.dim(); auto mask = make_dim_mask(dim, ndim); allocate_reduction_result(result, self, mask, keepdim, dtype); auto viewed_result = review_reduce_result(result, ndim, mask, keepdim); diff --git a/aten/src/ATen/native/TensorIterator.cpp b/aten/src/ATen/native/TensorIterator.cpp index 3f81642..29257aa 100644 --- a/aten/src/ATen/native/TensorIterator.cpp +++ b/aten/src/ATen/native/TensorIterator.cpp @@ -22,14 +22,14 @@ void TensorIterator::reorder_dimensions() { // 
returns 1 if the dim0 should come after dim1, -1 if dim0 should come // before dim1, and 0 if the comparison is ambiguous. - auto should_swap = [&](int dim0, int dim1) { + auto should_swap = [&](size_t dim0, size_t dim1) { int ret = 0; for (int arg = 0; arg < ntensors(); arg++) { if (operands_[arg].stride_bytes.empty()) { continue; } - int stride0 = operands_[arg].stride_bytes[dim0]; - int stride1 = operands_[arg].stride_bytes[dim1]; + int64_t stride0 = operands_[arg].stride_bytes[dim0]; + int64_t stride1 = operands_[arg].stride_bytes[dim1]; if (operands_[arg].is_output) { // move reduced dimensions to the front if ((stride0 == 0) != (stride1 == 0)) { -- 2.7.4