namespace at {
std::vector<int64_t> infer_size(IntArrayRef a, IntArrayRef b) {
- auto dimsA = a.size();
- auto dimsB = b.size();
- ptrdiff_t ndim = dimsA > dimsB ? dimsA : dimsB;
+ size_t dimsA = a.size();
+ size_t dimsB = b.size();
+ size_t ndim = dimsA > dimsB ? dimsA : dimsB;
std::vector<int64_t> expandedSizes(ndim);
- for (long i = ndim - 1; i >= 0; --i) {
- long offset = ndim - 1 - i;
- long dimA = dimsA - 1 - offset;
- long dimB = dimsB - 1 - offset;
- long sizeA = (dimA >= 0) ? a[dimA] : 1;
- long sizeB = (dimB >= 0) ? b[dimB] : 1;
+ // Use ptrdiff_t to ensure signed comparison.
+ for (ptrdiff_t i = (ptrdiff_t)ndim - 1; i >= 0; --i) {
+ ptrdiff_t offset = (ptrdiff_t)ndim - 1 - i;
+ ptrdiff_t dimA = (ptrdiff_t)dimsA - 1 - offset;
+ ptrdiff_t dimB = (ptrdiff_t)dimsB - 1 - offset;
+ int64_t sizeA = (dimA >= 0) ? a[dimA] : 1;
+ int64_t sizeB = (dimB >= 0) ? b[dimB] : 1;
AT_CHECK(
sizeA == sizeB || sizeA == 1 || sizeB == 1,
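The signed induction variable is the heart of this hunk: with an unsigned `i`, the exit test `i >= 0` can never fail, because `--i` wraps around at zero. A minimal standalone sketch of the failure mode and the fix (plain C++, no ATen types):

#include <cstddef>
#include <cstdio>

int main() {
  size_t ndim = 3;

  // Broken: once i reaches 0, --i wraps to SIZE_MAX, so i >= 0 is
  // always true and the loop never terminates:
  //   for (size_t i = ndim - 1; i >= 0; --i) { ... }

  // Fixed: a signed index can go negative, so the loop exits.
  for (ptrdiff_t i = (ptrdiff_t)ndim - 1; i >= 0; --i) {
    printf("visiting dim %td\n", i);
  }
  return 0;
}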
// True if `shape` can be broadcast to `desired`
static inline bool is_expandable_to(IntArrayRef shape, IntArrayRef desired) {
- int ndim = shape.size();
- int target_dim = desired.size();
+ size_t ndim = shape.size();
+ size_t target_dim = desired.size();
if (ndim > target_dim) {
return false;
}
- for (int i = 0; i < ndim; i++) {
+ for (size_t i = 0; i < ndim; i++) {
int64_t size = shape[ndim - i - 1];
int64_t target = desired[target_dim - i - 1];
if (size != target && size != 1) {
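For reference, the broadcasting rule this function encodes: align `shape` with `desired` at the trailing dimension; `shape` is expandable iff it has no more dimensions than `desired` and every aligned size either matches or is 1. A standalone restatement using `std::vector` in place of `IntArrayRef` (illustrative only, not the ATen API):

#include <vector>
#include <cstdint>
#include <cassert>

// Standalone restatement of is_expandable_to (illustrative).
static bool expandable_to(const std::vector<int64_t>& shape,
                          const std::vector<int64_t>& desired) {
  size_t ndim = shape.size();
  size_t target_dim = desired.size();
  if (ndim > target_dim) return false;
  for (size_t i = 0; i < ndim; i++) {
    int64_t size = shape[ndim - i - 1];
    int64_t target = desired[target_dim - i - 1];
    if (size != target && size != 1) return false;
  }
  return true;
}

int main() {
  assert(expandable_to({3, 1}, {2, 3, 4}));   // 1 broadcasts to 4
  assert(!expandable_to({3, 2}, {2, 3, 4}));  // 2 vs 4: no match
  assert(!expandable_to({2, 3, 4}, {3, 4}));  // too many dims
  return 0;
}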
bool shrinking_dense_dim = false;
auto sparse_size_original = sizes().slice(0, sparse_dim);
auto sparse_size_new = size.slice(0, sparse_dim);
- for (int i = 0; i < sparse_dim; i++) {
+ for (int64_t i = 0; i < sparse_dim; i++) {
if (sparse_size_new[i] < sparse_size_original[i]) {
shrinking_sparse_dims = true;
break;
}
auto dense_size_original = sizes().slice(sparse_dim);
auto dense_size_new = size.slice(sparse_dim);
- for (int i = 0; i < dense_dim; i++) {
+ for (int64_t i = 0; i < dense_dim; i++) {
if (dense_size_new[i] < dense_size_original[i]) {
shrinking_dense_dim = true;
break;
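The two loops above run the same shrink test, once on the sparse prefix of the shape and once on the dense suffix. A compact way to see it (`shrinks_any_dim` is a hypothetical helper, not part of ATen):

#include <vector>
#include <cstdint>
#include <cassert>

// Hypothetical helper: true if any entry of new_size is smaller than
// the matching entry of old_size within [begin, end).
static bool shrinks_any_dim(const std::vector<int64_t>& old_size,
                            const std::vector<int64_t>& new_size,
                            int64_t begin, int64_t end) {
  for (int64_t i = begin; i < end; i++) {
    if (new_size[i] < old_size[i]) return true;
  }
  return false;
}

int main() {
  // Shape {4, 5, 6, 7} with sparse_dim == 2: sparse part {4, 5},
  // dense part {6, 7}. Growing is fine; shrinking either part is not.
  std::vector<int64_t> old_size = {4, 5, 6, 7};
  std::vector<int64_t> new_size = {4, 5, 6, 3};  // shrinks a dense dim
  int64_t sparse_dim = 2, total = 4;
  assert(!shrinks_any_dim(old_size, new_size, 0, sparse_dim));
  assert(shrinks_any_dim(old_size, new_size, sparse_dim, total));
  return 0;
}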
out << ")";
} else if (auto value = t.cast<DimensionedTensorType>()) {
out << toString(value->scalarType()) << "(";
- for (int i = 0; i < value->dim(); ++i) {
+ for (int64_t i = 0; i < value->dim(); ++i) {
if (i > 0) {
out << ", ";
}
AdvancedIndex::AdvancedIndex(const Tensor& src, TensorList indices_list)
{
int64_t element_size_bytes = src.type().elementSizeInBytes();
- int dims_before = 0, dims_after = 0, dims_indexed = 0;
+ int64_t dims_before = 0, dims_after = 0, dims_indexed = 0;
IntArrayRef replacement_shape;
for (size_t dim = 0; dim < indices_list.size(); dim++) {
if (!indices_list[dim].defined()) {
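For context: this constructor classifies every dimension of `src` into dims before the indexed block, dims carrying a defined index tensor, and dims after it. A sketch of that bookkeeping, under the assumption that the indexed dims form one contiguous block:

#include <vector>
#include <cstdint>
#include <cassert>

int main() {
  // true marks a dim with a defined index tensor, mirroring
  // indices_list[dim].defined() in the constructor above.
  std::vector<bool> has_index = {false, true, true, false};

  int64_t dims_before = 0, dims_indexed = 0, dims_after = 0;
  for (bool defined : has_index) {
    if (!defined) {
      (dims_indexed == 0 ? dims_before : dims_after)++;
    } else {
      dims_indexed++;
    }
  }
  // One leading dim, a block of two indexed dims, one trailing dim;
  // the indexed block is replaced by the broadcast index shape.
  assert(dims_before == 1 && dims_indexed == 2 && dims_after == 1);
  return 0;
}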
using DimMask = TensorIterator::DimMask;
-static DimMask make_dim_mask(IntArrayRef dims, int ndim) {
+static DimMask make_dim_mask(IntArrayRef dims, int64_t ndim) {
auto mask = DimMask();
if (dims.empty()) {
mask.flip();
} else {
- for (int dim : dims) {
+ for (int64_t dim : dims) {
mask.set(maybe_wrap_dim(dim, ndim));
}
}
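`make_dim_mask` turns a list of (possibly negative) reduction dims into a bitmask over `ndim` dimensions; an empty list means "reduce over everything", hence `mask.flip()`. A standalone sketch with `std::bitset` and a hand-rolled wrap (`maybe_wrap_dim` in ATen maps a dim in [-ndim, ndim) into [0, ndim); the cap of 64 below is arbitrary, not the ATen constant):

#include <bitset>
#include <vector>
#include <cstdint>
#include <cassert>

using DimMask = std::bitset<64>;  // illustrative stand-in

static int64_t wrap_dim(int64_t dim, int64_t ndim) {
  return dim < 0 ? dim + ndim : dim;  // e.g. -1 -> ndim - 1
}

static DimMask make_mask(const std::vector<int64_t>& dims, int64_t ndim) {
  DimMask mask;
  if (dims.empty()) {
    mask.flip();  // no dims given: reduce over every dimension
  } else {
    for (int64_t dim : dims) {
      mask.set(wrap_dim(dim, ndim));
    }
  }
  return mask;
}

int main() {
  auto mask = make_mask({-1, 0}, /*ndim=*/4);
  assert(mask.test(0) && mask.test(3) && !mask.test(1));
  return 0;
}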
" and ",
toString(dtype),
".");
- int ndim = self.dim();
+ int64_t ndim = self.dim();
auto mask = make_dim_mask(dim, ndim);
allocate_reduction_result(result, self, mask, keepdim, dtype);
auto viewed_result = review_reduce_result(result, ndim, mask, keepdim);
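`allocate_reduction_result` sizes the output from that mask: a reduced dim is either dropped or, under `keepdim`, kept as size 1. A hypothetical shape computation showing the rule (not the ATen function):

#include <vector>
#include <cstdint>

// Hypothetical: output shape of a reduction over masked dims.
static std::vector<int64_t> reduced_shape(const std::vector<int64_t>& shape,
                                          const std::vector<bool>& mask,
                                          bool keepdim) {
  std::vector<int64_t> out;
  for (size_t i = 0; i < shape.size(); i++) {
    if (!mask[i]) out.push_back(shape[i]);   // untouched dim survives
    else if (keepdim) out.push_back(1);      // reduced dim kept as 1
  }
  return out;
}

int main() {
  // {2, 3, 4} reduced over dim 1: {2, 4}, or {2, 1, 4} with keepdim.
  auto a = reduced_shape({2, 3, 4}, {false, true, false}, false);
  auto b = reduced_shape({2, 3, 4}, {false, true, false}, true);
  return (a.size() == 2 && b.size() == 3) ? 0 : 1;
}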
// returns 1 if dim0 should come after dim1, -1 if dim0 should come
// before dim1, and 0 if the comparison is ambiguous.
- auto should_swap = [&](int dim0, int dim1) {
+ auto should_swap = [&](size_t dim0, size_t dim1) {
int ret = 0;
for (int arg = 0; arg < ntensors(); arg++) {
if (operands_[arg].stride_bytes.empty()) {
continue;
}
- int stride0 = operands_[arg].stride_bytes[dim0];
- int stride1 = operands_[arg].stride_bytes[dim1];
+ int64_t stride0 = operands_[arg].stride_bytes[dim0];
+ int64_t stride1 = operands_[arg].stride_bytes[dim1];
if (operands_[arg].is_output) {
// move reduced dimensions to the front
if ((stride0 == 0) != (stride1 == 0)) {
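Widening `stride0`/`stride1` is not cosmetic: `stride_bytes` entries on a large tensor can exceed `INT_MAX`, and an `int` comparison would then order dimensions by a wrapped value. A small sketch of the overflow (shape chosen to force it):

#include <cstdint>
#include <cstdio>

int main() {
  // Byte strides of a contiguous float tensor of shape
  // {2, 100000, 10000}: the outermost stride is 100000 * 10000 * 4
  // = 4,000,000,000 bytes, which does not fit in a 32-bit int.
  int64_t stride_bytes[3] = {4000000000LL, 40000LL, 4LL};

  int narrowed = (int)stride_bytes[0];  // wraps, typically to a negative value
  printf("int64_t: %lld  int: %d\n", (long long)stride_bytes[0], narrowed);

  // Comparing narrowed strides, as should_swap would with plain int,
  // can invert the intended dimension order.
  return 0;
}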