From: MaheshRavishankar
Date: Fri, 7 May 2021 00:17:29 +0000 (-0700)
Subject: [mlir][Linalg] Allow folding to rank-zero tensor when using rank-reducing subtensors.
X-Git-Tag: llvmorg-14-init~7371
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=05a89312d812bb5dcec6deca8f1e28a198ce1167;p=platform%2Fupstream%2Fllvm.git

[mlir][Linalg] Allow folding to rank-zero tensor when using rank-reducing subtensors.

The pattern that converts subtensor ops to their rank-reduced versions
(by dropping unit dims in the result) can also convert to a zero-rank
tensor. Handle that case. This also fixes an OOB access bug in the
existing pattern for such cases.

Differential Revision: https://reviews.llvm.org/D101949
---

diff --git a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
index 47f490c..9c4d8af 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/DropUnitDims.cpp
@@ -459,7 +459,10 @@ getReassociationMapForFoldingUnitDims(ArrayRef<OpFoldResult> mixedSizes) {
     reassociation.emplace_back(ReassociationIndices{});
     std::swap(reassociation.back(), curr);
   }
-  if (!curr.empty())
+  // When the reassociation is not empty, fold the remaining unit dimensions
+  // into the last dimension. If the reassociation so far is empty, leave it
+  // empty; this will fold everything to a rank-0 tensor.
+  if (!curr.empty() && !reassociation.empty())
     reassociation.back().append(curr.begin(), curr.end());
   return reassociation;
 }
diff --git a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
index 2b8855a..808622b 100644
--- a/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
+++ b/mlir/test/Dialect/Linalg/drop-unit-extent-dims.mlir
@@ -496,3 +496,27 @@ func @unit_dim_for_reduction_inner(%arg0: tensor<?x1x?x1xf32>) -> tensor<?x1xf32>
 //      CHECK:   %[[RESULT_RESHAPE:.+]] = linalg.tensor_reshape %[[RESULT]] {{\[}}[0, 1]]
 //      CHECK:   return %[[RESULT_RESHAPE]]
+
+// -----
+
+func @subtensor_unit_dims(%arg0: tensor<1x3xf32>) -> tensor<1x1xf32> {
+  %0 = subtensor %arg0[0, 2] [1, 1] [1, 1] : tensor<1x3xf32> to tensor<1x1xf32>
+  return %0 : tensor<1x1xf32>
+}
+// CHECK-LABEL: func @subtensor_unit_dims
+//       CHECK:   %[[SUBTENSOR:.+]] = subtensor
+//  CHECK-SAME:     tensor<1x3xf32> to tensor<f32>
+//       CHECK:   %[[RESULT:.+]] = linalg.tensor_reshape %[[SUBTENSOR]] []
+//       CHECK:   return %[[RESULT]]
+
+// -----
+
+func @subtensor_insert_unit_dims(%arg0: tensor<1x3xf32>, %arg1: tensor<1x1xf32>) -> tensor<1x3xf32> {
+  %0 = subtensor_insert %arg1 into %arg0[0, 2] [1, 1] [1, 1] : tensor<1x1xf32> into tensor<1x3xf32>
+  return %0 : tensor<1x3xf32>
+}
+// CHECK-LABEL: func @subtensor_insert_unit_dims
+//       CHECK:   %[[RESHAPE:.+]] = linalg.tensor_reshape %{{.+}} []
+//       CHECK:   %[[RESULT:.+]] = subtensor_insert %[[RESHAPE]]
+//  CHECK-SAME:     tensor<f32> into tensor<1x3xf32>
+//       CHECK:   return %[[RESULT]]
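
Editor's note: the sketch below is a minimal, self-contained C++ model of the guarded fold logic, not the MLIR API. The function name reassociationForFoldingUnitDims, the plain std::vector types, and the static integer sizes are stand-ins for getReassociationMapForFoldingUnitDims and its ArrayRef<OpFoldResult> operand (dynamic sizes are ignored here). It illustrates why the added !reassociation.empty() check matters: when every result dimension is a unit dimension, no group is ever created, so the old code's reassociation.back() touched an empty vector, while the new code returns an empty map that reshapes the result to a rank-0 tensor.

// Simplified, self-contained model of the fixed fold logic. A size of 1 marks
// a unit dimension of the rank-reduced subtensor result.
#include <cstdint>
#include <iostream>
#include <vector>

using ReassociationIndices = std::vector<int64_t>;

static std::vector<ReassociationIndices>
reassociationForFoldingUnitDims(const std::vector<int64_t> &sizes) {
  std::vector<ReassociationIndices> reassociation;
  ReassociationIndices curr;
  for (int64_t dim = 0, e = sizes.size(); dim != e; ++dim) {
    curr.push_back(dim);
    if (sizes[dim] == 1)
      continue; // unit dims accumulate into the current group
    reassociation.emplace_back();
    std::swap(reassociation.back(), curr);
  }
  // Fold trailing unit dims into the last group only if a group exists. If
  // every size was 1, `reassociation` is empty; calling back() here is the
  // out-of-bounds access the patch removes, and returning the empty map
  // instead reshapes the result down to a rank-0 tensor.
  if (!curr.empty() && !reassociation.empty())
    reassociation.back().insert(reassociation.back().end(), curr.begin(),
                                curr.end());
  return reassociation;
}

int main() {
  // tensor<1x1xf32> result (the new test): every dim is a unit dim, so the
  // map is empty and the subtensor folds to tensor<f32>.
  std::cout << reassociationForFoldingUnitDims({1, 1}).size() << "\n"; // 0
  // tensor<1x4xf32> result: the leading unit dim joins the non-unit dim,
  // giving the single group [0, 1] and a rank-1 result.
  std::cout << reassociationForFoldingUnitDims({1, 4}).size() << "\n"; // 1
}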