if (attr.cast<IntegerAttr>().getInt() == 1)
unusedDims.set(dim.index());
+ // Early exit when the number of unused dims equals the rank reduction
+ // (original rank minus reduced rank).
+ if (unusedDims.count() + reducedType.getRank() == originalType.getRank())
+ return unusedDims;
+
SmallVector<int64_t> originalStrides, candidateStrides;
int64_t originalOffset, candidateOffset;
if (failed(
%1 = memref.reinterpret_cast %0 to offset: [0], sizes: [%size2], strides: [1] : memref<?xi8> to memref<?xi8>
return %1 : memref<?xi8>
}
+
+// -----
+
+func @canonicalize_rank_reduced_subview(%arg0 : memref<8x?xf32>,
+ %arg1 : index) -> memref<?xf32, offset : ?, strides : [?]> {
+ %c0 = arith.constant 0 : index
+ %c1 = arith.constant 1 : index
+ %0 = memref.subview %arg0[%c0, %c0] [1, %arg1] [%c1, %c1] : memref<8x?xf32> to memref<?xf32, offset : ?, strides : [?]>
+ return %0 : memref<?xf32, offset : ?, strides : [?]>
+}
+// CHECK-DAG: #[[MAP:.+]] = affine_map<(d0)[s0] -> (d0 + s0)>
+// CHECK: func @canonicalize_rank_reduced_subview
+// CHECK-SAME: %[[ARG0:.+]]: memref<8x?xf32>
+// CHECK-SAME: %[[ARG1:.+]]: index
+// CHECK: %[[SUBVIEW:.+]] = memref.subview %[[ARG0]][0, 0] [1, %[[ARG1]]] [1, 1]
+// CHECK-SAME: memref<8x?xf32> to memref<?xf32, #[[MAP]]>
// -----
-func @static_stride_to_dynamic_stride(%arg0 : memref<?x?x?xf32>, %arg1 : index,
- %arg2 : index) -> memref<?x?xf32, offset:?, strides: [?, ?]> {
- // expected-error @+1 {{expected result type to be 'memref<1x?x?xf32, affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>>' or a rank-reduced version. (mismatch of result layout)}}
- %0 = memref.subview %arg0[0, 0, 0] [1, %arg1, %arg2] [1, 1, 1] : memref<?x?x?xf32> to memref<?x?xf32, offset: ?, strides: [?, ?]>
- return %0 : memref<?x?xf32, offset: ?, strides: [?, ?]>
-}
-
-// -----
-
#map0 = affine_map<(d0, d1)[s0] -> (d0 * 16 + d1)>
func @subview_bad_offset_1(%arg0: memref<16x16xf32>) {