// CHECK-DAG: %[[ARG_M:.*]] = bufferization.to_memref %[[ARG]] : memref<*xf32>
// CHECK-DAG: %[[ALLOC:.*]] = memref.alloc(%[[DYNAMIC_EXTENT]]) {{.*}} : memref<?xindex>
// CHECK: %[[ALLOC_T:.*]] = bufferization.to_tensor %[[ALLOC]]
// CHECK: %[[MAPPED:.*]] = linalg.map
// CHECK: outs(%[[ALLOC_T]] : tensor<?xindex>)
// CHECK: %[[INDEX:.*]] = linalg.index 0 : index
// CHECK: %[[ELEM:.*]] = memref.dim %[[ARG_M]], %[[INDEX]] : memref<*xf32>
// CHECK: linalg.yield %[[ELEM]]
// CHECK-SAME: %[[DYNAMIC_EXTENT:.*]]: index) -> tensor<16x?xindex> {
// CHECK: %[[ALLOC:.*]] = memref.alloc(%[[DYNAMIC_EXTENT]]) {{.*}} : memref<16x?xindex>
// CHECK: %[[ALLOC_T:.*]] = bufferization.to_tensor %[[ALLOC]]
// CHECK: %[[MAPPED:.*]] = linalg.map
// CHECK: outs(%[[ALLOC_T]] : tensor<16x?xindex>)
// CHECK: %[[INDEX0:.*]] = linalg.index 0
// CHECK: %[[INDEX1:.*]] = linalg.index 1
// CHECK: %[[ADD:.*]] = arith.addi %[[INDEX0]], %[[INDEX1]]
// CHECK-DAG: %[[size1:.*]] = affine.apply #[[$sum_map]]()[%[[dim1]], %[[l2]], %[[h2]]]
// CHECK: %[[alloc:.*]] = memref.alloc(%[[size0]], %[[size1]]) {{.*}} : memref<?x?xindex>
// CHECK: %[[alloc_t:.*]] = bufferization.to_tensor %[[alloc]]
 // CHECK: %[[mapped:.*]] = linalg.map
 // CHECK: outs(%[[alloc_t]] : tensor<?x?xindex>)
// CHECK: %[[index0:.*]] = linalg.index 0
// CHECK: %[[index1:.*]] = linalg.index 1
// CHECK: %[[mul:.*]] = arith.muli %[[index0]], %[[index1]]