[mlir][linalg] Add one-shot-bufferize tests for Linalg ops: reduce, map and transpose.
author     Oleg Shyshkov <shyshkov@google.com>
           Fri, 21 Oct 2022 13:19:52 +0000 (15:19 +0200)
committer  Oleg Shyshkov <shyshkov@google.com>
           Fri, 21 Oct 2022 13:40:02 +0000 (15:40 +0200)
Differential Revision: https://reviews.llvm.org/D136431
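
For reference, these FileCheck tests are driven by the RUN line already present at the top of one-shot-bufferize.mlir (not shown in this diff). A hedged approximation of that invocation, useful for reproducing the checks locally (the exact pass options in the file may differ):

    // RUN: mlir-opt %s -one-shot-bufferize="bufferize-function-boundaries allow-return-allocs" -split-input-file | FileCheck %s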

mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
mlir/test/Dialect/Linalg/one-shot-bufferize.mlir

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index 9c2246e..1692a0f 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -284,6 +284,11 @@ def MapOp : LinalgStructuredBase_Op<"map", [
       return getInputOperands();
     }
 
+    bool payloadUsesValueFromOperand(OpOperand * opOperand) {
+      if (isOutput(opOperand)) return false;
+      return !getMatchingBlockArgument(opOperand).use_empty();
+    }
+
     static std::function<void(mlir::ImplicitLocOpBuilder &, mlir::Block &,
                               mlir::ArrayRef<mlir::NamedAttribute>)>
     getRegionBuilder() {
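
The payloadUsesValueFromOperand hook added above lets the bufferization analysis treat an operand as read only when its matching payload block argument is actually used; for linalg.map the init operand is never read by the payload, so the analysis does not have to treat it as a read. A hedged sketch of the memref-level form the @map_binary test below is checking against (buffer names such as %lhs_buf are illustrative and layout attributes are elided; neither is taken from the diff):

    // The init buffer is only written, never read by the payload.
    linalg.map ins(%lhs_buf, %rhs_buf : memref<64xf32>, memref<64xf32>)
               outs(%init_buf : memref<64xf32>)
               (%lhs_elem: f32, %rhs_elem: f32) {
                 %0 = arith.addf %lhs_elem, %rhs_elem : f32
                 linalg.yield %0 : f32
               }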
diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
index b7be9d4..e71f566 100644
--- a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
@@ -335,6 +335,59 @@ func.func @op_is_reading_but_following_ops_are_not(
 
 // -----
 
+// CHECK-LABEL: func @map_binary
+// CHECK-SAME:  %[[LHS:[0-9a-zA-Z]*]]: memref<64xf32
+// CHECK-SAME:  %[[RHS:[0-9a-zA-Z]*]]: memref<64xf32
+func.func @map_binary(%lhs: tensor<64xf32>, %rhs: tensor<64xf32>,
+                      %init: tensor<64xf32>) -> tensor<64xf32> {
+   // CHECK:      linalg.map
+   // CHECK-SAME: ins(%[[LHS]], %[[RHS]] : memref<64xf32
+   %add = linalg.map
+          ins(%lhs, %rhs: tensor<64xf32>, tensor<64xf32>)
+          outs(%init:tensor<64xf32>)
+          (%lhs_elem: f32, %rhs_elem: f32) {
+            %0 = arith.addf %lhs_elem, %rhs_elem: f32
+            linalg.yield %0: f32
+          }
+  func.return %add : tensor<64xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @reduce
+// CHECK-SAME:  %[[INPUT:.*]]: memref<16x32x64xf32
+func.func @reduce(%input: tensor<16x32x64xf32>,
+                  %init: tensor<16x64xf32>) -> tensor<16x64xf32> {
+  // CHECK:     linalg.reduce
+  // CHECK-SAME: ins(%[[INPUT]] : memref<16x32x64xf32
+  %reduce = linalg.reduce
+      ins(%input:tensor<16x32x64xf32>)
+      outs(%init:tensor<16x64xf32>)
+      dimensions = [1]
+      (%in: f32, %out: f32) {
+        %0 = arith.addf %in, %out: f32
+        linalg.yield %0: f32
+      }
+  func.return %reduce : tensor<16x64xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @transpose
+// CHECK-SAME:  %[[ARG0:.*]]: memref<16x32x64xf32
+func.func @transpose(%input: tensor<16x32x64xf32>,
+                     %init: tensor<32x64x16xf32>) -> tensor<32x64x16xf32> {
+  // CHECK:      linalg.transpose
+  // CHECK-SAME: ins(%[[ARG0]] : memref<16x32x64xf32
+  %transpose = linalg.transpose
+      ins(%input:tensor<16x32x64xf32>)
+      outs(%init:tensor<32x64x16xf32>)
+      permutation = [1, 2, 0]
+  func.return %transpose : tensor<32x64x16xf32>
+}
+
+// -----
+
 //===----------------------------------------------------------------------===//
 // AllocTensorOp elimination would produce SSA violations for the example below.
 //===----------------------------------------------------------------------===//
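
For completeness, a hedged sketch of the bufferized form the @reduce and @transpose tests above are matching against, assuming the init operands bufferize in place (buffer names such as %input_buf are illustrative and layout attributes are elided): after one-shot-bufferize these ops operate directly on memrefs and yield no SSA result.

    // @reduce: the reduction accumulates into the init buffer.
    linalg.reduce ins(%input_buf : memref<16x32x64xf32>)
                  outs(%acc_buf : memref<16x64xf32>)
                  dimensions = [1]
                  (%in: f32, %out: f32) {
                    %0 = arith.addf %in, %out : f32
                    linalg.yield %0 : f32
                  }

    // @transpose: the permuted data is written to the init buffer.
    linalg.transpose ins(%input_buf : memref<16x32x64xf32>)
                     outs(%init_buf : memref<32x64x16xf32>)
                     permutation = [1, 2, 0]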