From 00ac39574b9f8b4f3d6bd4b12924ef3174e6f0e9 Mon Sep 17 00:00:00 2001
From: Oleg Shyshkov
Date: Fri, 21 Oct 2022 15:19:52 +0200
Subject: [PATCH] [mlir][linalg] Add one-shot-bufferize tests for Linalg ops: reduce, map and transpose.

Differential Revision: https://reviews.llvm.org/D136431
---
 .../mlir/Dialect/Linalg/IR/LinalgStructuredOps.td |  5 ++
 mlir/test/Dialect/Linalg/one-shot-bufferize.mlir  | 53 ++++++++++++++++++++++
 2 files changed, 58 insertions(+)

diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
index 9c2246e..1692a0f 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgStructuredOps.td
@@ -284,6 +284,11 @@ def MapOp : LinalgStructuredBase_Op<"map", [
       return getInputOperands();
     }
 
+    bool payloadUsesValueFromOperand(OpOperand * opOperand) {
+      if (isOutput(opOperand)) return false;
+      return !getMatchingBlockArgument(opOperand).use_empty();
+    }
+
     static std::function<void(mlir::ImplicitLocOpBuilder &, mlir::Block &,
                               mlir::ArrayRef<mlir::NamedAttribute>)>
     getRegionBuilder() {
diff --git a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
index b7be9d4..e71f566 100644
--- a/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
+++ b/mlir/test/Dialect/Linalg/one-shot-bufferize.mlir
@@ -335,6 +335,59 @@ func.func @op_is_reading_but_following_ops_are_not(
 
 // -----
 
+// CHECK-LABEL: func @map_binary
+// CHECK-SAME:    %[[LHS:[0-9a-zA-Z]*]]: memref<64xf32
+// CHECK-SAME:    %[[RHS:[0-9a-zA-Z]*]]: memref<64xf32
+func.func @map_binary(%lhs: tensor<64xf32>, %rhs: tensor<64xf32>,
+                      %init: tensor<64xf32>) -> tensor<64xf32> {
+  // CHECK: linalg.map
+  // CHECK-SAME: ins(%[[LHS]], %[[RHS]] : memref<64xf32
+  %add = linalg.map
+          ins(%lhs, %rhs: tensor<64xf32>, tensor<64xf32>)
+          outs(%init:tensor<64xf32>)
+          (%lhs_elem: f32, %rhs_elem: f32) {
+            %0 = arith.addf %lhs_elem, %rhs_elem: f32
+            linalg.yield %0: f32
+          }
+  func.return %add : tensor<64xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @reduce
+// CHECK-SAME:    %[[INPUT:.*]]: memref<16x32x64xf32
+func.func @reduce(%input: tensor<16x32x64xf32>,
+                  %init: tensor<16x64xf32>) -> tensor<16x64xf32> {
+  // CHECK: linalg.reduce
+  // CHECK-SAME: ins(%[[INPUT]] : memref<16x32x64xf32
+  %reduce = linalg.reduce
+      ins(%input:tensor<16x32x64xf32>)
+      outs(%init:tensor<16x64xf32>)
+      dimensions = [1]
+      (%in: f32, %out: f32) {
+        %0 = arith.addf %in, %out: f32
+        linalg.yield %0: f32
+      }
+  func.return %reduce : tensor<16x64xf32>
+}
+
+// -----
+
+// CHECK-LABEL: func @transpose
+// CHECK-SAME:    %[[ARG0:.*]]: memref<16x32x64xf32
+func.func @transpose(%input: tensor<16x32x64xf32>,
+                     %init: tensor<32x64x16xf32>) -> tensor<32x64x16xf32> {
+  // CHECK: linalg.transpose
+  // CHECK-SAME: ins(%[[ARG0]] : memref<16x32x64xf32
+  %transpose = linalg.transpose
+      ins(%input:tensor<16x32x64xf32>)
+      outs(%init:tensor<32x64x16xf32>)
+      permutation = [1, 2, 0]
+  func.return %transpose : tensor<32x64x16xf32>
+}
+
+// -----
+
 //===----------------------------------------------------------------------===//
 // AllocTensorOp elimination would produce SSA violations for the example below.
 //===----------------------------------------------------------------------===//
-- 
2.7.4
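
Note (not part of the patch): below is a rough sketch of the kind of IR the new @map_binary test expects one-shot-bufferize to produce. The CHECK lines above only verify that linalg.map survives bufferization and that its operands become memref<64xf32...>; the exact function signature, return handling, and memref layouts depend on the bufferization options used by the test file's RUN line, so the snippet is illustrative only.

// Illustrative sketch, assuming function-boundary bufferization maps each
// tensor<64xf32> argument of @map_binary to a plain memref<64xf32>; the real
// output may carry strided layouts and may still return a memref.
func.func @map_binary(%lhs: memref<64xf32>, %rhs: memref<64xf32>,
                      %init: memref<64xf32>) {
  // Same op as in the tensor version, but in destination-passing style on
  // buffers: the payload result is written in place into %init, so the op
  // has no SSA results.
  linalg.map
      ins(%lhs, %rhs : memref<64xf32>, memref<64xf32>)
      outs(%init : memref<64xf32>)
      (%lhs_elem: f32, %rhs_elem: f32) {
        %0 = arith.addf %lhs_elem, %rhs_elem : f32
        linalg.yield %0 : f32
      }
  func.return
}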