From f18a8612995e3b4b7af9d7430374915724cdde51 Mon Sep 17 00:00:00 2001
From: Diego Caballero
Date: Wed, 29 Mar 2023 19:19:24 +0000
Subject: [PATCH] [mlir][Vector] Enable masked vectorization of linalg.fill

linalg.fill was already vectorizable with masks but was not accepted by
the dynamic-shape pre-checks.

Reviewed By: nicolasvasilache

Differential Revision: https://reviews.llvm.org/D146856
---
 .../lib/Dialect/Linalg/Transforms/Vectorization.cpp |  8 +-------
 mlir/test/Dialect/Linalg/vectorization.mlir         | 21 +++++++++++++++++++++
 2 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
index 6b27b41..98ee5e2 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
@@ -1291,19 +1291,13 @@ static LogicalResult reductionPreconditions(LinalgOp op) {
 static LogicalResult
 vectorizeDynamicLinalgOpPrecondition(linalg::LinalgOp op) {
   // TODO: Masking only supports dynamic generic ops for now.
-  if (!isa<linalg::GenericOp>(op))
+  if (!isa<linalg::GenericOp, linalg::FillOp>(op))
     return failure();
 
   // TODO: Index vectorization assumes static shape.
   if (op.hasIndexSemantics())
     return failure();
 
-  // TODO: 0-d vectors are not supported yet.
-  if (llvm::any_of(op.getIndexingMapsArray(), [](AffineMap map) {
-        return map.isEmpty() || map.getResults().empty();
-      }))
-    return failure();
-
   LDBG("Dynamically-shaped op meets vectorization pre-conditions\n");
   return success();
 }
diff --git a/mlir/test/Dialect/Linalg/vectorization.mlir b/mlir/test/Dialect/Linalg/vectorization.mlir
index 26e27c1..105d952 100644
--- a/mlir/test/Dialect/Linalg/vectorization.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization.mlir
@@ -2535,3 +2535,24 @@ transform.sequence failures(propagate) {
   %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
   transform.structured.masked_vectorize %0 vector_sizes [8, 32]
 }
+
+// -----
+
+func.func @vectorize_dynamic_fill(%A : tensor<?x?xf32>, %arg0 : f32) -> tensor<?x?xf32> {
+  %0 = linalg.fill ins(%arg0 : f32) outs(%A : tensor<?x?xf32>) -> tensor<?x?xf32>
+  return %0 : tensor<?x?xf32>
+}
+
+// CHECK-LABEL: func.func @vectorize_dynamic_fill
+// CHECK: %[[DIM0:.*]] = tensor.dim
+// CHECK: %[[DIM1:.*]] = tensor.dim
+// CHECK: %[[MASK:.*]] = vector.create_mask %[[DIM0]], %[[DIM1]] : vector<8x16xi1>
+// CHECK: %[[BCAST:.*]] = vector.broadcast %{{.*}} : f32 to vector<8x16xf32>
+// CHECK: vector.mask %[[MASK]] { vector.transfer_write %[[BCAST]], {{.*}} {in_bounds = [true, true]} : vector<8x16xf32>, tensor<?x?xf32> } : vector<8x16xi1>
+
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+  %0 = transform.structured.match ops{["linalg.fill"]} in %arg1 : (!pdl.operation) -> !pdl.operation
+  transform.structured.masked_vectorize %0 vector_sizes [8, 16]
+}
+
-- 
2.7.4
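
Note (not part of the patch): for a dynamically shaped linalg.fill masked-vectorized
with vector sizes [8, 16], the expected output IR looks roughly like the sketch below.
It is an illustration derived from the CHECK lines above; the value names, the
arith.constant index ops, and the trailing result type of vector.mask are assumed for
readability and are not pinned down by the test.

    %c0 = arith.constant 0 : index
    %c1 = arith.constant 1 : index
    // Read the dynamic sizes of the destination tensor.
    %dim0 = tensor.dim %A, %c0 : tensor<?x?xf32>
    %dim1 = tensor.dim %A, %c1 : tensor<?x?xf32>
    // Build a mask that disables the lanes past the dynamic bounds.
    %mask = vector.create_mask %dim0, %dim1 : vector<8x16xi1>
    // Splat the fill value and write it back under the mask.
    %splat = vector.broadcast %arg0 : f32 to vector<8x16xf32>
    %result = vector.mask %mask {
      vector.transfer_write %splat, %A[%c0, %c0] {in_bounds = [true, true]}
        : vector<8x16xf32>, tensor<?x?xf32>
    } : vector<8x16xi1> -> tensor<?x?xf32>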