From: MaheshRavishankar
Date: Tue, 31 Mar 2020 15:59:45 +0000 (-0700)
Subject: [mlir][Linalg] Allow tiling of batch dimension for convolution ops with padding.
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=da7b6fe942fc9bbf535321477d614051d05aee4b;p=platform%2Fupstream%2Fllvm.git

[mlir][Linalg] Allow tiling of batch dimension for convolution ops with padding.

The existing tiling implementation of Linalg already works for tiling
the batch dimension of the convolution op, so a conv with padding no
longer needs to be rejected outright; tiling is refused only when a
tile size for a non-batch dimension is nonzero.

Differential Revision: https://reviews.llvm.org/D76637
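
The check added to both overloads of tileLinalgOpImpl in the diff below
reduces to a single predicate: when the conv op carries padding, tiling
may proceed only if every tile size after the first (batch) loop is
zero, where "tiling by zero" means leaving that loop untiled. A minimal
standalone sketch of that predicate, not part of the patch itself
(plain C++ with no MLIR dependency; canTileConvWithPadding is a name
invented here for illustration):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // With padding present, tiling is legal only along the batch
    // dimension, i.e. the first loop: every later tile size must be
    // zero (a zero tile size means the loop is left untiled).
    static bool
    canTileConvWithPadding(const std::vector<std::int64_t> &tileSizes) {
      if (tileSizes.size() <= 1)
        return true; // nothing beyond the batch loop is being tiled
      // Mirrors !llvm::all_of(tileSizes.drop_front(), isZero),
      // in which case tileLinalgOpImpl returns llvm::None.
      return std::all_of(tileSizes.begin() + 1, tileSizes.end(),
                         [](std::int64_t v) { return v == 0; });
    }

    int main() {
      // linalg-tile-sizes=2: only the batch loop is tiled (prints 1).
      std::cout << canTileConvWithPadding({2}) << "\n";
      // linalg-tile-sizes=2,3,0,0,4: non-batch tile sizes are nonzero,
      // so tiling is rejected (prints 0).
      std::cout << canTileConvWithPadding({2, 3, 0, 0, 4}) << "\n";
    }

The two calls correspond to the two RUN configurations in the new test
below: linalg-tile-sizes=2 tiles the batch loop and rewrites the conv
onto subviews of its operands, while linalg-tile-sizes=2,3,0,0,4 is
refused and leaves the linalg.conv unchanged.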
---

diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index 2d9ca16..14253e3 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -336,9 +336,12 @@ Optional<TiledLinalgOp> static tileLinalgOpImpl(OpBuilder &b, LinalgOp op,
          "expected matching number of tile sizes and loops");
 
   if (auto convOp = dyn_cast<ConvOp>(op.getOperation())) {
-    // TODO(ntv): add a level of indirection to linalg.generic.
-    if (convOp.padding())
-      llvm_unreachable("Unexpected conv with padding");
+    // For conv op only support tiling along batch dimension (which is the first
+    // loop).
+    if (convOp.padding() &&
+        !llvm::all_of(tileSizes.drop_front(),
+                      [](Value val) { return isZero(val); }))
+      return llvm::None;
   }
 
   // If permutation is empty, use the identity. Build the permutation map
@@ -420,12 +423,6 @@ tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ArrayRef<int64_t> tileSizes,
   if (tileSizes.empty())
     return llvm::None;
 
-  if (auto convOp = dyn_cast<ConvOp>(op.getOperation())) {
-    // TODO(ntv): add a level of indirection to linalg.generic.
-    if (convOp.padding())
-      llvm_unreachable("Unexpected conv with padding");
-  }
-
   // The following uses the convention that "tiling by zero" skips tiling a
   // particular dimension. This convention is significantly simpler to handle
   // instead of adjusting affine maps to account for missing dimensions.
@@ -436,6 +433,14 @@ tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ArrayRef<int64_t> tileSizes,
   if (llvm::all_of(tileSizes, [](int64_t v) { return v == 0; }))
     return llvm::None;
 
+  if (auto convOp = dyn_cast<ConvOp>(op.getOperation())) {
+    // For conv op only support tiling along batch dimension (which is the first
+    // loop).
+    if (convOp.padding() && !llvm::all_of(tileSizes.drop_front(),
+                                          [](int64_t val) { return val == 0; }))
+      return llvm::None;
+  }
+
   // Create a builder for tile size constants.
   OpBuilder::InsertionGuard g(b);
   b.setInsertionPoint(op);
diff --git a/mlir/test/Dialect/Linalg/tile_conv_padding.mlir b/mlir/test/Dialect/Linalg/tile_conv_padding.mlir
new file mode 100644
index 0000000..a704233
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/tile_conv_padding.mlir
@@ -0,0 +1,40 @@
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,0,0,4" | FileCheck %s -check-prefix=TILE-23004
+// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2" | FileCheck %s -check-prefix=TILE-20000
+
+// TILE-23004-DAG: #[[strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
+// TILE-20000-DAG: #[[strided4D:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3)>
+// TILE-20000-DAG: #[[minmap:.*]] = affine_map<(d0, d1, d2) -> (d0, d1 - d2)>
+// TILE-20000-DAG: #[[subviewstride:.*]] = affine_map<(d0, d1, d2, d3)[s0, s1, s2, s3, s4] -> (d0 * s1 + s0 + d1 * s2 + d2 * s3 + d3 * s4)>
+
+func @conv_padding(%arg0: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg1: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, %arg2: memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>) {
+  linalg.conv(%arg0, %arg1, %arg2) {dilations = [10, 20], padding = dense<[[1, 1], [0, 1]]> : tensor<2x2xi64>, strides = [30, 40]} : memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>, memref<?x?x?x?xf32, offset: ?, strides: [?, ?, ?, 1]>
+  return
+}
+// TILE-23004-LABEL: func @conv_padding(
+// TILE-23004-SAME:   %[[ARG0:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>
+// TILE-23004-SAME:   %[[ARG1:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>
+// TILE-23004-SAME:   %[[ARG2:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>)
+// TILE-23004:   linalg.conv(%[[ARG0]], %[[ARG1]], %[[ARG2]])
+
+// TILE-20000-LABEL: func @conv_padding(
+// TILE-20000-SAME:   %[[ARG0:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>
+// TILE-20000-SAME:   %[[ARG1:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>
+// TILE-20000-SAME:   %[[ARG2:[a-zA-Z0-9_]*]]: memref<?x?x?x?xf32, #[[strided4D]]>)
+// TILE-20000-DAG:   %[[C0:.*]] = constant 0 : index
+// TILE-20000-DAG:   %[[C1:.*]] = constant 1 : index
+// TILE-20000-DAG:   %[[C2:.*]] = constant 2 : index
+// TILE-20000:   %[[B:.*]] = dim %[[ARG1]], 0
+// TILE-20000:   loop.for %[[ivI:.*]] = %[[C0]] to %[[B]] step %[[C2]] {
+// TILE-20000:     %[[DIM10:.*]] = dim %[[ARG1]], 0
+// TILE-20000:     %[[EXTENT:.*]] = affine.min #[[minmap]](%[[C2]], %[[DIM10]], %[[ivI]])
+// TILE-20000:     %[[DIM11:.*]] = dim %[[ARG1]], 1
+// TILE-20000:     %[[DIM12:.*]] = dim %[[ARG1]], 2
+// TILE-20000:     %[[DIM13:.*]] = dim %[[ARG1]], 3
+// TILE-20000:     %[[SUBVIEW1:.*]] = subview %[[ARG1]][%[[ivI]], %[[C0]], %[[C0]], %[[C0]]] [%[[EXTENT]], %[[DIM11]], %[[DIM12]], %[[DIM13]]]
+// TILE-20000:     %[[DIM20:.*]] = dim %[[ARG2]], 0
+// TILE-20000:     %[[EXTENT:.*]] = affine.min #[[minmap]](%[[C2]], %[[DIM20]], %[[ivI]])
+// TILE-20000:     %[[DIM21:.*]] = dim %[[ARG2]], 1
+// TILE-20000:     %[[DIM22:.*]] = dim %[[ARG2]], 2
+// TILE-20000:     %[[DIM23:.*]] = dim %[[ARG2]], 3
+// TILE-20000:     %[[SUBVIEW2:.*]] = subview %[[ARG2]][%[[ivI]], %[[C0]], %[[C0]], %[[C0]]] [%[[EXTENT]], %[[DIM21]], %[[DIM22]], %[[DIM23]]]
+// TILE-20000:     linalg.conv(%[[ARG0]], %[[SUBVIEW1]], %[[SUBVIEW2]])