From 23bd2e96fe3c945972eec8d8ad963651dd13ea6a Mon Sep 17 00:00:00 2001
From: Matthias Springer
Date: Fri, 30 Jun 2023 12:44:33 +0200
Subject: [PATCH] [mlir][Affine] Delete duplicate code: `applyMapToValues`

The same functionality is provided by `makeComposedFoldedAffineApply`.

Differential Revision: https://reviews.llvm.org/D154199
---
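Note for reviewers (commentary only, not part of the commit message): the
sketch below summarizes the caller-side migration; `map`, `values`,
`oneResultMap`, and `operands` are placeholder names for illustration, not
code from this patch.

  // Before: applyMapToValues eagerly materialized one Value per result of
  // `map`, creating an affine.apply even when the expression folds to a
  // constant.
  SmallVector<Value, 4> results =
      affine::applyMapToValues(b, loc, map, values);

  // After: compose and fold a single-result map over OpFoldResult operands.
  // The result stays an Attribute when it folds to a constant; a Value is
  // materialized only where one is actually required, e.g. with
  // getValueOrCreateConstantIndexOp() as in the ReifyPadOp change below.
  OpFoldResult ofr =
      affine::makeComposedFoldedAffineApply(b, loc, oneResultMap, operands);
  Value v = getValueOrCreateConstantIndexOp(b, loc, ofr);

Multi-result maps can be migrated one result at a time (or with
makeComposedFoldedMultiResultAffineApply). The interchange case in
Tiling.cpp needs no affine.apply at all: the inverse permutation map is a
pure permutation, so its results are plain position lookups into `ivs`.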
 mlir/include/mlir/Dialect/Affine/IR/AffineOps.h    |  4 ---
 mlir/lib/Dialect/Affine/IR/AffineOps.cpp           | 27 ---------------
 mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp      | 11 +++---
 .../Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp   | 39 +++++++++-------------
 mlir/test/Dialect/Linalg/pad_fusion.mlir           |  4 +--
 .../Linalg/resolve-shaped-type-result-dims.mlir    |  4 +--
 mlir/test/Dialect/Linalg/vectorization-masked.mlir |  2 +-
 mlir/test/Dialect/Tensor/bufferize.mlir            |  2 +-
 8 files changed, 29 insertions(+), 64 deletions(-)

diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
index 778c3b3..153878a 100644
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.h
@@ -425,10 +425,6 @@ OpFoldResult makeComposedFoldedAffineMax(OpBuilder &b, Location loc,
                                          AffineMap map,
                                          ArrayRef<OpFoldResult> operands);
 
-/// Returns the values obtained by applying `map` to the list of values.
-SmallVector<Value, 4> applyMapToValues(OpBuilder &b, Location loc,
-                                       AffineMap map, ValueRange values);
-
 /// Given an affine map `map` and its input `operands`, this method composes
 /// into `map`, maps of AffineApplyOps whose results are the values in
 /// `operands`, iteratively until no more of `operands` are the result of an
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index f110a44..ca676d9 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -1392,33 +1392,6 @@ mlir::affine::makeComposedFoldedAffineMax(OpBuilder &b, Location loc,
   return makeComposedFoldedMinMax<AffineMaxOp>(b, loc, map, operands);
 }
 
-/// Fully compose map with operands and canonicalize the result.
-/// Return the `createOrFold`'ed AffineApply op.
-static Value createFoldedComposedAffineApply(OpBuilder &b, Location loc,
-                                             AffineMap map,
-                                             ValueRange operandsRef) {
-  SmallVector<Value, 4> operands(operandsRef.begin(), operandsRef.end());
-  fullyComposeAffineMapAndOperands(&map, &operands);
-  canonicalizeMapAndOperands(&map, &operands);
-  return b.createOrFold<AffineApplyOp>(loc, map, operands);
-}
-
-SmallVector<Value, 4> mlir::affine::applyMapToValues(OpBuilder &b, Location loc,
-                                                     AffineMap map,
-                                                     ValueRange values) {
-  SmallVector<Value, 4> res;
-  res.reserve(map.getNumResults());
-  unsigned numDims = map.getNumDims(), numSym = map.getNumSymbols();
-  // For each `expr` in `map`, applies the `expr` to the values extracted from
-  // ranges. If the resulting application can be folded into a Value, the
-  // folding occurs eagerly.
-  for (auto expr : map.getResults()) {
-    AffineMap map = AffineMap::get(numDims, numSym, expr);
-    res.push_back(createFoldedComposedAffineApply(b, loc, map, values));
-  }
-  return res;
-}
-
 // A symbol may appear as a dim in affine.apply operations. This function
 // canonicalizes dims that are valid symbols into actual symbols.
 template <class MapOrSet>
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index f4e9c24..ebfdc6e 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -529,7 +529,7 @@ tileLinalgOpImpl(RewriterBase &b, LinalgOp op, ArrayRef<OpFoldResult> tileSizes,
     procInfo.resize(
         iteratorTypes.size(),
         linalg::ProcInfo{nullptr, nullptr, linalg::DistributionMethod::None});
-    // Collect loop ranges of tiled loopss, loops that are parallel.
+    // Collect loop ranges of tiled loops, loops that are parallel.
     SmallVector<Range> parallelLoopRanges;
     for (const auto &iteratorType : llvm::enumerate(iteratorTypes)) {
       if (!isParallelIterator(iteratorType.value()))
@@ -559,10 +559,13 @@ tileLinalgOpImpl(RewriterBase &b, LinalgOp op, ArrayRef<OpFoldResult> tileSizes,
         // loop ranges and the iterator types. Apply its inverse to the
         // resulting loop `ivs` to match the op definition.
         SmallVector<Value, 4> interchangedIvs;
-        if (!options.interchangeVector.empty())
-          interchangedIvs = applyMapToValues(b, loc, invPermutationMap, ivs);
-        else
+        if (!options.interchangeVector.empty()) {
+          for (AffineExpr result : invPermutationMap.getResults())
+            interchangedIvs.push_back(
+                ivs[result.cast<AffineDimExpr>().getPosition()]);
+        } else {
           interchangedIvs.assign(ivs.begin(), ivs.end());
+        }
 
         // Tile the `operandValuesToUse` that either match the `op` operands
         // themselves or the tile loop arguments forwarding them.
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
index a964d91..c9e71a8 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorInferTypeOpInterfaceImpl.cpp
@@ -49,15 +49,15 @@ static OpFoldResult getCollapsedOutputDimFromInputShape(
       map.getResults().front().cast<AffineDimExpr>().getPosition();
   unsigned endPos = map.getResults().back().cast<AffineDimExpr>().getPosition();
   AffineExpr expr;
-  SmallVector<Value> dynamicDims;
+  SmallVector<OpFoldResult> dynamicDims;
   for (auto dim : llvm::seq_inclusive(startPos, endPos)) {
     dynamicDims.push_back(builder.createOrFold<tensor::DimOp>(loc, src, dim));
     AffineExpr currExpr = builder.getAffineSymbolExpr(dim - startPos);
     expr = (expr ? expr * currExpr : currExpr);
   }
-  return affine::applyMapToValues(
+  return affine::makeComposedFoldedAffineApply(
       builder, loc, AffineMap::get(0, endPos - startPos + 1, expr),
-      dynamicDims)[0];
+      dynamicDims);
 }
 
 /// Given the `src` of a collapsing reshape op and its reassociation maps,
@@ -102,12 +102,13 @@ static OpFoldResult getExpandedOutputDimFromInputShape(
            "dimensions");
     linearizedStaticDim *= d.value();
   }
-  Value sourceDim = builder.create<tensor::DimOp>(loc, src, sourceDimPos);
-  return affine::applyMapToValues(
+  OpFoldResult sourceDim =
+      builder.create<tensor::DimOp>(loc, src, sourceDimPos).getResult();
+  return affine::makeComposedFoldedAffineApply(
       builder, loc,
       AffineMap::get(
           0, 1, builder.getAffineSymbolExpr(0).floorDiv(linearizedStaticDim)),
-      sourceDim)[0];
+      sourceDim);
 }
 
 /// Given the `src` of an expanding reshape op, the reassociation maps and the
@@ -174,25 +175,17 @@ struct ReifyPadOp
 
       }
       // Shape along each dimension is source dim + low pad + high pad.
-      SmallVector<Value> mapOperands;
+      SmallVector<OpFoldResult> mapOperands;
       mapOperands.push_back(
           b.createOrFold<tensor::DimOp>(loc, padOp.getSource(), dim));
-      AffineExpr expr = b.getAffineDimExpr(0);
-      unsigned numSymbols = 0;
-      auto addOpFoldResult = [&](OpFoldResult valueOrAttr) {
-        if (Value v = llvm::dyn_cast_if_present<Value>(valueOrAttr)) {
-          expr = expr + b.getAffineSymbolExpr(numSymbols++);
-          mapOperands.push_back(v);
-          return;
-        }
-        int64_t staticValue =
-            llvm::cast<IntegerAttr>(valueOrAttr.get<Attribute>()).getInt();
-        expr = expr + staticValue;
-      };
-      addOpFoldResult(lowPad[dim]);
-      addOpFoldResult(highPad[dim]);
-      shapes.push_back(affine::applyMapToValues(
-          b, loc, AffineMap::get(1, numSymbols, expr), mapOperands)[0]);
+      mapOperands.push_back(lowPad[dim]);
+      mapOperands.push_back(highPad[dim]);
+      AffineExpr expr = b.getAffineDimExpr(0) + b.getAffineSymbolExpr(0) +
+                        b.getAffineSymbolExpr(1);
+      shapes.push_back(getValueOrCreateConstantIndexOp(
+          b, loc,
+          affine::makeComposedFoldedAffineApply(
+              b, loc, AffineMap::get(1, 2, expr), mapOperands)));
     }
     reifiedReturnShapes.emplace_back(std::move(shapes));
     return success();
diff --git a/mlir/test/Dialect/Linalg/pad_fusion.mlir b/mlir/test/Dialect/Linalg/pad_fusion.mlir
index 36eca8e..a0d9a6d 100644
--- a/mlir/test/Dialect/Linalg/pad_fusion.mlir
+++ b/mlir/test/Dialect/Linalg/pad_fusion.mlir
@@ -22,7 +22,7 @@ func.func @dynamic_pad_fusion(%arg0 : tensor<?x?xf32>, %arg1 : index, %arg2 : in
   return %1 : tensor<?x?xf32>
 }
 
-// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s2 + s0 + s1)>
+// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s0 + s1 + s2)>
 // CHECK: func @dynamic_pad_fusion
 // CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: index
@@ -70,7 +70,7 @@ func.func @mixed_pad_fusion(%arg0 : tensor<?x42xf32>, %arg1 : index, %arg2 : ind
   } : tensor<42x?xf32> to tensor<49x?xf32>
   return %1 : tensor<49x?xf32>
 }
-// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s2 + s0 + s1)>
+// CHECK-DAG: #[[MAP:.+]] = affine_map<()[s0, s1, s2] -> (s0 + s1 + s2)>
 // CHECK: func @mixed_pad_fusion
 // CHECK-SAME: %[[ARG0:.+]]: tensor<?x42xf32>
 // CHECK-SAME: %[[ARG1:[a-zA-Z0-9]+]]: index
diff --git a/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir b/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
index f931fe8..aeb357d 100644
--- a/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
+++ b/mlir/test/Dialect/Linalg/resolve-shaped-type-result-dims.mlir
@@ -262,8 +262,8 @@ func.func @dim_of_pad_op(%arg0 : tensor<2x?x?xf32>, %arg1 : index, %arg2 : index
   %3 = tensor.dim %0, %c2 : tensor<?x?x?xf32>
   return %1, %2, %3 : index, index, index
 }
-// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0, s1] -> (s1 + s0 + 5)>
-// CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0, s1] -> (s1 + s0 + 4)>
+// CHECK-DAG: #[[MAP0:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 5)>
+// CHECK-DAG: #[[MAP1:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 4)>
 // CHECK: func @dim_of_pad_op
 // CHECK-SAME: %[[ARG0:[A-Za-z0-9_]+]]: tensor<2x?x?xf32>
 // CHECK-SAME: %[[ARG1:[A-Za-z0-9_]+]]: index
diff --git a/mlir/test/Dialect/Linalg/vectorization-masked.mlir b/mlir/test/Dialect/Linalg/vectorization-masked.mlir
index 985dd05..fc7749a 100644
--- a/mlir/test/Dialect/Linalg/vectorization-masked.mlir
+++ b/mlir/test/Dialect/Linalg/vectorization-masked.mlir
@@ -405,7 +405,7 @@ transform.sequence failures(propagate) {
 
 // -----
 
-// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1] -> (s1 + s0)>
+// CHECK: #[[MAP:.+]] = affine_map<()[s0, s1] -> (s0 + s1)>
 // CHECK: func @test_masked_vectorize_dynamic_pad
 func.func @test_masked_vectorize_dynamic_pad(
   %0 : tensor<?x?xf32>, %h0 : index, %h1 : index)
diff --git a/mlir/test/Dialect/Tensor/bufferize.mlir b/mlir/test/Dialect/Tensor/bufferize.mlir
index b9382b9..c7b1631 100644
--- a/mlir/test/Dialect/Tensor/bufferize.mlir
+++ b/mlir/test/Dialect/Tensor/bufferize.mlir
@@ -547,7 +547,7 @@ func.func @tensor.reshape(%t1: tensor<?x10xf32>) -> tensor<2x2x5xf32> {
 
 // -----
 
-// CHECK: #[[$sum_map_1:.+]] = affine_map<()[s0, s1] -> (s1 + s0 + 5)>
+// CHECK: #[[$sum_map_1:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 5)>
 // CHECK: #[[$sum_map_2:.+]] = affine_map<()[s0, s1] -> (s0 + s1 + 10)>
 // CHECK-LABEL: func @tensor.pad(
 // CHECK-SAME: %[[t1:.*]]: tensor<?x10xf32>, %[[l2:.*]]: index, %[[h1:.*]]: index, %[[h2:.*]]: index
-- 
2.7.4