//===- AffineLoopNormalize.cpp - AffineLoopNormalize Pass -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
//
//===----------------------------------------------------------------------===//
//
// This file implements a normalizer for affine loop-like ops.
//
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
#include "mlir/Dialect/Affine/Passes.h"
#include "mlir/Dialect/Affine/Utils.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Transforms/LoopUtils.h"

using namespace mlir;
-void normalizeAffineParallel(AffineParallelOp op) {
+void mlir::normalizeAffineParallel(AffineParallelOp op) {
AffineMap lbMap = op.lowerBoundsMap();
SmallVector<int64_t, 8> steps = op.getSteps();
// No need to do any work if the parallel op is already normalized.
op.setUpperBounds(ranges.getOperands(), newUpperMap);
}
+/// Normalization transformations for affine.for ops. For now, it only removes
+/// single iteration loops. We may want to consider separating redundant loop
+/// elimitation from loop bound normalization, if needed in the future.
+static void normalizeAffineFor(AffineForOp op) {
+ if (succeeded(promoteIfSingleIteration(op)))
+ return;
+
+ // TODO: Normalize loop bounds.
+}
+
namespace {
/// Normalize affine.parallel ops so that lower bounds are 0 and steps are 1.
/// As currently implemented, this pass cannot fail, but it might skip over ops
/// that are already in a normalized form.
-struct AffineParallelNormalizePass
- : public AffineParallelNormalizeBase<AffineParallelNormalizePass> {
+struct AffineLoopNormalizePass
+ : public AffineLoopNormalizeBase<AffineLoopNormalizePass> {
- void runOnFunction() override { getFunction().walk(normalizeAffineParallel); }
+ void runOnFunction() override {
+ getFunction().walk([](Operation *op) {
+ if (auto affineParallel = dyn_cast<AffineParallelOp>(op))
+ normalizeAffineParallel(affineParallel);
+ else if (auto affineFor = dyn_cast<AffineForOp>(op))
+ normalizeAffineFor(affineFor);
+ });
+ }
};
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
-mlir::createAffineParallelNormalizePass() {
- return std::make_unique<AffineParallelNormalizePass>();
+std::unique_ptr<OperationPass<FuncOp>> mlir::createAffineLoopNormalizePass() {
+ return std::make_unique<AffineLoopNormalizePass>();
}
// RUN: mlir-opt %s -affine-loop-normalize -split-input-file | FileCheck %s
// Normalize steps to 1 and lower bounds to 0.
}
return
}

// -----

// Check that single iteration loop is removed and its body is promoted to the
// parent block.

// CHECK-LABEL: func @single_iteration_loop
func @single_iteration_loop(%in: memref<1xf32>, %out: memref<1xf32>) {
  affine.for %i = 0 to 1 {
    %1 = affine.load %in[%i] : memref<1xf32>
    affine.store %1, %out[%i] : memref<1xf32>
  }
  return
}

// CHECK-NOT: affine.for
// CHECK: affine.load
// CHECK-NEXT: affine.store
// CHECK-NEXT: return