#ifndef MLIR_ANALYSIS_AFFINE_ANALYSIS_H
#define MLIR_ANALYSIS_AFFINE_ANALYSIS_H
-#include "mlir/Support/Status.h"
+#include "mlir/Support/LogicalResult.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
/// operands are added as symbols in the system. Returns failure for the yet
/// unimplemented cases.
// TODO(bondhugula): handle non-unit strides.
-Status getIndexSet(llvm::MutableArrayRef<OpPointer<AffineForOp>> forOps,
- FlatAffineConstraints *domain);
+LogicalResult getIndexSet(llvm::MutableArrayRef<OpPointer<AffineForOp>> forOps,
+ FlatAffineConstraints *domain);
/// Encapsulates a memref load or store access information.
struct MemRefAccess {
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/OpDefinition.h"
-#include "mlir/Support/Status.h"
+#include "mlir/Support/LogicalResult.h"
namespace mlir {
/// instruction are added as trailing identifiers (either dimensional or
/// symbolic depending on whether the operand is a valid ML Function symbol).
// TODO(bondhugula): add support for non-unit strides.
- Status addAffineForOpDomain(ConstOpPointer<AffineForOp> forOp);
+ LogicalResult addAffineForOpDomain(ConstOpPointer<AffineForOp> forOp);
/// Adds a lower or an upper bound for the identifier at the specified
/// position with constraints being drawn from the specified bound map and
/// operands. If `eq` is true, add a single equality equal to the bound map's
/// first result expr.
- Status addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
- ArrayRef<Value *> operands, bool eq,
- bool lower = true);
+ LogicalResult addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
+ ArrayRef<Value *> operands, bool eq,
+ bool lower = true);
/// Computes the lower and upper bounds of the first 'num' dimensional
/// identifiers as an affine map of the remaining identifiers (dimensional and
/// operand list 'operands'.
/// This function assumes 'values.size' == 'lbMaps.size' == 'ubMaps.size'.
/// Note that both lower/upper bounds use operands from 'operands'.
- Status addSliceBounds(ArrayRef<Value *> values, ArrayRef<AffineMap> lbMaps,
- ArrayRef<AffineMap> ubMaps, ArrayRef<Value *> operands);
+ LogicalResult addSliceBounds(ArrayRef<Value *> values,
+ ArrayRef<AffineMap> lbMaps,
+ ArrayRef<AffineMap> ubMaps,
+ ArrayRef<Value *> operands);
// Adds an inequality (>= 0) from the coefficients specified in inEq.
void addInequality(ArrayRef<int64_t> inEq);
/// symbolic operands of vMap should match 1:1 (in the same order) with those
/// of this constraint system, but the latter could have additional trailing
/// operands.
- Status composeMap(AffineValueMap *vMap);
+ LogicalResult composeMap(AffineValueMap *vMap);
/// Projects out (aka eliminates) 'num' identifiers starting at position
/// 'pos'. The resulting constraint system is the shadow along the dimensions
/// equality detection; if successful, the constant is substituted for the
/// identifier everywhere in the constraint system and then removed from the
/// system.
- Status constantFoldId(unsigned pos);
+ LogicalResult constantFoldId(unsigned pos);
/// This method calls constantFoldId for the specified range of identifiers,
/// 'num' identifiers starting at position 'pos'.
/// 9}, output = {s0 + 1 <= d0 <= s0 + 20}.
/// 3) 'this' = {0 <= d0 <= 5, 1 <= d1 <= 9}, 'other' = {2 <= d0 <= 6, 5 <= d1
/// <= 15}, output = {0 <= d0 <= 6, 1 <= d1 <= 15}.
- Status unionBoundingBox(const FlatAffineConstraints &other);
+ LogicalResult unionBoundingBox(const FlatAffineConstraints &other);
unsigned getNumConstraints() const {
return getNumInequalities() + getNumEqualities();
// Eliminates a single identifier at 'position' from equality and inequality
// constraints. Returns 'success' if the identifier was eliminated, and
// 'failure' otherwise.
- inline Status gaussianEliminateId(unsigned position) {
+ inline LogicalResult gaussianEliminateId(unsigned position) {
return gaussianEliminateIds(position, position + 1) == 1
- ? Status::success()
- : Status::failure();
+ ? LogicalResult::success()
+ : LogicalResult::failure();
}
// Eliminates identifiers from equality and inequality constraints
/// that connect newly introduced local identifiers to existing dimensional and
/// symbolic identifiers. See documentation for AffineExprFlattener on how
/// mod's and div's are flattened.
-Status getFlattenedAffineExpr(AffineExpr expr, unsigned numDims,
- unsigned numSymbols,
- llvm::SmallVectorImpl<int64_t> *flattenedExpr,
- FlatAffineConstraints *cst = nullptr);
+LogicalResult
+getFlattenedAffineExpr(AffineExpr expr, unsigned numDims, unsigned numSymbols,
+ llvm::SmallVectorImpl<int64_t> *flattenedExpr,
+ FlatAffineConstraints *cst = nullptr);
/// Flattens the result expressions of the map to their corresponding flattened
/// forms and set in 'flattenedExprs'. Returns failure if any expression in the
/// method should be used instead of repeatedly calling getFlattenedAffineExpr
/// since local variables added to deal with div's and mod's will be reused
/// across expressions.
-Status getFlattenedAffineExprs(
+LogicalResult getFlattenedAffineExprs(
AffineMap map, std::vector<llvm::SmallVector<int64_t, 8>> *flattenedExprs,
FlatAffineConstraints *cst = nullptr);
-Status getFlattenedAffineExprs(
+LogicalResult getFlattenedAffineExprs(
IntegerSet set, std::vector<llvm::SmallVector<int64_t, 8>> *flattenedExprs,
FlatAffineConstraints *cst = nullptr);
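As a side note, a minimal sketch of how a caller might drive the renamed flattening API; the helper name `flattenExample`, the concrete expression, and the include paths are illustrative assumptions, not part of this patch:

#include "mlir/Analysis/AffineStructures.h" // assumed location of the above
#include "mlir/IR/AffineExpr.h"

using namespace mlir;

// Flatten d0 + s0 * 2 into coefficient form; getFlattenedAffineExpr returns
// failure for semi-affine expressions, which the caller simply propagates.
static LogicalResult flattenExample(MLIRContext *ctx,
                                    llvm::SmallVectorImpl<int64_t> *coeffs) {
  AffineExpr expr = getAffineDimExpr(0, ctx) + getAffineSymbolExpr(0, ctx) * 2;
  return getFlattenedAffineExpr(expr, /*numDims=*/1, /*numSymbols=*/1, coeffs);
}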
#include "mlir/IR/Block.h"
#include "mlir/IR/Location.h"
#include "mlir/Support/LLVM.h"
-#include "mlir/Support/Status.h"
+#include "mlir/Support/LogicalResult.h"
#include "llvm/ADT/SmallVector.h"
#include <memory>
// Constraints are added for all loop IV bounds (dim or symbol), and
// constraints are added for slice bounds in 'lbs'/'ubs'.
// Returns failure if we cannot add loop bounds because of unsupported cases.
- Status getAsConstraints(FlatAffineConstraints *cst);
+ LogicalResult getAsConstraints(FlatAffineConstraints *cst);
// Clears all bounds and operands in slice state.
void clearBounds();
/// Computes computation slice loop bounds for the loop nest surrounding
/// 'srcAccess', where the returned loop bound AffineMaps are functions of
/// loop IVs from the loop nest surrounding 'dstAccess'.
-Status getBackwardComputationSliceState(const MemRefAccess &srcAccess,
- const MemRefAccess &dstAccess,
- unsigned dstLoopDepth,
- ComputationSliceState *sliceState);
+LogicalResult getBackwardComputationSliceState(
+ const MemRefAccess &srcAccess, const MemRefAccess &dstAccess,
+ unsigned dstLoopDepth, ComputationSliceState *sliceState);
/// Creates a clone of the computation contained in the loop nest surrounding
/// 'srcOpInst', slices the iteration space of src loop based on slice bounds
/// {memref = %A, write = false, {%i <= m0 <= %i + 7} }
/// The last field is a 2-d FlatAffineConstraints symbolic in %i.
///
- Status compute(Instruction *inst, unsigned loopDepth,
- ComputationSliceState *sliceState = nullptr);
+ LogicalResult compute(Instruction *inst, unsigned loopDepth,
+ ComputationSliceState *sliceState = nullptr);
FlatAffineConstraints *getConstraints() { return &cst; }
const FlatAffineConstraints *getConstraints() const { return &cst; }
Optional<int64_t> getRegionSize();
// Wrapper around FlatAffineConstraints::unionBoundingBox.
- Status unionBoundingBox(const MemRefRegion &other);
+ LogicalResult unionBoundingBox(const MemRefRegion &other);
/// Returns the rank of the memref that this region corresponds to.
unsigned getRank() const;
/// access is out of bounds along any of the dimensions, success otherwise.
/// Emits a diagnostic error (with location information) if emitError is true.
template <typename LoadOrStoreOpPointer>
-Status boundCheckLoadOrStoreOp(LoadOrStoreOpPointer loadOrStoreOp,
- bool emitError = true);
+LogicalResult boundCheckLoadOrStoreOp(LoadOrStoreOpPointer loadOrStoreOp,
+ bool emitError = true);
/// Returns the number of surrounding loops common to both A and B.
unsigned getNumCommonSurroundingLoops(const Instruction &A,
#include "mlir/Pass/AnalysisManager.h"
#include "mlir/Pass/PassRegistry.h"
-#include "mlir/Support/Status.h"
+#include "mlir/Support/LogicalResult.h"
#include "llvm/ADT/PointerIntPair.h"
namespace mlir {
private:
/// Forwarding function to execute this pass.
LLVM_NODISCARD
- Status run(Function *fn, FunctionAnalysisManager &fam);
+ LogicalResult run(Function *fn, FunctionAnalysisManager &fam);
/// The current execution state for the pass.
llvm::Optional<PassStateT> passState;
private:
/// Forwarding function to execute this pass.
LLVM_NODISCARD
- Status run(Module *module, ModuleAnalysisManager &mam);
+ LogicalResult run(Module *module, ModuleAnalysisManager &mam);
/// The current execution state for the pass.
llvm::Optional<PassStateT> passState;
#ifndef MLIR_PASS_PASSMANAGER_H
#define MLIR_PASS_PASSMANAGER_H
-#include "mlir/Support/Status.h"
+#include "mlir/Support/LogicalResult.h"
#include "llvm/ADT/SmallVector.h"
namespace mlir {
/// Run the passes within this manager on the provided module.
LLVM_NODISCARD
- Status run(Module *module);
+ LogicalResult run(Module *module);
private:
/// A stack of nested pass executors on sub-module IR units, e.g. function.
-//===- Status.h - Utilities for handling success/failure --------*- C++ -*-===//
+//===- LogicalResult.h - Utilities for handling success/failure -*- C++ -*-===//
//
// Copyright 2019 The MLIR Authors.
//
// limitations under the License.
// =============================================================================
-#ifndef MLIR_SUPPORT_STATUS_H
-#define MLIR_SUPPORT_STATUS_H
+#ifndef MLIR_SUPPORT_LOGICAL_RESULT_H
+#define MLIR_SUPPORT_LOGICAL_RESULT_H
#include "mlir/Support/LLVM.h"
// Values that can be used to signal success/failure. This should be used in
// conjunction with the 'succeeded' and 'failed' functions below.
-struct Status {
+struct LogicalResult {
enum ResultEnum { Success, Failure } value;
- Status(ResultEnum v) : value(v) {}
+ LogicalResult(ResultEnum v) : value(v) {}
/// Utility method to generate a success result.
- static Status success() { return Success; }
+ static LogicalResult success() { return Success; }
/// Utility method to generate a failure result.
- static Status failure() { return Failure; }
+ static LogicalResult failure() { return Failure; }
};
-/// Utility function that returns true if the provided Status corresponds
+/// Utility function that returns true if the provided LogicalResult corresponds
/// to a success value.
-inline bool succeeded(Status result) { return result.value == Status::Success; }
+inline bool succeeded(LogicalResult result) {
+ return result.value == LogicalResult::Success;
+}
-/// Utility function that returns true if the provided Status corresponds
+/// Utility function that returns true if the provided LogicalResult corresponds
/// to a failure value.
-inline bool failed(Status result) { return result.value == Status::Failure; }
+inline bool failed(LogicalResult result) {
+ return result.value == LogicalResult::Failure;
+}
} // namespace mlir
-#endif // MLIR_SUPPORT_STATUS_H
+#endif // MLIR_SUPPORT_LOGICAL_RESULT_H
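For orientation, a minimal sketch of the calling convention this header is meant to support; `halveIfEven` and `halveTwice` are hypothetical helpers written for illustration only:

#include "mlir/Support/LogicalResult.h"

#include <cstdint>

using namespace mlir;

// A fallible step: fails on odd input, otherwise writes value / 2 to 'result'.
static LogicalResult halveIfEven(int64_t value, int64_t *result) {
  if (value % 2 != 0)
    return LogicalResult::failure();
  *result = value / 2;
  return LogicalResult::success();
}

// Fallible steps compose through the 'failed'/'succeeded' helpers rather than
// by comparing the enum values directly.
static LogicalResult halveTwice(int64_t value, int64_t *result) {
  int64_t intermediate;
  if (failed(halveIfEven(value, &intermediate)))
    return LogicalResult::failure();
  return halveIfEven(intermediate, result);
}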
#include "mlir/IR/PatternMatch.h"
#include "mlir/Support/LLVM.h"
-#include "mlir/Support/Status.h"
+#include "mlir/Support/LogicalResult.h"
namespace mlir {
/// Run the converter on the provided module.
LLVM_NODISCARD
- Status convert(Module *m);
+ LogicalResult convert(Module *m);
protected:
/// Derived classes must implement this hook to produce a set of conversion
#define MLIR_TRANSFORMS_LOOP_UTILS_H
#include "mlir/Support/LLVM.h"
-#include "mlir/Support/Status.h"
+#include "mlir/Support/LogicalResult.h"
namespace mlir {
class AffineMap;
/// Unrolls this for instruction completely if the trip count is known to be
/// constant.
-Status loopUnrollFull(OpPointer<AffineForOp> forOp);
+LogicalResult loopUnrollFull(OpPointer<AffineForOp> forOp);
/// Unrolls this for instruction by the specified unroll factor. Returns failure
/// if the loop cannot be unrolled either due to restrictions or due to invalid
/// unroll factors.
-Status loopUnrollByFactor(OpPointer<AffineForOp> forOp, uint64_t unrollFactor);
+LogicalResult loopUnrollByFactor(OpPointer<AffineForOp> forOp,
+ uint64_t unrollFactor);
/// Unrolls this loop by the specified unroll factor or its trip count,
/// whichever is lower.
-Status loopUnrollUpToFactor(OpPointer<AffineForOp> forOp,
- uint64_t unrollFactor);
+LogicalResult loopUnrollUpToFactor(OpPointer<AffineForOp> forOp,
+ uint64_t unrollFactor);
/// Unrolls and jams this loop by the specified factor. Returns success if the
/// loop is successfully unroll-jammed.
-Status loopUnrollJamByFactor(OpPointer<AffineForOp> forOp,
- uint64_t unrollJamFactor);
+LogicalResult loopUnrollJamByFactor(OpPointer<AffineForOp> forOp,
+ uint64_t unrollJamFactor);
/// Unrolls and jams this loop by the specified factor or by the trip count (if
/// constant), whichever is lower.
-Status loopUnrollJamUpToFactor(OpPointer<AffineForOp> forOp,
- uint64_t unrollJamFactor);
+LogicalResult loopUnrollJamUpToFactor(OpPointer<AffineForOp> forOp,
+ uint64_t unrollJamFactor);
/// Promotes the loop body of an AffineForOp to its containing block if the
/// AffineForOp was known to have a single iteration.
-Status promoteIfSingleIteration(OpPointer<AffineForOp> forOp);
+LogicalResult promoteIfSingleIteration(OpPointer<AffineForOp> forOp);
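A hedged usage sketch for the declarations above; `unrollAggressively` is a hypothetical helper and the include paths are assumptions about this revision's layout:

#include "mlir/AffineOps/AffineOps.h" // assumed home of AffineForOp
#include "mlir/Transforms/LoopUtils.h"

#include <cstdint>

using namespace mlir;

// Try a partial unroll first; if the factor cannot be applied (e.g. the trip
// count is too small or the bound maps are unsupported), attempt a full
// unroll instead.
static LogicalResult unrollAggressively(OpPointer<AffineForOp> forOp,
                                        uint64_t factor) {
  if (succeeded(loopUnrollByFactor(forOp, factor)))
    return LogicalResult::success();
  return loopUnrollFull(forOp);
}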
/// Promotes all single iteration AffineForOp's in the Function, i.e., moves
/// their body into the containing Block.
/// instruction-wise shifts. The shifts are with respect to the original
/// execution order, and are multiplied by the loop 'step' before being applied.
LLVM_NODISCARD
-Status instBodySkew(OpPointer<AffineForOp> forOp, ArrayRef<uint64_t> shifts,
- bool unrollPrologueEpilogue = false);
+LogicalResult instBodySkew(OpPointer<AffineForOp> forOp,
+ ArrayRef<uint64_t> shifts,
+ bool unrollPrologueEpilogue = false);
/// Tiles the specified band of perfectly nested loops creating tile-space loops
/// and intra-tile loops. A band is a contiguous set of loops.
LLVM_NODISCARD
-Status tileCodeGen(MutableArrayRef<OpPointer<AffineForOp>> band,
- ArrayRef<unsigned> tileSizes);
+LogicalResult tileCodeGen(MutableArrayRef<OpPointer<AffineForOp>> band,
+ ArrayRef<unsigned> tileSizes);
/// Performs loop interchange on 'forOpA' and 'forOpB'. Requires that 'forOpA'
/// and 'forOpB' are part of a perfectly nested sequence of loops.
// stride information in FlatAffineConstraints. (For eg., by using iv - lb %
// step = 0 and/or by introducing a method in FlatAffineConstraints
// setExprStride(ArrayRef<int64_t> expr, int64_t stride)
-Status mlir::getIndexSet(MutableArrayRef<OpPointer<AffineForOp>> forOps,
- FlatAffineConstraints *domain) {
+LogicalResult mlir::getIndexSet(MutableArrayRef<OpPointer<AffineForOp>> forOps,
+ FlatAffineConstraints *domain) {
SmallVector<Value *, 4> indices;
extractForInductionVars(forOps, &indices);
// Reset the domain while associating the Values in 'indices' with it.
for (auto forOp : forOps) {
// Add constraints from forOp's bounds.
if (failed(domain->addAffineForOpDomain(forOp)))
- return Status::failure();
+ return LogicalResult::failure();
}
- return Status::success();
+ return LogicalResult::success();
}
// Computes the iteration domain for 'opInst' and populates 'indexSet', which
// 'indexSet' correspond to the loops surrounding 'inst' from outermost to
// innermost.
// TODO(andydavis) Add support to handle IfInsts surrounding 'inst'.
-static Status getInstIndexSet(const Instruction *inst,
- FlatAffineConstraints *indexSet) {
+static LogicalResult getInstIndexSet(const Instruction *inst,
+ FlatAffineConstraints *indexSet) {
// TODO(andydavis) Extend this to gather enclosing IfInsts and consider
// factoring it out into a utility function.
SmallVector<OpPointer<AffineForOp>, 4> loops;
// semi-affine). Returns success otherwise.
// TODO(bondhugula): assumes that dependenceDomain doesn't have local
// variables already. Fix this soon.
-static Status
+static LogicalResult
addMemRefAccessConstraints(const AffineValueMap &srcAccessMap,
const AffineValueMap &dstAccessMap,
const ValuePositionMap &valuePosMap,
FlatAffineConstraints *dependenceDomain) {
if (dependenceDomain->getNumLocalIds() != 0)
- return Status::failure();
+ return LogicalResult::failure();
AffineMap srcMap = srcAccessMap.getAffineMap();
AffineMap dstMap = dstAccessMap.getAffineMap();
assert(srcMap.getNumResults() == dstMap.getNumResults());
// Get flattened expressions for the source and destination maps.
if (failed(getFlattenedAffineExprs(srcMap, &srcFlatExprs, &srcLocalVarCst)) ||
failed(getFlattenedAffineExprs(dstMap, &destFlatExprs, &destLocalVarCst)))
- return Status::failure();
+ return LogicalResult::failure();
unsigned srcNumLocalIds = srcLocalVarCst.getNumLocalIds();
unsigned dstNumLocalIds = destLocalVarCst.getNumLocalIds();
dependenceDomain->addInequality(ineq);
}
- return Status::success();
+ return LogicalResult::success();
}
// Returns the number of outer loops common to 'src/dstDomain'.
// Flattens the expressions in map. Returns failure if 'expr' was unable to be
// flattened (i.e., semi-affine expressions not handled yet).
-static Status getFlattenedAffineExprs(
+static LogicalResult getFlattenedAffineExprs(
ArrayRef<AffineExpr> exprs, unsigned numDims, unsigned numSymbols,
std::vector<llvm::SmallVector<int64_t, 8>> *flattenedExprs,
FlatAffineConstraints *localVarCst) {
if (exprs.empty()) {
localVarCst->reset(numDims, numSymbols);
- return Status::success();
+ return LogicalResult::success();
}
AffineExprFlattener flattener(numDims, numSymbols, exprs[0].getContext());
// local identifiers / expressions are shared.
for (auto expr : exprs) {
if (!expr.isPureAffine())
- return Status::failure();
+ return LogicalResult::failure();
flattener.walkPostOrder(expr);
}
localVarCst->clearAndCopyFrom(flattener.localVarCst);
}
- return Status::success();
+ return LogicalResult::success();
}
// Flattens 'expr' into 'flattenedExpr'. Returns failure if 'expr' was unable to
// be flattened (semi-affine expressions not handled yet).
-Status
+LogicalResult
mlir::getFlattenedAffineExpr(AffineExpr expr, unsigned numDims,
unsigned numSymbols,
llvm::SmallVectorImpl<int64_t> *flattenedExpr,
FlatAffineConstraints *localVarCst) {
std::vector<SmallVector<int64_t, 8>> flattenedExprs;
- Status ret = ::getFlattenedAffineExprs({expr}, numDims, numSymbols,
- &flattenedExprs, localVarCst);
+ LogicalResult ret = ::getFlattenedAffineExprs({expr}, numDims, numSymbols,
+ &flattenedExprs, localVarCst);
*flattenedExpr = flattenedExprs[0];
return ret;
}
/// Flattens the expressions in map. Returns failure if 'expr' was unable to be
/// flattened (i.e., semi-affine expressions not handled yet).
-Status mlir::getFlattenedAffineExprs(
+LogicalResult mlir::getFlattenedAffineExprs(
AffineMap map, std::vector<llvm::SmallVector<int64_t, 8>> *flattenedExprs,
FlatAffineConstraints *localVarCst) {
if (map.getNumResults() == 0) {
localVarCst->reset(map.getNumDims(), map.getNumSymbols());
- return Status::success();
+ return LogicalResult::success();
}
return ::getFlattenedAffineExprs(map.getResults(), map.getNumDims(),
map.getNumSymbols(), flattenedExprs,
localVarCst);
}
-Status mlir::getFlattenedAffineExprs(
+LogicalResult mlir::getFlattenedAffineExprs(
IntegerSet set, std::vector<llvm::SmallVector<int64_t, 8>> *flattenedExprs,
FlatAffineConstraints *localVarCst) {
if (set.getNumConstraints() == 0) {
localVarCst->reset(set.getNumDims(), set.getNumSymbols());
- return Status::success();
+ return LogicalResult::success();
}
return ::getFlattenedAffineExprs(set.getConstraints(), set.getNumDims(),
set.getNumSymbols(), flattenedExprs,
// This routine may add additional local variables if the flattened expression
// corresponding to the map has such variables due to mod's, ceildiv's, and
// floordiv's in it.
-Status FlatAffineConstraints::composeMap(AffineValueMap *vMap) {
+LogicalResult FlatAffineConstraints::composeMap(AffineValueMap *vMap) {
std::vector<SmallVector<int64_t, 8>> flatExprs;
FlatAffineConstraints localCst;
if (failed(getFlattenedAffineExprs(vMap->getAffineMap(), &flatExprs,
&localCst))) {
LLVM_DEBUG(llvm::dbgs()
<< "composition unimplemented for semi-affine maps\n");
- return Status::failure();
+ return LogicalResult::failure();
}
assert(flatExprs.size() == vMap->getNumResults());
addEquality(eqToAdd);
}
- return Status::success();
+ return LogicalResult::success();
}
// Turn a dimension into a symbol.
}
}
-Status
+LogicalResult
FlatAffineConstraints::addAffineForOpDomain(ConstOpPointer<AffineForOp> forOp) {
unsigned pos;
// Pre-condition for this method.
if (!findId(*forOp->getInductionVar(), &pos)) {
assert(false && "Value not found");
- return Status::failure();
+ return LogicalResult::failure();
}
if (forOp->getStep() != 1)
ncForOp->getLowerBoundOperands().end());
if (failed(addLowerOrUpperBound(pos, forOp->getLowerBoundMap(), lbOperands,
/*eq=*/false, /*lower=*/true)))
- return Status::failure();
+ return LogicalResult::failure();
}
if (forOp->hasConstantUpperBound()) {
addConstantUpperBound(pos, forOp->getConstantUpperBound() - step);
- return Status::success();
+ return LogicalResult::success();
}
// Non-constant upper bound case.
OpPointer<AffineForOp> ncForOp =
}
}
-Status FlatAffineConstraints::addLowerOrUpperBound(unsigned pos,
- AffineMap boundMap,
- ArrayRef<Value *> operands,
- bool eq, bool lower) {
+LogicalResult
+FlatAffineConstraints::addLowerOrUpperBound(unsigned pos, AffineMap boundMap,
+ ArrayRef<Value *> operands, bool eq,
+ bool lower) {
assert(pos < getNumDimAndSymbolIds() && "invalid position");
// Equality follows the logic of lower bound except that we add an equality
// instead of an inequality.
std::vector<SmallVector<int64_t, 8>> flatExprs;
if (failed(getFlattenedAffineExprs(boundMap, &flatExprs, &localVarCst))) {
LLVM_DEBUG(llvm::dbgs() << "semi-affine expressions not yet supported\n");
- return Status::failure();
+ return LogicalResult::failure();
}
// Merge and align with localVarCst.
: flatExpr[flatExpr.size() - 1] - 1;
eq ? addEquality(ineq) : addInequality(ineq);
}
- return Status::success();
+ return LogicalResult::success();
}
// Adds slice lower bounds represented by lower bounds in 'lbMaps' and upper
// This function assumes 'values.size' == 'lbMaps.size' == 'ubMaps.size', and
// skips any null AffineMaps in 'lbMaps' or 'ubMaps'.
// Note that both lower/upper bounds use operands from 'operands'.
-// Returns true on success. Returns false for unimplemented cases such as
-// semi-affine expressions or expressions with mod/floordiv.
-Status FlatAffineConstraints::addSliceBounds(ArrayRef<Value *> values,
- ArrayRef<AffineMap> lbMaps,
- ArrayRef<AffineMap> ubMaps,
- ArrayRef<Value *> operands) {
+// Returns failure for unimplemented cases such as semi-affine expressions or
+// expressions with mod/floordiv.
+LogicalResult FlatAffineConstraints::addSliceBounds(
+ ArrayRef<Value *> values, ArrayRef<AffineMap> lbMaps,
+ ArrayRef<AffineMap> ubMaps, ArrayRef<Value *> operands) {
assert(values.size() == lbMaps.size());
assert(lbMaps.size() == ubMaps.size());
lbMap.getResult(0) + 1 == ubMap.getResult(0)) {
if (failed(addLowerOrUpperBound(pos, lbMap, operands, /*eq=*/true,
/*lower=*/true)))
- return Status::failure();
+ return LogicalResult::failure();
continue;
}
if (lbMap && failed(addLowerOrUpperBound(pos, lbMap, operands, /*eq=*/false,
/*lower=*/true)))
- return Status::failure();
+ return LogicalResult::failure();
if (ubMap && failed(addLowerOrUpperBound(pos, ubMap, operands, /*eq=*/false,
/*lower=*/false)))
- return Status::failure();
+ return LogicalResult::failure();
}
- return Status::success();
+ return LogicalResult::success();
}
void FlatAffineConstraints::addEquality(ArrayRef<int64_t> eq) {
removeId(pos);
}
-Status FlatAffineConstraints::constantFoldId(unsigned pos) {
+LogicalResult FlatAffineConstraints::constantFoldId(unsigned pos) {
assert(pos < getNumIds() && "invalid position");
int rowIdx;
if ((rowIdx = findEqualityToConstant(*this, pos)) == -1)
- return Status::failure();
+ return LogicalResult::failure();
// atEq(rowIdx, pos) is either -1 or 1.
assert(atEq(rowIdx, pos) * atEq(rowIdx, pos) == 1);
int64_t constVal = -atEq(rowIdx, getNumCols() - 1) / atEq(rowIdx, pos);
setAndEliminate(pos, constVal);
- return Status::success();
+ return LogicalResult::success();
}
void FlatAffineConstraints::constantFoldIdRange(unsigned pos, unsigned num) {
for (unsigned r = 0, e = getNumEqualities(); r < e; r++) {
if (atEq(r, pos) != 0) {
// Use Gaussian elimination here (since we have an equality).
- Status ret = gaussianEliminateId(pos);
+ LogicalResult ret = gaussianEliminateId(pos);
(void)ret;
assert(succeeded(ret) && "Gaussian elimination guaranteed to succeed");
LLVM_DEBUG(llvm::dbgs() << "FM output (through Gaussian elimination):\n");
// Computes the bounding box with respect to 'other' by finding the min of the
// lower bounds and the max of the upper bounds along each of the dimensions.
-Status
+LogicalResult
FlatAffineConstraints::unionBoundingBox(const FlatAffineConstraints &otherCst) {
assert(otherCst.getNumDimIds() == numDims && "dims mismatch");
assert(otherCst.getIds()
if (!extent.hasValue())
// TODO(bondhugula): symbolic extents when necessary.
// TODO(bondhugula): handle union if a dimension is unbounded.
- return Status::failure();
+ return LogicalResult::failure();
auto otherExtent =
other.getConstantBoundOnDimSize(d, &otherLb, &otherLbDivisor);
if (!otherExtent.hasValue() || lbDivisor != otherLbDivisor)
// TODO(bondhugula): symbolic extents when necessary.
- return Status::failure();
+ return LogicalResult::failure();
assert(lbDivisor > 0 && "divisor always expected to be positive");
auto constLb = getConstantLowerBound(d);
auto constOtherLb = other.getConstantLowerBound(d);
if (!constLb.hasValue() || !constOtherLb.hasValue())
- return Status::failure();
+ return LogicalResult::failure();
std::fill(minLb.begin(), minLb.end(), 0);
minLb.back() = std::min(constLb.getValue(), constOtherLb.getValue());
}
auto constUb = getConstantUpperBound(d);
auto constOtherUb = other.getConstantUpperBound(d);
if (!constUb.hasValue() || !constOtherUb.hasValue())
- return Status::failure();
+ return LogicalResult::failure();
std::fill(maxUb.begin(), maxUb.end(), 0);
maxUb.back() = std::max(constUb.getValue(), constOtherUb.getValue());
}
addInequality(boundingUbs[d]);
}
- return Status::success();
+ return LogicalResult::success();
}
}
// Populates 'cst' with FlatAffineConstraints which represent slice bounds.
-Status ComputationSliceState::getAsConstraints(FlatAffineConstraints *cst) {
+LogicalResult
+ComputationSliceState::getAsConstraints(FlatAffineConstraints *cst) {
assert(!lbOperands.empty());
// Adds src 'ivs' as dimension identifiers in 'cst'.
unsigned numDims = ivs.size();
} else {
if (auto loop = getForInductionVarOwner(value)) {
if (failed(cst->addAffineForOpDomain(loop)))
- return Status::failure();
+ return LogicalResult::failure();
}
}
}
// Add slice bounds on 'ivs' using maps 'lbs'/'ubs' with 'lbOperands[0]'
- Status ret = cst->addSliceBounds(ivs, lbs, ubs, lbOperands[0]);
+ LogicalResult ret = cst->addSliceBounds(ivs, lbs, ubs, lbOperands[0]);
assert(succeeded(ret) &&
"should not fail as we never have semi-affine slice maps");
(void)ret;
- return Status::success();
+ return LogicalResult::success();
}
// Clears state bounds and operand state.
return numElements;
}
-Status MemRefRegion::unionBoundingBox(const MemRefRegion &other) {
+LogicalResult MemRefRegion::unionBoundingBox(const MemRefRegion &other) {
assert(memref == other.memref);
return cst.unionBoundingBox(*other.getConstraints());
}
//
// TODO(bondhugula): extend this to any other memref dereferencing ops
// (dma_start, dma_wait).
-Status MemRefRegion::compute(Instruction *inst, unsigned loopDepth,
- ComputationSliceState *sliceState) {
+LogicalResult MemRefRegion::compute(Instruction *inst, unsigned loopDepth,
+ ComputationSliceState *sliceState) {
assert((inst->isa<LoadOp>() || inst->isa<StoreOp>()) &&
"load/store op expected");
extractForInductionVars(ivs, ®ionSymbols);
// A rank 0 memref has a 0-d region.
cst.reset(rank, loopDepth, 0, regionSymbols);
- return Status::success();
+ return LogicalResult::success();
}
// Build the constraints for this region.
// TODO(bondhugula): rewrite this to use getInstIndexSet; this way
// conditionals will be handled when the latter supports it.
if (failed(cst.addAffineForOpDomain(loop)))
- return Status::failure();
+ return LogicalResult::failure();
} else {
// Has to be a valid symbol.
auto *symbol = operand;
cst.addDimOrSymbolId(const_cast<Value *>(operand));
}
// Add upper/lower bounds from 'sliceState' to 'cst'.
- Status ret = cst.addSliceBounds(sliceState->ivs, sliceState->lbs,
- sliceState->ubs, sliceState->lbOperands[0]);
+ LogicalResult ret =
+ cst.addSliceBounds(sliceState->ivs, sliceState->lbs, sliceState->ubs,
+ sliceState->lbOperands[0]);
assert(succeeded(ret) &&
"should not fail as we never have semi-affine slice maps");
(void)ret;
if (failed(cst.composeMap(&accessValueMap))) {
inst->emitError("getMemRefRegion: compose affine map failed");
LLVM_DEBUG(accessValueMap.getAffineMap().dump());
- return Status::failure();
+ return LogicalResult::failure();
}
// Set all identifiers appearing after the first 'rank' identifiers as
LLVM_DEBUG(llvm::dbgs() << "Memory region:\n");
LLVM_DEBUG(cst.dump());
- return Status::success();
+ return LogicalResult::success();
}
// TODO(mlir-team): improve/complete this when we have target data.
}
template <typename LoadOrStoreOpPointer>
-Status mlir::boundCheckLoadOrStoreOp(LoadOrStoreOpPointer loadOrStoreOp,
- bool emitError) {
+LogicalResult mlir::boundCheckLoadOrStoreOp(LoadOrStoreOpPointer loadOrStoreOp,
+ bool emitError) {
static_assert(
std::is_same<LoadOrStoreOpPointer, OpPointer<LoadOp>>::value ||
std::is_same<LoadOrStoreOpPointer, OpPointer<StoreOp>>::value,
MemRefRegion region(opInst->getLoc());
if (failed(region.compute(opInst, /*loopDepth=*/0)))
- return Status::success();
+ return LogicalResult::success();
LLVM_DEBUG(llvm::dbgs() << "Memory region");
LLVM_DEBUG(region.getConstraints()->dump());
"memref out of lower bound access along dimension #" + Twine(r + 1));
}
}
- return outOfBounds ? Status::failure() : Status::success();
+ return outOfBounds ? LogicalResult::failure() : LogicalResult::success();
}
// Explicitly instantiate the template so that the compiler knows we need them!
-template Status mlir::boundCheckLoadOrStoreOp(OpPointer<LoadOp> loadOp,
- bool emitError);
-template Status mlir::boundCheckLoadOrStoreOp(OpPointer<StoreOp> storeOp,
- bool emitError);
+template LogicalResult mlir::boundCheckLoadOrStoreOp(OpPointer<LoadOp> loadOp,
+ bool emitError);
+template LogicalResult mlir::boundCheckLoadOrStoreOp(OpPointer<StoreOp> storeOp,
+ bool emitError);
// Returns in 'positions' the Block positions of 'inst' in each ancestor
// Block from the Block containing instruction, stopping at 'limitBlock'.
// out any dst loop IVs at depth greater than 'dstLoopDepth', and computes slice
// bounds in 'sliceState' which represent the src IVs in terms of the dst IVs,
// symbols and constants.
-Status mlir::getBackwardComputationSliceState(
+LogicalResult mlir::getBackwardComputationSliceState(
const MemRefAccess &srcAccess, const MemRefAccess &dstAccess,
unsigned dstLoopDepth, ComputationSliceState *sliceState) {
bool readReadAccesses =
if (!checkMemrefAccessDependence(
srcAccess, dstAccess, /*loopDepth=*/1, &dependenceConstraints,
/*dependenceComponents=*/nullptr, /*allowRAR=*/readReadAccesses)) {
- return Status::failure();
+ return LogicalResult::failure();
}
// Get loop nest surrounding src operation.
SmallVector<OpPointer<AffineForOp>, 4> srcLoopIVs;
unsigned numDstLoopIVs = dstLoopIVs.size();
if (dstLoopDepth > numDstLoopIVs) {
dstAccess.opInst->emitError("invalid destination loop depth");
- return Status::failure();
+ return LogicalResult::failure();
}
// Project out dimensions other than those up to 'dstLoopDepth'.
break;
}
- return Status::success();
+ return LogicalResult::success();
}
/// Creates a computation slice of the loop nest surrounding 'srcOpInst',
void Pass::anchor() {}
/// Forwarding function to execute this pass.
-Status FunctionPassBase::run(Function *fn, FunctionAnalysisManager &fam) {
+LogicalResult FunctionPassBase::run(Function *fn,
+ FunctionAnalysisManager &fam) {
// Initialize the pass state.
passState.emplace(fn, fam);
fam.invalidate(passState->preservedAnalyses);
-  // Return false if the pass signaled a failure.
+  // Return failure if the pass signaled a failure.
- return passState->irAndPassFailed.getInt() ? Status::failure()
- : Status::success();
+ return passState->irAndPassFailed.getInt() ? LogicalResult::failure()
+ : LogicalResult::success();
}
/// Forwarding function to execute this pass.
-Status ModulePassBase::run(Module *module, ModuleAnalysisManager &mam) {
+LogicalResult ModulePassBase::run(Module *module, ModuleAnalysisManager &mam) {
// Initialize the pass state.
passState.emplace(module, mam);
mam.invalidate(passState->preservedAnalyses);
-  // Return false if the pass signaled a failure.
+  // Return failure if the pass signaled a failure.
- return passState->irAndPassFailed.getInt() ? Status::failure()
- : Status::success();
+ return passState->irAndPassFailed.getInt() ? LogicalResult::failure()
+ : LogicalResult::success();
}
//===----------------------------------------------------------------------===//
FunctionPassExecutor &operator=(const FunctionPassExecutor &) = delete;
/// Run the executor on the given function.
- Status run(Function *function, FunctionAnalysisManager &fam);
+ LogicalResult run(Function *function, FunctionAnalysisManager &fam);
/// Add a pass to the current executor. This takes ownership over the provided
/// pass pointer.
ModulePassExecutor &operator=(const ModulePassExecutor &) = delete;
/// Run the executor on the given module.
- Status run(Module *module, ModuleAnalysisManager &mam);
+ LogicalResult run(Module *module, ModuleAnalysisManager &mam);
/// Add a pass to the current executor. This takes ownership over the provided
/// pass pointer.
} // end namespace mlir
/// Run all of the passes in this manager over the current function.
-Status detail::FunctionPassExecutor::run(Function *function,
- FunctionAnalysisManager &fam) {
+LogicalResult detail::FunctionPassExecutor::run(Function *function,
+ FunctionAnalysisManager &fam) {
// Run each of the held passes.
for (auto &pass : passes)
if (failed(pass->run(function, fam)))
- return Status::failure();
- return Status::success();
+ return LogicalResult::failure();
+ return LogicalResult::success();
}
/// Run all of the passes in this manager over the current module.
-Status detail::ModulePassExecutor::run(Module *module,
- ModuleAnalysisManager &mam) {
+LogicalResult detail::ModulePassExecutor::run(Module *module,
+ ModuleAnalysisManager &mam) {
// Run each of the held passes.
for (auto &pass : passes)
if (failed(pass->run(module, mam)))
- return Status::failure();
- return Status::success();
+ return LogicalResult::failure();
+ return LogicalResult::success();
}
//===----------------------------------------------------------------------===//
}
/// Run the passes within this manager on the provided module.
-Status PassManager::run(Module *module) {
+LogicalResult PassManager::run(Module *module) {
ModuleAnalysisManager mam(module);
return mpe->run(module, mam);
}
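A brief driver-side sketch; `runPasses` and the exit-code convention are hypothetical and only illustrate how the LLVM_NODISCARD result is consumed:

#include "mlir/IR/Module.h"
#include "mlir/Pass/PassManager.h"

using namespace mlir;

// Translate the pipeline's LogicalResult into a process exit code.
static int runPasses(PassManager &pm, Module *module) {
  return failed(pm.run(module)) ? 1 : 0;
}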
// conversion patterns and to convert function and block argument types.
// Converts the `module` in-place by replacing all existing functions with the
// converted ones.
- static Status convert(DialectConversion *conversion, Module *module);
+ static LogicalResult convert(DialectConversion *conversion, Module *module);
private:
// Constructs a FunctionConversion by storing the hooks.
// from `valueRemapping` and the converted blocks from `blockRemapping`, and
// passes them to `converter->rewriteTerminator` function defined in the
// pattern, together with `builder`.
- Status convertOpWithSuccessors(DialectOpConversion *converter,
- Instruction *op, FuncBuilder &builder);
+ LogicalResult convertOpWithSuccessors(DialectOpConversion *converter,
+ Instruction *op, FuncBuilder &builder);
// Converts an operation without successors. Extracts the converted operands
// from `valueRemapping` and passes them to the `converter->rewrite` function
// defined in the pattern, together with `builder`.
- Status convertOp(DialectOpConversion *converter, Instruction *op,
- FuncBuilder &builder);
+ LogicalResult convertOp(DialectOpConversion *converter, Instruction *op,
+ FuncBuilder &builder);
// Converts a block by traversing its instructions sequentially, looking for
// the first pattern match and dispatching the instruction conversion to
//
// After converting operations, traverses the successor blocks unless they
// have been visited already as indicated in `visitedBlocks`.
- Status convertBlock(Block *block, FuncBuilder &builder,
- llvm::DenseSet<Block *> &visitedBlocks);
+ LogicalResult convertBlock(Block *block, FuncBuilder &builder,
+ llvm::DenseSet<Block *> &visitedBlocks);
// Converts the module as follows.
// 1. Call `convertFunction` on each function of the module and collect the
// 2. Remap all function attributes in the new functions to point to the new
// functions instead of the old ones.
// 3. Replace old functions with the new in the module.
- Status run(Module *m);
+ LogicalResult run(Module *m);
// Pointer to a specific dialect pass.
DialectConversion *dialectConversion;
return remapped;
}
-Status impl::FunctionConversion::convertOpWithSuccessors(
+LogicalResult impl::FunctionConversion::convertOpWithSuccessors(
DialectOpConversion *converter, Instruction *op, FuncBuilder &builder) {
SmallVector<Block *, 2> destinations;
destinations.reserve(op->getNumSuccessors());
llvm::makeArrayRef(operands.data(),
operands.data() + firstSuccessorOperand),
destinations, operandsPerDestination, builder);
- return Status::success();
+ return LogicalResult::success();
}
-Status impl::FunctionConversion::convertOp(DialectOpConversion *converter,
- Instruction *op,
- FuncBuilder &builder) {
+LogicalResult
+impl::FunctionConversion::convertOp(DialectOpConversion *converter,
+ Instruction *op, FuncBuilder &builder) {
auto operands = lookupValues(op->getOperands());
assert((!operands.empty() || op->getNumOperands() == 0) &&
"converting op before ops defining its operands");
auto results = converter->rewrite(op, operands, builder);
if (results.size() != op->getNumResults())
return (op->emitError("rewriting produced a different number of results"),
- Status::failure());
+ LogicalResult::failure());
for (unsigned i = 0, e = results.size(); i < e; ++i)
mapping.map(op->getResult(i), results[i]);
- return Status::success();
+ return LogicalResult::success();
}
-Status
+LogicalResult
impl::FunctionConversion::convertBlock(Block *block, FuncBuilder &builder,
llvm::DenseSet<Block *> &visitedBlocks) {
// First, add the current block to the list of visited blocks.
for (Instruction &inst : *block) {
if (inst.getNumBlockLists() != 0) {
inst.emitError("unsupported region instruction");
- return Status::failure();
+ return LogicalResult::failure();
}
// Find the first matching conversion and apply it.
if (inst.getNumSuccessors() != 0) {
if (failed(convertOpWithSuccessors(conversion, &inst, builder)))
- return Status::failure();
+ return LogicalResult::failure();
} else if (failed(convertOp(conversion, &inst, builder))) {
- return Status::failure();
+ return LogicalResult::failure();
}
converted = true;
break;
if (visitedBlocks.count(succ) != 0)
continue;
if (failed(convertBlock(succ, builder, visitedBlocks)))
- return Status::failure();
+ return LogicalResult::failure();
}
- return Status::success();
+ return LogicalResult::success();
}
Function *impl::FunctionConversion::convertFunction(Function *f) {
return newFunction.release();
}
-Status impl::FunctionConversion::convert(DialectConversion *conversion,
- Module *module) {
+LogicalResult impl::FunctionConversion::convert(DialectConversion *conversion,
+ Module *module) {
return impl::FunctionConversion(conversion).run(module);
}
-Status impl::FunctionConversion::run(Module *module) {
+LogicalResult impl::FunctionConversion::run(Module *module) {
if (!module)
- return Status::failure();
+ return LogicalResult::failure();
MLIRContext *context = module->getContext();
conversions = dialectConversion->initConverters(context);
for (auto *func : originalFuncs) {
Function *converted = convertFunction(func);
if (!converted)
- return Status::failure();
+ return LogicalResult::failure();
auto origFuncAttr = FunctionAttr::get(func, context);
auto convertedFuncAttr = FunctionAttr::get(converted, context);
for (auto *func : convertedFuncs)
module->getFunctions().push_back(func);
- return Status::success();
+ return LogicalResult::success();
}
// Create a function type with arguments and results converted, and argument
FunctionType::get(arguments, results, type.getContext()), argAttrs.vec());
}
-Status DialectConversion::convert(Module *m) {
+LogicalResult DialectConversion::convert(Module *m) {
return impl::FunctionConversion::convert(this, m);
}
/// Tiles the specified band of perfectly nested loops creating tile-space loops
/// and intra-tile loops. A band is a contiguous set of loops.
// TODO(bondhugula): handle non hyper-rectangular spaces.
-Status mlir::tileCodeGen(MutableArrayRef<OpPointer<AffineForOp>> band,
- ArrayRef<unsigned> tileSizes) {
+LogicalResult mlir::tileCodeGen(MutableArrayRef<OpPointer<AffineForOp>> band,
+ ArrayRef<unsigned> tileSizes) {
assert(!band.empty());
assert(band.size() == tileSizes.size() && "Incorrect number of tile sizes");
if (!cst.isHyperRectangular(0, width)) {
rootAffineForOp->emitError("tiled code generation unimplemented for the "
"non-hyperrectangular case");
- return Status::failure();
+ return LogicalResult::failure();
}
constructTiledIndexSetHyperRect(origLoops, newLoops, tileSizes);
// Erase the old loop nest.
rootAffineForOp->erase();
- return Status::success();
+ return LogicalResult::success();
}
// Identify valid and profitable bands of loops to tile. This is currently just
void runOnFunction() override;
/// Unroll this for inst. Returns failure if nothing was done.
- Status runOnAffineForOp(OpPointer<AffineForOp> forOp);
+ LogicalResult runOnAffineForOp(OpPointer<AffineForOp> forOp);
static const unsigned kDefaultUnrollFactor = 4;
};
}
}
-/// Unrolls a 'for' inst. Returns true if the loop was unrolled, false
+/// Unrolls a 'for' inst. Returns success if the loop was unrolled, failure
/// otherwise. The default unroll factor is 4.
-Status LoopUnroll::runOnAffineForOp(OpPointer<AffineForOp> forOp) {
+LogicalResult LoopUnroll::runOnAffineForOp(OpPointer<AffineForOp> forOp) {
// Use the function callback if one was provided.
if (getUnrollFactor) {
return loopUnrollByFactor(forOp, getUnrollFactor(forOp));
: unrollJamFactor(unrollJamFactor) {}
void runOnFunction() override;
- Status runOnAffineForOp(OpPointer<AffineForOp> forOp);
+ LogicalResult runOnAffineForOp(OpPointer<AffineForOp> forOp);
};
} // end anonymous namespace
/// Unroll and jam a 'for' inst. Default unroll jam factor is
/// kDefaultUnrollJamFactor. Return failure if nothing was done.
-Status LoopUnrollAndJam::runOnAffineForOp(OpPointer<AffineForOp> forOp) {
+LogicalResult LoopUnrollAndJam::runOnAffineForOp(OpPointer<AffineForOp> forOp) {
// Unroll and jam by the factor that was passed if any.
if (unrollJamFactor.hasValue())
return loopUnrollJamByFactor(forOp, unrollJamFactor.getValue());
return loopUnrollJamByFactor(forOp, kDefaultUnrollJamFactor);
}
-Status mlir::loopUnrollJamUpToFactor(OpPointer<AffineForOp> forOp,
- uint64_t unrollJamFactor) {
+LogicalResult mlir::loopUnrollJamUpToFactor(OpPointer<AffineForOp> forOp,
+ uint64_t unrollJamFactor) {
Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
if (mayBeConstantTripCount.hasValue() &&
}
/// Unrolls and jams this loop by the specified factor.
-Status mlir::loopUnrollJamByFactor(OpPointer<AffineForOp> forOp,
- uint64_t unrollJamFactor) {
+LogicalResult mlir::loopUnrollJamByFactor(OpPointer<AffineForOp> forOp,
+ uint64_t unrollJamFactor) {
// Gathers all maximal sub-blocks of instructions that do not themselves
// include a for inst (an instruction could have a descendant for inst though
// in its tree).
assert(unrollJamFactor >= 1 && "unroll jam factor should be >= 1");
if (unrollJamFactor == 1 || forOp->getBody()->empty())
- return Status::failure();
+ return LogicalResult::failure();
Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
if (!mayBeConstantTripCount.hasValue() &&
getLargestDivisorOfTripCount(forOp) % unrollJamFactor != 0)
- return Status::failure();
+ return LogicalResult::failure();
auto lbMap = forOp->getLowerBoundMap();
auto ubMap = forOp->getUpperBoundMap();
// do such unrolling for a Function would be to specialize the loop for the
// 'hotspot' case and unroll that hotspot.
if (lbMap.getNumResults() != 1 || ubMap.getNumResults() != 1)
- return Status::failure();
+ return LogicalResult::failure();
// Same operand list for lower and upper bound for now.
// TODO(bondhugula): handle bounds with different sets of operands.
if (!forOp->matchingBoundOperandList())
- return Status::failure();
+ return LogicalResult::failure();
// If the trip count is lower than the unroll jam factor, no unroll jam.
// TODO(bondhugula): option to specify cleanup loop unrolling.
if (mayBeConstantTripCount.hasValue() &&
mayBeConstantTripCount.getValue() < unrollJamFactor)
- return Status::failure();
+ return LogicalResult::failure();
auto *forInst = forOp->getInstruction();
// Promote the loop body up if this has turned into a single iteration loop.
promoteIfSingleIteration(forOp);
- return Status::success();
+ return LogicalResult::success();
}
static PassRegistration<LoopUnrollAndJam> pass("loop-unroll-jam",
/// Promotes the loop body of a forOp to its containing block if the forOp
/// was known to have a single iteration.
// TODO(bondhugula): extend this for arbitrary affine bounds.
-Status mlir::promoteIfSingleIteration(OpPointer<AffineForOp> forOp) {
+LogicalResult mlir::promoteIfSingleIteration(OpPointer<AffineForOp> forOp) {
Optional<uint64_t> tripCount = getConstantTripCount(forOp);
if (!tripCount.hasValue() || tripCount.getValue() != 1)
- return Status::failure();
+ return LogicalResult::failure();
// TODO(mlir-team): there is no builder for a max.
if (forOp->getLowerBoundMap().getNumResults() != 1)
- return Status::failure();
+ return LogicalResult::failure();
// Replaces all uses of the IV with its single iteration value.
auto *iv = forOp->getInductionVar();
block->getInstructions().splice(Block::iterator(forInst),
forOp->getBody()->getInstructions());
forOp->erase();
- return Status::success();
+ return LogicalResult::success();
}
/// Promotes all single iteration for inst's in the Function, i.e., moves
// asserts preservation of SSA dominance. A check for that as well as that for
// memory-based dependence preservation check rests with the users of this
// method.
-Status mlir::instBodySkew(OpPointer<AffineForOp> forOp,
- ArrayRef<uint64_t> shifts,
- bool unrollPrologueEpilogue) {
+LogicalResult mlir::instBodySkew(OpPointer<AffineForOp> forOp,
+ ArrayRef<uint64_t> shifts,
+ bool unrollPrologueEpilogue) {
if (forOp->getBody()->empty())
- return Status::success();
+ return LogicalResult::success();
// If the trip counts aren't constant, we would need versioning and
// conditional guards (or context information to prevent such versioning). The
auto mayBeConstTripCount = getConstantTripCount(forOp);
if (!mayBeConstTripCount.hasValue()) {
LLVM_DEBUG(forOp->emitNote("non-constant trip count loop not handled"));
- return Status::success();
+ return LogicalResult::success();
}
uint64_t tripCount = mayBeConstTripCount.getValue();
// Such large shifts are not the typical use case.
if (maxShift >= numChildInsts) {
forOp->emitWarning("not shifting because shifts are unrealistically large");
- return Status::success();
+ return LogicalResult::success();
}
// An array of instruction groups sorted by shift amount; each group has all
epilogue->getInstruction() != prologue->getInstruction())
loopUnrollFull(epilogue);
- return Status::success();
+ return LogicalResult::success();
}
/// Unrolls this loop completely.
-Status mlir::loopUnrollFull(OpPointer<AffineForOp> forOp) {
+LogicalResult mlir::loopUnrollFull(OpPointer<AffineForOp> forOp) {
Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
if (mayBeConstantTripCount.hasValue()) {
uint64_t tripCount = mayBeConstantTripCount.getValue();
}
return loopUnrollByFactor(forOp, tripCount);
}
- return Status::failure();
+ return LogicalResult::failure();
}
/// Unrolls this loop by the specified factor or by the trip count (if
/// constant), whichever is lower.
-Status mlir::loopUnrollUpToFactor(OpPointer<AffineForOp> forOp,
- uint64_t unrollFactor) {
+LogicalResult mlir::loopUnrollUpToFactor(OpPointer<AffineForOp> forOp,
+ uint64_t unrollFactor) {
Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
if (mayBeConstantTripCount.hasValue() &&
/// Unrolls this loop by the specified factor. Returns success if the loop
/// is successfully unrolled.
-Status mlir::loopUnrollByFactor(OpPointer<AffineForOp> forOp,
- uint64_t unrollFactor) {
+LogicalResult mlir::loopUnrollByFactor(OpPointer<AffineForOp> forOp,
+ uint64_t unrollFactor) {
assert(unrollFactor >= 1 && "unroll factor should be >= 1");
if (unrollFactor == 1)
return promoteIfSingleIteration(forOp);
if (forOp->getBody()->empty())
- return Status::failure();
+ return LogicalResult::failure();
auto lbMap = forOp->getLowerBoundMap();
auto ubMap = forOp->getUpperBoundMap();
// do such unrolling for a Function would be to specialize the loop for the
// 'hotspot' case and unroll that hotspot.
if (lbMap.getNumResults() != 1 || ubMap.getNumResults() != 1)
- return Status::failure();
+ return LogicalResult::failure();
// Same operand list for lower and upper bound for now.
// TODO(bondhugula): handle bounds with different operand lists.
if (!forOp->matchingBoundOperandList())
- return Status::failure();
+ return LogicalResult::failure();
Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
// TODO(bondhugula): option to specify cleanup loop unrolling.
if (mayBeConstantTripCount.hasValue() &&
mayBeConstantTripCount.getValue() < unrollFactor)
- return Status::failure();
+ return LogicalResult::failure();
// Generate the cleanup loop if trip count isn't a multiple of unrollFactor.
Instruction *forInst = forOp->getInstruction();
// Promote the loop body up if this has turned into a single iteration loop.
promoteIfSingleIteration(forOp);
- return Status::success();
+ return LogicalResult::success();
}
/// Performs loop interchange on 'forOpA' and 'forOpB', where 'forOpB' is