From 41d90a85bd7942a9a27011f09c3a49cc32fdaeae Mon Sep 17 00:00:00 2001
From: MLIR Team
Date: Sat, 11 May 2019 15:24:47 -0700
Subject: [PATCH] Automated rollback of changelist 247778391.

PiperOrigin-RevId: 247778691
---
 mlir/examples/Linalg/Linalg1/lib/Analysis.cpp      |  4 +-
 mlir/examples/Linalg/Linalg1/lib/Common.cpp        |  2 +-
 mlir/examples/Linalg/Linalg1/lib/Utils.cpp         |  2 +-
 mlir/examples/Linalg/Linalg2/lib/Transforms.cpp    |  4 +-
 .../Linalg/Linalg3/include/linalg3/TensorOps-inl.h |  2 +-
 mlir/examples/Linalg/Linalg3/lib/Transforms.cpp    | 14 +++---
 mlir/examples/Linalg/Linalg4/lib/Transforms.cpp    | 16 +++----
 mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp  |  8 ++--
 mlir/examples/toy/Ch4/mlir/ToyCombine.cpp          |  4 +-
 mlir/examples/toy/Ch5/mlir/LateLowering.cpp        |  8 ++--
 mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp  |  6 +--
 mlir/include/mlir/EDSC/Builders.h                  |  2 +-
 mlir/include/mlir/IR/Builders.h                    |  2 +-
 mlir/include/mlir/IR/Function.h                    |  2 +-
 mlir/include/mlir/IR/OpDefinition.h                | 12 +++--
 mlir/include/mlir/IR/Operation.h                   | 51 ++++++++++------------
 mlir/include/mlir/IR/PatternMatch.h                |  4 +-
 mlir/include/mlir/Support/LLVM.h                   |  1 -
 mlir/lib/AffineOps/AffineOps.cpp                   |  8 ++--
 mlir/lib/Analysis/LoopAnalysis.cpp                 |  8 ++--
 mlir/lib/Analysis/MemRefBoundCheck.cpp             |  4 +-
 mlir/lib/Analysis/SliceAnalysis.cpp                |  2 +-
 mlir/lib/Analysis/Utils.cpp                        | 12 ++---
 mlir/lib/Analysis/VectorAnalysis.cpp               |  8 ++--
 mlir/lib/EDSC/Builders.cpp                         |  6 +--
 mlir/lib/Linalg/Transforms/Tiling.cpp              |  6 +--
 mlir/lib/Linalg/Utils/Utils.cpp                    |  6 +--
 mlir/lib/StandardOps/Ops.cpp                       |  2 +-
 mlir/lib/Target/LLVMIR/ModuleTranslation.cpp       |  6 +--
 mlir/lib/Transforms/DmaGeneration.cpp              | 12 ++---
 mlir/lib/Transforms/LoopFusion.cpp                 |  8 ++--
 mlir/lib/Transforms/LoopTiling.cpp                 |  2 +-
 mlir/lib/Transforms/LoopUnrollAndJam.cpp           |  2 +-
 mlir/lib/Transforms/LowerAffine.cpp                |  4 +-
 mlir/lib/Transforms/MaterializeVectors.cpp         |  4 +-
 mlir/lib/Transforms/MemRefDataFlowOpt.cpp          |  2 +-
 mlir/lib/Transforms/PipelineDataTransfer.cpp       |  4 +-
 mlir/lib/Transforms/TestConstantFold.cpp           |  2 +-
 mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp    |  2 +-
 mlir/lib/Transforms/Utils/LoopUtils.cpp            |  2 +-
 .../Vectorization/VectorizerTestPass.cpp           |  2 +-
 mlir/lib/Transforms/Vectorize.cpp                  |  8 ++--
 mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp     |  4 +-
 43 files changed, 130 insertions(+), 140 deletions(-)

diff --git a/mlir/examples/Linalg/Linalg1/lib/Analysis.cpp b/mlir/examples/Linalg/Linalg1/lib/Analysis.cpp
index a7fba17..ecb6309 100644
--- a/mlir/examples/Linalg/Linalg1/lib/Analysis.cpp
+++ b/mlir/examples/Linalg/Linalg1/lib/Analysis.cpp
@@ -31,7 +31,7 @@ ViewOp linalg::getViewBaseViewOp(Value *view) {
   auto viewType = view->getType().dyn_cast<ViewType>();
   (void)viewType;
   assert(viewType.isa<ViewType>() && "expected a ViewType");
-  while (auto slice = dyn_cast<SliceOp>(view->getDefiningOp())) {
+  while (auto slice = view->getDefiningOp()->dyn_cast<SliceOp>()) {
     view = slice.getParentView();
     assert(viewType.isa<ViewType>() && "expected a ViewType");
   }
@@ -48,7 +48,7 @@ std::pair<mlir::Value *, unsigned> linalg::getViewRootIndexing(Value *view,
   (void)viewType;
   assert(viewType.isa<ViewType>() && "expected a ViewType");
   assert(dim < viewType.getRank() && "dim exceeds rank");
-  if (auto viewOp = dyn_cast<ViewOp>(view->getDefiningOp()))
+  if (auto viewOp = view->getDefiningOp()->dyn_cast<ViewOp>())
     return std::make_pair(viewOp.getIndexing(dim), dim);
 
   auto sliceOp = view->getDefiningOp()->cast<SliceOp>();
diff --git a/mlir/examples/Linalg/Linalg1/lib/Common.cpp b/mlir/examples/Linalg/Linalg1/lib/Common.cpp
index 278f9c5..bfdc40a 100644
--- a/mlir/examples/Linalg/Linalg1/lib/Common.cpp
+++ b/mlir/examples/Linalg/Linalg1/lib/Common.cpp
@@ -40,7 +40,7 @@ linalg::common::LoopNestRangeBuilder::LoopNestRangeBuilder(
   assert(ivs.size() == indexings.size());
   for (unsigned i = 0, e = indexings.size(); i < e; ++i) {
     auto rangeOp =
-        llvm::dyn_cast<RangeOp>(indexings[i].getValue()->getDefiningOp());
+        indexings[i].getValue()->getDefiningOp()->dyn_cast<RangeOp>();
     if (!rangeOp) {
       continue;
     }
diff --git a/mlir/examples/Linalg/Linalg1/lib/Utils.cpp b/mlir/examples/Linalg/Linalg1/lib/Utils.cpp
index 5bcebc7..372c08f 100644
--- a/mlir/examples/Linalg/Linalg1/lib/Utils.cpp
+++ b/mlir/examples/Linalg/Linalg1/lib/Utils.cpp
@@ -33,7 +33,7 @@ using namespace linalg::intrinsics;
 
 unsigned linalg::getViewRank(Value *view) {
   assert(view->getType().isa<ViewType>() && "expected a ViewType");
-  if (auto viewOp = dyn_cast<ViewOp>(view->getDefiningOp()))
+  if (auto viewOp = view->getDefiningOp()->dyn_cast<ViewOp>())
     return viewOp.getRank();
   return view->getDefiningOp()->cast<SliceOp>().getRank();
 }
diff --git a/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp b/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp
index 83fd9ad..d1af750 100644
--- a/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp
+++ b/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp
@@ -43,7 +43,7 @@ using namespace linalg::intrinsics;
 // analyses. This builds the chain.
 static SmallVector<mlir::Value *, 8> getViewChain(mlir::Value *v) {
   assert(v->getType().isa<ViewType>() && "ViewType expected");
-  if (v->getDefiningOp()->isa<ViewOp>()) {
+  if (v->getDefiningOp()->dyn_cast<ViewOp>()) {
     return SmallVector<mlir::Value *, 8>{v};
   }
 
@@ -53,7 +53,7 @@ static SmallVector<mlir::Value *, 8> getViewChain(mlir::Value *v) {
     tmp.push_back(v);
     v = sliceOp.getParentView();
   } while (!v->getType().isa<ViewType>());
-  assert(v->getDefiningOp()->isa<ViewOp>() && "must be a ViewOp");
+  assert(v->getDefiningOp()->cast<ViewOp>() && "must be a ViewOp");
   tmp.push_back(v);
   return SmallVector<mlir::Value *, 8>(tmp.rbegin(), tmp.rend());
 }
diff --git a/mlir/examples/Linalg/Linalg3/include/linalg3/TensorOps-inl.h b/mlir/examples/Linalg/Linalg3/include/linalg3/TensorOps-inl.h
index 3090f29..9339d73 100644
--- a/mlir/examples/Linalg/Linalg3/include/linalg3/TensorOps-inl.h
+++ b/mlir/examples/Linalg/Linalg3/include/linalg3/TensorOps-inl.h
@@ -91,7 +91,7 @@ inline llvm::SmallVector<mlir::Value *, 8>
 extractRangesFromViewOrSliceOp(mlir::Value *view) {
   // This expects a viewType which must come from either ViewOp or SliceOp.
   assert(view->getType().isa<linalg::ViewType>() && "expected ViewType");
-  if (auto viewOp = llvm::dyn_cast<linalg::ViewOp>(view->getDefiningOp()))
+  if (auto viewOp = view->getDefiningOp()->dyn_cast<linalg::ViewOp>())
     return viewOp.getRanges();
 
   auto sliceOp = view->getDefiningOp()->cast<linalg::SliceOp>();
diff --git a/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp b/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
index bce7f58..42999ae 100644
--- a/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
+++ b/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
@@ -46,9 +46,9 @@ void linalg::composeSliceOps(mlir::Function *f) {
 
 void linalg::lowerToFinerGrainedTensorContraction(mlir::Function *f) {
   f->walk([](Operation *op) {
-    if (auto matmulOp = dyn_cast<MatmulOp>(op)) {
+    if (auto matmulOp = op->dyn_cast<MatmulOp>()) {
       matmulOp.writeAsFinerGrainTensorContraction();
-    } else if (auto matvecOp = dyn_cast<MatvecOp>(op)) {
+    } else if (auto matvecOp = op->dyn_cast<MatvecOp>()) {
       matvecOp.writeAsFinerGrainTensorContraction();
     } else {
       return;
@@ -205,11 +205,11 @@ writeContractionAsLoops(ContractionOp contraction) {
 
 llvm::Optional<SmallVector<mlir::AffineForOp, 8>>
 linalg::writeAsLoops(Operation *op) {
-  if (auto matmulOp = dyn_cast<MatmulOp>(op)) {
+  if (auto matmulOp = op->dyn_cast<MatmulOp>()) {
     return writeContractionAsLoops(matmulOp);
-  } else if (auto matvecOp = dyn_cast<MatvecOp>(op)) {
+  } else if (auto matvecOp = op->dyn_cast<MatvecOp>()) {
     return writeContractionAsLoops(matvecOp);
-  } else if (auto dotOp = dyn_cast<DotOp>(op)) {
+  } else if (auto dotOp = op->dyn_cast<DotOp>()) {
     return writeContractionAsLoops(dotOp);
   }
   return llvm::None;
@@ -276,7 +276,7 @@ PatternMatchResult
 Rewriter<linalg::LoadOp>::matchAndRewrite(Operation *op,
                                           PatternRewriter &rewriter) const {
   auto load = op->cast<linalg::LoadOp>();
-  SliceOp slice = dyn_cast<SliceOp>(load.getView()->getDefiningOp());
+  SliceOp slice = load.getView()->getDefiningOp()->dyn_cast<SliceOp>();
   ViewOp view = slice ? emitAndReturnFullyComposedView(slice.getResult())
                       : load.getView()->getDefiningOp()->cast<ViewOp>();
   ScopedContext scope(FuncBuilder(load), load.getLoc());
@@ -291,7 +291,7 @@ PatternMatchResult
 Rewriter<linalg::StoreOp>::matchAndRewrite(Operation *op,
                                            PatternRewriter &rewriter) const {
   auto store = op->cast<linalg::StoreOp>();
-  SliceOp slice = dyn_cast<SliceOp>(store.getView()->getDefiningOp());
+  SliceOp slice = store.getView()->getDefiningOp()->dyn_cast<SliceOp>();
   ViewOp view = slice ? emitAndReturnFullyComposedView(slice.getResult())
                       : store.getView()->getDefiningOp()->cast<ViewOp>();
   ScopedContext scope(FuncBuilder(store), store.getLoc());
diff --git a/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp b/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp
index 6771257..05865e9 100644
--- a/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp
+++ b/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp
@@ -52,8 +52,8 @@ void linalg::lowerToTiledLoops(mlir::Function *f,
 }
 
 static bool isZeroIndex(Value *v) {
-  return isa_and_nonnull<ConstantIndexOp>(v->getDefiningOp()) &&
-         cast<ConstantIndexOp>(v->getDefiningOp()).getValue() == 0;
+  return v->getDefiningOp() && v->getDefiningOp()->isa<ConstantIndexOp>() &&
+         v->getDefiningOp()->dyn_cast<ConstantIndexOp>().getValue() == 0;
 }
 
 template <class ConcreteOp>
@@ -178,11 +178,11 @@ writeContractionAsTiledViews(TensorContractionBase<ConcreteOp> &contraction,
 
 llvm::Optional<SmallVector<mlir::AffineForOp, 8>>
 linalg::writeAsTiledViews(Operation *op, ArrayRef<Value *> tileSizes) {
-  if (auto matmulOp = dyn_cast<MatmulOp>(op)) {
+  if (auto matmulOp = op->dyn_cast<MatmulOp>()) {
    return writeContractionAsTiledViews(matmulOp, tileSizes);
-  } else if (auto matvecOp = dyn_cast<MatvecOp>(op)) {
+  } else if (auto matvecOp = op->dyn_cast<MatvecOp>()) {
    return writeContractionAsTiledViews(matvecOp, tileSizes);
-  } else if (auto dotOp = dyn_cast<DotOp>(op)) {
+  } else if (auto dotOp = op->dyn_cast<DotOp>()) {
    return writeContractionAsTiledViews(dotOp, tileSizes);
   }
   return llvm::None;
@@ -190,11 +190,11 @@ linalg::writeAsTiledViews(Operation *op, ArrayRef<Value *> tileSizes) {
 void linalg::lowerToTiledViews(mlir::Function *f, ArrayRef<Value *> tileSizes) {
   f->walk([tileSizes](Operation *op) {
-    if (auto matmulOp = dyn_cast<MatmulOp>(op)) {
+    if (auto matmulOp = op->dyn_cast<MatmulOp>()) {
       writeAsTiledViews(matmulOp, tileSizes);
-    } else if (auto matvecOp = dyn_cast<MatvecOp>(op)) {
+    } else if (auto matvecOp = op->dyn_cast<MatvecOp>()) {
       writeAsTiledViews(matvecOp, tileSizes);
-    } else if (auto dotOp = dyn_cast<DotOp>(op)) {
+    } else if (auto dotOp = op->dyn_cast<DotOp>()) {
       writeAsTiledViews(dotOp, tileSizes);
     } else {
       return;
diff --git a/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
index c9f98e7..a11c882 100644
--- a/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
@@ -238,13 +238,13 @@ public:
       LLVM_DEBUG(llvm::dbgs() << "Inferring shape for: " << *op << "\n");
 
       // The add operation is trivial: propagate the input type as is.
-      if (auto addOp = llvm::dyn_cast<AddOp>(op)) {
+      if (auto addOp = op->dyn_cast<AddOp>()) {
         op->getResult(0)->setType(op->getOperand(0)->getType());
         continue;
       }
 
       // Transpose is easy: just invert the dimensions.
-      if (auto transpose = llvm::dyn_cast<TransposeOp>(op)) {
+      if (auto transpose = op->dyn_cast<TransposeOp>()) {
         SmallVector<int64_t, 2> dims;
         auto arrayTy = transpose.getOperand()->getType().cast<ToyArrayType>();
         dims.insert(dims.end(), arrayTy.getShape().begin(),
@@ -259,7 +259,7 @@ public:
       // catch it but shape inference earlier in the pass could generate an
       // invalid IR (from an invalid Toy input of course) and we wouldn't want
       // to crash here.
-      if (auto mulOp = llvm::dyn_cast<MulOp>(op)) {
+      if (auto mulOp = op->dyn_cast<MulOp>()) {
         auto lhs = mulOp.getLHS()->getType().cast<ToyArrayType>();
         auto rhs = mulOp.getRHS()->getType().cast<ToyArrayType>();
         auto lhsRank = lhs.getShape().size();
@@ -291,7 +291,7 @@ public:
       // for this function, queue the callee in the inter-procedural work list,
      // and return. The current function stays in the work list and will
      // restart after the callee is processed.
-      if (auto callOp = llvm::dyn_cast<GenericCallOp>(op)) {
+      if (auto callOp = op->dyn_cast<GenericCallOp>()) {
        auto calleeName = callOp.getCalleeName();
        auto *callee = getModule().getNamedFunction(calleeName);
        if (!callee) {
diff --git a/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp
index 942ce86..f3e8ff0 100644
--- a/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp
+++ b/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp
@@ -53,7 +53,7 @@ struct SimplifyRedundantTranspose : public mlir::RewritePattern {
     // Look through the input of the current transpose.
     mlir::Value *transposeInput = transpose.getOperand();
     TransposeOp transposeInputOp =
-        llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
+        mlir::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
     // If the input is defined by another Transpose, bingo!
     if (!transposeInputOp)
       return matchFailure();
@@ -75,7 +75,7 @@ struct SimplifyReshapeConstant : public mlir::RewritePattern {
                   mlir::PatternRewriter &rewriter) const override {
     ReshapeOp reshape = op->cast<ReshapeOp>();
     // Look through the input of the current reshape.
-    ConstantOp constantOp = llvm::dyn_cast_or_null<ConstantOp>(
+    ConstantOp constantOp = mlir::dyn_cast_or_null<ConstantOp>(
         reshape.getOperand()->getDefiningOp());
     // If the input is defined by another constant, bingo!
     if (!constantOp)
diff --git a/mlir/examples/toy/Ch5/mlir/LateLowering.cpp b/mlir/examples/toy/Ch5/mlir/LateLowering.cpp
index 534b5cb..4ef62d3 100644
--- a/mlir/examples/toy/Ch5/mlir/LateLowering.cpp
+++ b/mlir/examples/toy/Ch5/mlir/LateLowering.cpp
@@ -366,7 +366,7 @@ struct LateLoweringPass : public ModulePass<LateLoweringPass> {
     // First patch calls type to return memref instead of ToyArray
     for (auto &function : getModule()) {
       function.walk([&](Operation *op) {
-        auto callOp = dyn_cast<CallOp>(op);
+        auto callOp = op->dyn_cast<CallOp>();
         if (!callOp)
           return;
         if (!callOp.getNumResults())
@@ -382,14 +382,14 @@ struct LateLoweringPass : public ModulePass<LateLoweringPass> {
     for (auto &function : getModule()) {
       function.walk([&](Operation *op) {
         // Turns toy.alloc into sequence of alloc/dealloc (later malloc/free).
-        if (auto allocOp = dyn_cast<AllocOp>(op)) {
+        if (auto allocOp = op->dyn_cast<AllocOp>()) {
           auto result = allocTensor(allocOp);
           allocOp.replaceAllUsesWith(result);
           allocOp.erase();
           return;
         }
         // Eliminate all type.cast before lowering to LLVM.
-        if (auto typeCastOp = dyn_cast<TypeCastOp>(op)) {
+        if (auto typeCastOp = op->dyn_cast<TypeCastOp>()) {
           typeCastOp.replaceAllUsesWith(typeCastOp.getOperand());
           typeCastOp.erase();
           return;
@@ -429,7 +429,7 @@ struct LateLoweringPass : public ModulePass<LateLoweringPass> {
     // Insert a `dealloc` operation right before the `return` operations, unless
     // it is returned itself in which case the caller is responsible for it.
     builder.getFunction()->walk([&](Operation *op) {
-      auto returnOp = dyn_cast<ReturnOp>(op);
+      auto returnOp = op->dyn_cast<ReturnOp>();
       if (!returnOp)
         return;
       if (returnOp.getNumOperands() && returnOp.getOperand(0) == alloc)
diff --git a/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
index 4e17b23..a083e62 100644
--- a/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
@@ -238,7 +238,7 @@ public:
       LLVM_DEBUG(llvm::dbgs() << "Inferring shape for: " << *op << "\n");
 
      // The add operation is trivial: propagate the input type as is.
-      if (auto addOp = llvm::dyn_cast<AddOp>(op)) {
+      if (auto addOp = op->dyn_cast<AddOp>()) {
        op->getResult(0)->setType(op->getOperand(0)->getType());
        continue;
      }
@@ -261,7 +261,7 @@ public:
      // catch it but shape inference earlier in the pass could generate an
      // invalid IR (from an invalid Toy input of course) and we wouldn't want
      // to crash here.
-      if (auto mulOp = llvm::dyn_cast<MulOp>(op)) {
+      if (auto mulOp = op->dyn_cast<MulOp>()) {
        auto lhs = mulOp.getLHS()->getType().cast<ToyArrayType>();
        auto rhs = mulOp.getRHS()->getType().cast<ToyArrayType>();
        auto lhsRank = lhs.getShape().size();
@@ -295,7 +295,7 @@ public:
      // for this function, queue the callee in the inter-procedural work list,
      // and return. The current function stays in the work list and will
      // restart after the callee is processed.
-      if (auto callOp = llvm::dyn_cast<GenericCallOp>(op)) {
+      if (auto callOp = op->dyn_cast<GenericCallOp>()) {
        auto calleeName = callOp.getCalleeName();
        auto *callee = getModule().getNamedFunction(calleeName);
        if (!callee) {
diff --git a/mlir/include/mlir/EDSC/Builders.h b/mlir/include/mlir/EDSC/Builders.h
index 39302f6..5d23488 100644
--- a/mlir/include/mlir/EDSC/Builders.h
+++ b/mlir/include/mlir/EDSC/Builders.h
@@ -439,7 +439,7 @@ ValueHandle ValueHandle::create(Args... args) {
   if (op->getNumResults() == 1) {
     return ValueHandle(op->getResult(0));
   } else if (op->getNumResults() == 0) {
-    if (auto f = dyn_cast<AffineForOp>(op)) {
+    if (auto f = op->dyn_cast<AffineForOp>()) {
       return ValueHandle(f.getInductionVar());
     }
   }
diff --git a/mlir/include/mlir/IR/Builders.h b/mlir/include/mlir/IR/Builders.h
index 7f182e8..1ee6c48 100644
--- a/mlir/include/mlir/IR/Builders.h
+++ b/mlir/include/mlir/IR/Builders.h
@@ -271,7 +271,7 @@ public:
     OperationState state(getContext(), location, OpTy::getOperationName());
     OpTy::build(this, &state, args...);
     auto *op = createOperation(state);
-    auto result = dyn_cast<OpTy>(op);
+    auto result = op->dyn_cast<OpTy>();
     assert(result && "Builder didn't return the right type");
     return result;
   }
diff --git a/mlir/include/mlir/IR/Function.h b/mlir/include/mlir/IR/Function.h
index d4b85b5..0770d2c 100644
--- a/mlir/include/mlir/IR/Function.h
+++ b/mlir/include/mlir/IR/Function.h
@@ -116,7 +116,7 @@ public:
   /// Specialization of walk to only visit operations of 'OpTy'.
   template <typename OpTy> void walk(std::function<void(OpTy)> callback) {
     walk([&](Operation *opInst) {
-      if (auto op = dyn_cast<OpTy>(opInst))
+      if (auto op = opInst->dyn_cast<OpTy>())
        callback(op);
    });
  }
diff --git a/mlir/include/mlir/IR/OpDefinition.h b/mlir/include/mlir/IR/OpDefinition.h
index 2eff412..b80e8ac 100644
--- a/mlir/include/mlir/IR/OpDefinition.h
+++ b/mlir/include/mlir/IR/OpDefinition.h
@@ -792,7 +792,7 @@ public:
   /// This is the hook used by the AsmPrinter to emit this to the .mlir file.
   /// Op implementations should provide a print method.
   static void printAssembly(Operation *op, OpAsmPrinter *p) {
-    auto opPointer = dyn_cast<ConcreteType>(op);
+    auto opPointer = op->dyn_cast<ConcreteType>();
     assert(opPointer &&
            "op's name does not match name of concrete type instantiated with");
     opPointer.print(p);
@@ -825,13 +825,11 @@ public:
   /// This is a public constructor. Any op can be initialized to null.
   explicit Op() : OpState(nullptr) {}
 
-  Op(std::nullptr_t) : OpState(nullptr) {}
-
-  /// This is a public constructor to enable access via the llvm::cast family of
-  /// methods. This should not be used directly.
-  explicit Op(Operation *state) : OpState(state) {
-    assert(!state || isa<ConcreteType>(state));
-  }
+protected:
+  /// This is a private constructor only accessible through the
+  /// Operation::cast family of methods.
+  explicit Op(Operation *state) : OpState(state) {}
+
   friend class Operation;
 
 private:
diff --git a/mlir/include/mlir/IR/Operation.h b/mlir/include/mlir/IR/Operation.h
index 31ec8ea..54e49b7 100644
--- a/mlir/include/mlir/IR/Operation.h
+++ b/mlir/include/mlir/IR/Operation.h
@@ -389,6 +389,14 @@ public:
   // Conversions to declared operations like DimOp
   //===--------------------------------------------------------------------===//
 
+  /// The dyn_cast methods perform a dynamic cast from an Operation to a typed
+  /// Op like DimOp. This returns a null Op on failure.
+  template <typename OpClass> OpClass dyn_cast() {
+    if (isa<OpClass>())
+      return cast<OpClass>();
+    return OpClass();
+  }
+
   /// The cast methods perform a cast from an Operation to a typed Op like
   /// DimOp. This aborts if the parameter to the template isn't an instance of
   /// the template type argument.
@@ -409,10 +417,10 @@ public:
   /// including this one.
   void walk(const std::function<void(Operation *)> &callback);
 
-  /// Specialization of walk to only visit operations of 'T'.
-  template <typename T> void walk(std::function<void(T)> callback) {
+  /// Specialization of walk to only visit operations of 'OpTy'.
+  template <typename OpTy> void walk(std::function<void(OpTy)> callback) {
     walk([&](Operation *op) {
-      if (auto derivedOp = dyn_cast<T>(op))
+      if (auto derivedOp = op->dyn_cast<OpTy>())
         callback(derivedOp);
     });
   }
@@ -526,6 +534,17 @@ inline auto Operation::getOperands() -> operand_range {
   return {operand_begin(), operand_end()};
 }
 
+/// Provide dyn_cast_or_null functionality for Operation casts.
+template <typename T> T dyn_cast_or_null(Operation *op) {
+  return op ? op->dyn_cast<T>() : T();
+}
+
+/// Provide isa_and_nonnull functionality for Operation casts, i.e. if the
+/// operation is non-null and a class of 'T'.
+template <typename T> bool isa_and_nonnull(Operation *op) {
+  return op && op->isa<T>();
+}
+
 /// This class implements the result iterators for the Operation class
 /// in terms of getResult(idx).
 class ResultIterator final
@@ -579,30 +598,4 @@ inline auto Operation::getResultTypes()
 
 } // end namespace mlir
 
-namespace llvm {
-/// Provide isa functionality for operation casts.
-template <typename T> struct isa_impl<T, ::mlir::Operation> {
-  static inline bool doit(const ::mlir::Operation &op) {
-    return T::classof(const_cast<::mlir::Operation *>(&op));
-  }
-};
-
-/// Provide specializations for operation casts as the resulting T is value
-/// typed.
-template <typename T> struct cast_retty_impl<T, ::mlir::Operation *> {
-  using ret_type = T;
-};
-template <typename T> struct cast_retty_impl<T, ::mlir::Operation> {
-  using ret_type = T;
-};
-template <class T>
-struct cast_convert_val<T, ::mlir::Operation, ::mlir::Operation> {
-  static T doit(::mlir::Operation &val) { return T(&val); }
-};
-template <class T>
-struct cast_convert_val<T, ::mlir::Operation *, ::mlir::Operation *> {
-  static T doit(::mlir::Operation *val) { return T(val); }
-};
-} // end namespace llvm
-
 #endif // MLIR_IR_OPERATION_H
diff --git a/mlir/include/mlir/IR/PatternMatch.h b/mlir/include/mlir/IR/PatternMatch.h
index 51528c1..3b02ed5 100644
--- a/mlir/include/mlir/IR/PatternMatch.h
+++ b/mlir/include/mlir/IR/PatternMatch.h
@@ -215,7 +215,7 @@ public:
     OperationState state(getContext(), location, OpTy::getOperationName());
     OpTy::build(this, &state, args...);
     auto *op = createOperation(state);
-    auto result = dyn_cast<OpTy>(op);
+    auto result = op->dyn_cast<OpTy>();
     assert(result && "Builder didn't return the right type");
     return result;
   }
@@ -231,7 +231,7 @@ public:
 
     // If the Operation we produce is valid, return it.
     if (!OpTy::verifyInvariants(op)) {
-      auto result = dyn_cast<OpTy>(op);
+      auto result = op->dyn_cast<OpTy>();
       assert(result && "Builder didn't return the right type");
       return result;
     }
diff --git a/mlir/include/mlir/Support/LLVM.h b/mlir/include/mlir/Support/LLVM.h
index 6676ad0..031dceb 100644
--- a/mlir/include/mlir/Support/LLVM.h
+++ b/mlir/include/mlir/Support/LLVM.h
@@ -69,7 +69,6 @@ using llvm::cast_or_null;
 using llvm::dyn_cast;
 using llvm::dyn_cast_or_null;
 using llvm::isa;
-using llvm::isa_and_nonnull;
 
 // Containers.
 using llvm::ArrayRef;
diff --git a/mlir/lib/AffineOps/AffineOps.cpp b/mlir/lib/AffineOps/AffineOps.cpp
index 2dfed93..51209da 100644
--- a/mlir/lib/AffineOps/AffineOps.cpp
+++ b/mlir/lib/AffineOps/AffineOps.cpp
@@ -61,11 +61,11 @@ bool mlir::isValidDim(Value *value) {
     if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
       return true;
     // Affine apply operation is ok if all of its operands are ok.
-    if (auto applyOp = dyn_cast<AffineApplyOp>(op))
+    if (auto applyOp = op->dyn_cast<AffineApplyOp>())
       return applyOp.isValidDim();
     // The dim op is okay if its operand memref/tensor is defined at the top
     // level.
-    if (auto dimOp = dyn_cast<DimOp>(op))
+    if (auto dimOp = op->dyn_cast<DimOp>())
       return isTopLevelSymbol(dimOp.getOperand());
     return false;
   }
@@ -86,11 +86,11 @@ bool mlir::isValidSymbol(Value *value) {
     if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
      return true;
    // Affine apply operation is ok if all of its operands are ok.
-    if (auto applyOp = dyn_cast<AffineApplyOp>(op))
+    if (auto applyOp = op->dyn_cast<AffineApplyOp>())
      return applyOp.isValidSymbol();
    // The dim op is okay if its operand memref/tensor is defined at the top
    // level.
-    if (auto dimOp = dyn_cast<DimOp>(op))
+    if (auto dimOp = op->dyn_cast<DimOp>())
      return isTopLevelSymbol(dimOp.getOperand());
    return false;
  }
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index 60f2b14..78caa4c 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -320,8 +320,8 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
   loadAndStores.match(forOp, &loadAndStoresMatched);
   for (auto ls : loadAndStoresMatched) {
     auto *op = ls.getMatchedOperation();
-    auto load = dyn_cast<LoadOp>(op);
-    auto store = dyn_cast<StoreOp>(op);
+    auto load = op->dyn_cast<LoadOp>();
+    auto store = op->dyn_cast<StoreOp>();
     // Only scalar types are considered vectorizable, all load/store must be
     // vectorizable for a loop to qualify as vectorizable.
     // TODO(ntv): ponder whether we want to be more general here.
@@ -338,8 +338,8 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
 
 bool mlir::isVectorizableLoopBody(AffineForOp loop, int *memRefDim) {
   VectorizableOpFun fun([memRefDim](AffineForOp loop, Operation &op) {
-    auto load = dyn_cast<LoadOp>(op);
-    auto store = dyn_cast<StoreOp>(op);
+    auto load = op.dyn_cast<LoadOp>();
+    auto store = op.dyn_cast<StoreOp>();
     return load ? isContiguousAccess(loop.getInductionVar(), load, memRefDim)
                 : isContiguousAccess(loop.getInductionVar(), store, memRefDim);
   });
diff --git a/mlir/lib/Analysis/MemRefBoundCheck.cpp b/mlir/lib/Analysis/MemRefBoundCheck.cpp
index 4e23441..0fb8862 100644
--- a/mlir/lib/Analysis/MemRefBoundCheck.cpp
+++ b/mlir/lib/Analysis/MemRefBoundCheck.cpp
@@ -48,9 +48,9 @@ FunctionPassBase *mlir::createMemRefBoundCheckPass() {
 
 void MemRefBoundCheck::runOnFunction() {
   getFunction().walk([](Operation *opInst) {
-    if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
+    if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
       boundCheckLoadOrStoreOp(loadOp);
-    } else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
+    } else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
       boundCheckLoadOrStoreOp(storeOp);
     }
     // TODO(bondhugula): do this for DMA ops as well.
diff --git a/mlir/lib/Analysis/SliceAnalysis.cpp b/mlir/lib/Analysis/SliceAnalysis.cpp
index 155a2bb..bce000a 100644
--- a/mlir/lib/Analysis/SliceAnalysis.cpp
+++ b/mlir/lib/Analysis/SliceAnalysis.cpp
@@ -50,7 +50,7 @@ static void getForwardSliceImpl(Operation *op,
     return;
   }
 
-  if (auto forOp = dyn_cast<AffineForOp>(op)) {
+  if (auto forOp = op->dyn_cast<AffineForOp>()) {
     for (auto &u : forOp.getInductionVar()->getUses()) {
       auto *ownerInst = u.getOwner();
       if (forwardSlice->count(ownerInst) == 0) {
diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
index 8d963e4..1eaab67 100644
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -44,7 +44,7 @@ void mlir::getLoopIVs(Operation &op, SmallVectorImpl<AffineForOp> *loops) {
   AffineForOp currAffineForOp;
   // Traverse up the hierarchy collecing all 'affine.for' operation while
   // skipping over 'affine.if' operations.
-  while (currOp && ((currAffineForOp = dyn_cast<AffineForOp>(currOp)) ||
+  while (currOp && ((currAffineForOp = currOp->dyn_cast<AffineForOp>()) ||
                     currOp->isa<AffineIfOp>())) {
     if (currAffineForOp)
       loops->push_back(currAffineForOp);
@@ -239,7 +239,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
       assert(isValidSymbol(symbol));
       // Check if the symbol is a constant.
       if (auto *op = symbol->getDefiningOp()) {
-        if (auto constOp = dyn_cast<ConstantIndexOp>(op)) {
+        if (auto constOp = op->dyn_cast<ConstantIndexOp>()) {
          cst.setIdToConstant(*symbol, constOp.getValue());
        }
      }
@@ -467,7 +467,7 @@ static Operation *getInstAtPosition(ArrayRef<unsigned> positions,
     }
     if (level == positions.size() - 1)
       return &op;
-    if (auto childAffineForOp = dyn_cast<AffineForOp>(op))
+    if (auto childAffineForOp = op.dyn_cast<AffineForOp>())
       return getInstAtPosition(positions, level + 1,
                                childAffineForOp.getBody());
 
@@ -633,7 +633,7 @@ mlir::insertBackwardComputationSlice(Operation *srcOpInst, Operation *dstOpInst,
 // Constructs MemRefAccess populating it with the memref, its indices and
 // opinst from 'loadOrStoreOpInst'.
 MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) {
-  if (auto loadOp = dyn_cast<LoadOp>(loadOrStoreOpInst)) {
+  if (auto loadOp = loadOrStoreOpInst->dyn_cast<LoadOp>()) {
     memref = loadOp.getMemRef();
     opInst = loadOrStoreOpInst;
     auto loadMemrefType = loadOp.getMemRefType();
@@ -643,7 +643,7 @@ MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) {
     }
   } else {
     assert(loadOrStoreOpInst->isa<StoreOp>() && "load/store op expected");
-    auto storeOp = dyn_cast<StoreOp>(loadOrStoreOpInst);
+    auto storeOp = loadOrStoreOpInst->dyn_cast<StoreOp>();
     opInst = loadOrStoreOpInst;
     memref = storeOp.getMemRef();
     auto storeMemrefType = storeOp.getMemRefType();
@@ -750,7 +750,7 @@ Optional<int64_t> mlir::getMemoryFootprintBytes(AffineForOp forOp,
 void mlir::getSequentialLoops(
     AffineForOp forOp, llvm::SmallDenseSet<Value *, 8> *sequentialLoops) {
   forOp.getOperation()->walk([&](Operation *op) {
-    if (auto innerFor = dyn_cast<AffineForOp>(op))
+    if (auto innerFor = op->dyn_cast<AffineForOp>())
       if (!isLoopParallel(innerFor))
         sequentialLoops->insert(innerFor.getInductionVar());
   });
diff --git a/mlir/lib/Analysis/VectorAnalysis.cpp b/mlir/lib/Analysis/VectorAnalysis.cpp
index 8fecf05..b45ac00 100644
--- a/mlir/lib/Analysis/VectorAnalysis.cpp
+++ b/mlir/lib/Analysis/VectorAnalysis.cpp
@@ -152,7 +152,7 @@ static SetVector<Operation *> getParentsOfType(Operation *op) {
   SetVector<Operation *> res;
   auto *current = op;
   while (auto *parent = current->getParentOp()) {
-    if (auto typedParent = dyn_cast<T>(parent)) {
+    if (auto typedParent = parent->template dyn_cast<T>()) {
       assert(res.count(parent) == 0 && "Already inserted");
       res.insert(parent);
     }
@@ -177,7 +177,7 @@ AffineMap mlir::makePermutationMap(
     }
   }
 
-  if (auto load = dyn_cast<LoadOp>(op)) {
+  if (auto load = op->dyn_cast<LoadOp>()) {
     return ::makePermutationMap(load.getIndices(), enclosingLoopToVectorDim);
   }
 
@@ -198,10 +198,10 @@ bool mlir::matcher::operatesOnSuperVectorsOf(Operation &op,
   /// do not have to special case. Maybe a trait, or just a method, unclear atm.
   bool mustDivide = false;
   VectorType superVectorType;
-  if (auto read = dyn_cast<VectorTransferReadOp>(op)) {
+  if (auto read = op.dyn_cast<VectorTransferReadOp>()) {
     superVectorType = read.getResultType();
     mustDivide = true;
-  } else if (auto write = dyn_cast<VectorTransferWriteOp>(op)) {
+  } else if (auto write = op.dyn_cast<VectorTransferWriteOp>()) {
     superVectorType = write.getVectorType();
     mustDivide = true;
   } else if (op.getNumResults() == 0) {
diff --git a/mlir/lib/EDSC/Builders.cpp b/mlir/lib/EDSC/Builders.cpp
index 2c91177..610c8b6 100644
--- a/mlir/lib/EDSC/Builders.cpp
+++ b/mlir/lib/EDSC/Builders.cpp
@@ -100,7 +100,7 @@ ValueHandle ValueHandle::create(StringRef name, ArrayRef<ValueHandle> operands,
   if (op->getNumResults() == 1) {
     return ValueHandle(op->getResult(0));
   }
-  if (auto f = dyn_cast<AffineForOp>(op)) {
+  if (auto f = op->dyn_cast<AffineForOp>()) {
     return ValueHandle(f.getInductionVar());
   }
   llvm_unreachable("unsupported operation, use an OperationHandle instead");
@@ -147,8 +147,8 @@ static llvm::Optional<ValueHandle> emitStaticFor(ArrayRef<ValueHandle> lbs,
   if (!lbDef || !ubDef)
     return llvm::Optional<ValueHandle>();
 
-  auto lbConst = dyn_cast<ConstantIndexOp>(lbDef);
-  auto ubConst = dyn_cast<ConstantIndexOp>(ubDef);
+  auto lbConst = lbDef->dyn_cast<ConstantIndexOp>();
+  auto ubConst = ubDef->dyn_cast<ConstantIndexOp>();
   if (!lbConst || !ubConst)
     return llvm::Optional<ValueHandle>();
 
diff --git a/mlir/lib/Linalg/Transforms/Tiling.cpp b/mlir/lib/Linalg/Transforms/Tiling.cpp
index 6e20542a..434f720 100644
--- a/mlir/lib/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Linalg/Transforms/Tiling.cpp
@@ -319,11 +319,11 @@ static LogicalResult tileLinalgOp(LinalgOp &op, ArrayRef<Value *> tileSizes,
 // TODO(ntv) expose as a primitive for other passes.
 static LogicalResult tileLinalgOp(Operation *op, ArrayRef<Value *> tileSizes,
                                   PerFunctionState &state) {
-  if (auto matmulOp = dyn_cast<MatmulOp>(op)) {
+  if (auto matmulOp = op->dyn_cast<MatmulOp>()) {
     return tileLinalgOp(matmulOp, tileSizes, state);
-  } else if (auto matvecOp = dyn_cast<MatvecOp>(op)) {
+  } else if (auto matvecOp = op->dyn_cast<MatvecOp>()) {
     return tileLinalgOp(matvecOp, tileSizes, state);
-  } else if (auto dotOp = dyn_cast<DotOp>(op)) {
+  } else if (auto dotOp = op->dyn_cast<DotOp>()) {
     return tileLinalgOp(dotOp, tileSizes, state);
   }
   return failure();
diff --git a/mlir/lib/Linalg/Utils/Utils.cpp b/mlir/lib/Linalg/Utils/Utils.cpp
index 98cf4b7..4b77ece 100644
--- a/mlir/lib/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Linalg/Utils/Utils.cpp
@@ -68,9 +68,9 @@ ValueHandle LoopNestRangeBuilder::LoopNestRangeBuilder::operator()(
 
 SmallVector<Value *, 8> mlir::getRanges(Operation *op) {
   SmallVector<Value *, 8> res;
-  if (auto view = dyn_cast<ViewOp>(op)) {
+  if (auto view = op->dyn_cast<ViewOp>()) {
     res.append(view.getIndexings().begin(), view.getIndexings().end());
-  } else if (auto slice = dyn_cast<SliceOp>(op)) {
+  } else if (auto slice = op->dyn_cast<SliceOp>()) {
     for (auto *i : slice.getIndexings())
       if (i->getType().isa<RangeType>())
         res.push_back(i);
@@ -100,7 +100,7 @@ SmallVector<Value *, 8> mlir::getRanges(Operation *op) {
 Value *mlir::createOrReturnView(FuncBuilder *b, Location loc,
                                 Operation *viewDefiningOp,
                                 ArrayRef<Value *> ranges) {
-  if (auto view = dyn_cast<ViewOp>(viewDefiningOp)) {
+  if (auto view = viewDefiningOp->dyn_cast<ViewOp>()) {
     auto indexings = view.getIndexings();
     if (std::equal(indexings.begin(), indexings.end(), ranges.begin()))
       return view.getResult();
diff --git a/mlir/lib/StandardOps/Ops.cpp b/mlir/lib/StandardOps/Ops.cpp
index bc68a78..05e3b13 100644
--- a/mlir/lib/StandardOps/Ops.cpp
+++ b/mlir/lib/StandardOps/Ops.cpp
@@ -134,7 +134,7 @@ struct MemRefCastFolder : public RewritePattern {
   void rewrite(Operation *op, PatternRewriter &rewriter) const override {
     for (unsigned i = 0, e = op->getNumOperands(); i != e; ++i)
       if (auto *memref = op->getOperand(i)->getDefiningOp())
-        if (auto cast = dyn_cast<MemRefCastOp>(memref))
+        if (auto cast = memref->dyn_cast<MemRefCastOp>())
          op->setOperand(i, cast.getOperand());
    rewriter.updatedRootInPlace(op);
  }
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index 597efc3..8a9c649 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -199,11 +199,11 @@ bool ModuleTranslation::convertOperation(Operation &opInst,
 
   // Emit branches.  We need to look up the remapped blocks and ignore the block
   // arguments that were transformed into PHI nodes.
-  if (auto brOp = dyn_cast<LLVM::BrOp>(opInst)) {
+  if (auto brOp = opInst.dyn_cast<LLVM::BrOp>()) {
     builder.CreateBr(blockMapping[brOp.getSuccessor(0)]);
     return false;
   }
-  if (auto condbrOp = dyn_cast<LLVM::CondBrOp>(opInst)) {
+  if (auto condbrOp = opInst.dyn_cast<LLVM::CondBrOp>()) {
     builder.CreateCondBr(valueMapping.lookup(condbrOp.getOperand(0)),
                          blockMapping[condbrOp.getSuccessor(0)],
                          blockMapping[condbrOp.getSuccessor(1)]);
@@ -264,7 +264,7 @@ static Value *getPHISourceValue(Block *current, Block *pred,
 
   // For conditional branches, we need to check if the current block is reached
   // through the "true" or the "false" branch and take the relevant operands.
-  auto condBranchOp = dyn_cast<LLVM::CondBrOp>(terminator);
+  auto condBranchOp = terminator.dyn_cast<LLVM::CondBrOp>();
   assert(condBranchOp &&
          "only branch operations can be terminators of a block that "
          "has successors");
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 937399c..10f47fe 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -173,11 +173,11 @@ static void getMultiLevelStrides(const MemRefRegion &region,
 static bool getFullMemRefAsRegion(Operation *opInst, unsigned numParamLoopIVs,
                                   MemRefRegion *region) {
   unsigned rank;
-  if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
+  if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
     rank = loadOp.getMemRefType().getRank();
     region->memref = loadOp.getMemRef();
     region->setWrite(false);
-  } else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
+  } else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
     rank = storeOp.getMemRefType().getRank();
     region->memref = storeOp.getMemRef();
     region->setWrite(true);
@@ -483,7 +483,7 @@ bool DmaGeneration::runOnBlock(Block *block) {
   });
 
   for (auto it = curBegin; it != block->end(); ++it) {
-    if (auto forOp = dyn_cast<AffineForOp>(&*it)) {
+    if (auto forOp = it->dyn_cast<AffineForOp>()) {
       // Returns true if the footprint is known to exceed capacity.
       auto exceedsCapacity = [&](AffineForOp forOp) {
         Optional<int64_t> footprint =
@@ -607,10 +607,10 @@ uint64_t DmaGeneration::runOnBlock(Block::iterator begin, Block::iterator end) {
   // Walk this range of operations to gather all memory regions.
   block->walk(begin, end, [&](Operation *opInst) {
     // Gather regions to allocate to buffers in faster memory space.
-    if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
+    if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
       if (loadOp.getMemRefType().getMemorySpace() != slowMemorySpace)
         return;
-    } else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
+    } else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
       if (storeOp.getMemRefType().getMemorySpace() != slowMemorySpace)
         return;
     } else {
@@ -739,7 +739,7 @@ uint64_t DmaGeneration::runOnBlock(Block::iterator begin, Block::iterator end) {
   // For a range of operations, a note will be emitted at the caller.
   AffineForOp forOp;
   uint64_t sizeInKib = llvm::divideCeil(totalDmaBuffersSizeInBytes, 1024);
-  if (llvm::DebugFlag && (forOp = dyn_cast<AffineForOp>(&*begin))) {
+  if (llvm::DebugFlag && (forOp = begin->dyn_cast<AffineForOp>())) {
     forOp.emitRemark()
         << sizeInKib
         << " KiB of DMA buffers in fast memory space for this block\n";
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index 1c4a4d1..796d216 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -644,7 +644,7 @@ bool MemRefDependenceGraph::init(Function &f) {
 
   DenseMap<Operation *, unsigned> forToNodeMap;
   for (auto &op : f.front()) {
-    if (auto forOp = dyn_cast<AffineForOp>(op)) {
+    if (auto forOp = op.dyn_cast<AffineForOp>()) {
       // Create graph node 'id' to represent top-level 'forOp' and record
       // all loads and store accesses it contains.
       LoopNestStateCollector collector;
@@ -666,14 +666,14 @@ bool MemRefDependenceGraph::init(Function &f) {
       }
       forToNodeMap[&op] = node.id;
       nodes.insert({node.id, node});
-    } else if (auto loadOp = dyn_cast<LoadOp>(op)) {
+    } else if (auto loadOp = op.dyn_cast<LoadOp>()) {
       // Create graph node for top-level load op.
       Node node(nextNodeId++, &op);
       node.loads.push_back(&op);
       auto *memref = op.cast<LoadOp>().getMemRef();
       memrefAccesses[memref].insert(node.id);
       nodes.insert({node.id, node});
-    } else if (auto storeOp = dyn_cast<StoreOp>(op)) {
+    } else if (auto storeOp = op.dyn_cast<StoreOp>()) {
       // Create graph node for top-level store op.
       Node node(nextNodeId++, &op);
       node.stores.push_back(&op);
@@ -2125,7 +2125,7 @@ public:
     auto *fn = dstNode->op->getFunction();
     for (unsigned i = 0, e = fn->getNumArguments(); i != e; ++i) {
       for (auto &use : fn->getArgument(i)->getUses()) {
-        if (auto loadOp = dyn_cast<LoadOp>(use.getOwner())) {
+        if (auto loadOp = use.getOwner()->dyn_cast<LoadOp>()) {
          // Gather loops surrounding 'use'.
          SmallVector<AffineForOp, 4> loops;
          getLoopIVs(*use.getOwner(), &loops);
diff --git a/mlir/lib/Transforms/LoopTiling.cpp b/mlir/lib/Transforms/LoopTiling.cpp
index 28e13d8..ce42a5e 100644
--- a/mlir/lib/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Transforms/LoopTiling.cpp
@@ -273,7 +273,7 @@ static void getTileableBands(Function &f,
 
   for (auto &block : f)
     for (auto &op : block)
-      if (auto forOp = dyn_cast<AffineForOp>(op))
+      if (auto forOp = op.dyn_cast<AffineForOp>())
         getMaximalPerfectLoopNest(forOp);
 }
 
diff --git a/mlir/lib/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
index 0a23295..366a7ed 100644
--- a/mlir/lib/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
@@ -92,7 +92,7 @@ void LoopUnrollAndJam::runOnFunction() {
   // unroll-and-jammed by this pass. However, runOnAffineForOp can be called on
   // any for operation.
   auto &entryBlock = getFunction().front();
-  if (auto forOp = dyn_cast<AffineForOp>(entryBlock.front()))
+  if (auto forOp = entryBlock.front().dyn_cast<AffineForOp>())
     runOnAffineForOp(forOp);
 }
 
diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp
index 1ffe5e3..dc389c8 100644
--- a/mlir/lib/Transforms/LowerAffine.cpp
+++ b/mlir/lib/Transforms/LowerAffine.cpp
@@ -620,10 +620,10 @@ void LowerAffinePass::runOnFunction() {
   // Rewrite all of the ifs and fors.  We walked the operations in postorders,
   // so we know that we will rewrite them in the reverse order.
   for (auto *op : llvm::reverse(instsToRewrite)) {
-    if (auto ifOp = dyn_cast<AffineIfOp>(op)) {
+    if (auto ifOp = op->dyn_cast<AffineIfOp>()) {
      if (lowerAffineIf(ifOp))
        return signalPassFailure();
-    } else if (auto forOp = dyn_cast<AffineForOp>(op)) {
+    } else if (auto forOp = op->dyn_cast<AffineForOp>()) {
      if (lowerAffineFor(forOp))
        return signalPassFailure();
    } else if (lowerAffineApply(op->cast<AffineApplyOp>())) {
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 28dfb22..2f06a9a 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -556,12 +556,12 @@ static bool instantiateMaterialization(Operation *op,
   if (op->getNumRegions() != 0)
     return op->emitError("NYI path Op with region"), true;
 
-  if (auto write = dyn_cast<VectorTransferWriteOp>(op)) {
+  if (auto write = op->dyn_cast<VectorTransferWriteOp>()) {
     auto *clone = instantiate(&b, write, state->hwVectorType,
                               state->hwVectorInstance, state->substitutionsMap);
     return clone == nullptr;
   }
-  if (auto read = dyn_cast<VectorTransferReadOp>(op)) {
+  if (auto read = op->dyn_cast<VectorTransferReadOp>()) {
     auto *clone = instantiate(&b, read, state->hwVectorType,
                               state->hwVectorInstance, state->substitutionsMap);
     if (!clone) {
diff --git a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
index 94df936..a63d462 100644
--- a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
+++ b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
@@ -103,7 +103,7 @@ void MemRefDataFlowOpt::forwardStoreToLoad(LoadOp loadOp) {
   SmallVector<Operation *, 8> storeOps;
   unsigned minSurroundingLoops = getNestingDepth(*loadOpInst);
   for (auto &use : loadOp.getMemRef()->getUses()) {
-    auto storeOp = dyn_cast<StoreOp>(use.getOwner());
+    auto storeOp = use.getOwner()->dyn_cast<StoreOp>();
     if (!storeOp)
       continue;
     auto *storeOpInst = storeOp.getOperation();
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index 0da97f7..66fbf4a 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -181,7 +181,7 @@ static void findMatchingStartFinishInsts(
   // Collect outgoing DMA operations - needed to check for dependences below.
   SmallVector<DmaStartOp, 4> outgoingDmaOps;
   for (auto &op : *forOp.getBody()) {
-    auto dmaStartOp = dyn_cast<DmaStartOp>(op);
+    auto dmaStartOp = op.dyn_cast<DmaStartOp>();
     if (dmaStartOp && dmaStartOp.isSrcMemorySpaceFaster())
       outgoingDmaOps.push_back(dmaStartOp);
   }
@@ -193,7 +193,7 @@ static void findMatchingStartFinishInsts(
       dmaFinishInsts.push_back(&op);
       continue;
     }
-    auto dmaStartOp = dyn_cast<DmaStartOp>(op);
+    auto dmaStartOp = op.dyn_cast<DmaStartOp>();
     if (!dmaStartOp)
       continue;
 
diff --git a/mlir/lib/Transforms/TestConstantFold.cpp b/mlir/lib/Transforms/TestConstantFold.cpp
index ec1e971..0990d7a 100644
--- a/mlir/lib/Transforms/TestConstantFold.cpp
+++ b/mlir/lib/Transforms/TestConstantFold.cpp
@@ -48,7 +48,7 @@ void TestConstantFold::foldOperation(Operation *op,
   }
   // If this op is a constant that are used and cannot be de-duplicated,
   // remember it for cleanup later.
-  else if (auto constant = dyn_cast<ConstantOp>(op)) {
+  else if (auto constant = op->dyn_cast<ConstantOp>()) {
     existingConstants.push_back(op);
   }
 }
diff --git a/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp b/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp
index b907840..fc8209b 100644
--- a/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp
+++ b/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp
@@ -40,7 +40,7 @@ bool ConstantFoldHelper::tryToConstantFold(
   // into the value it contains. We need to consider constants before the
   // constant folding logic to avoid re-creating the same constant later.
  // TODO: Extend to support dialect-specific constant ops.
-  if (auto constant = dyn_cast<ConstantOp>(op)) {
+  if (auto constant = op->dyn_cast<ConstantOp>()) {
    // If this constant is dead, update bookkeeping and signal the caller.
    if (constant.use_empty()) {
      notifyRemoval(op);
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index 7fbb48e..a10e4a1 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -363,7 +363,7 @@ void mlir::getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
   nestedLoops.push_back(curr);
   auto *currBody = curr.getBody();
   while (currBody->begin() == std::prev(currBody->end(), 2) &&
-         (curr = dyn_cast<AffineForOp>(curr.getBody()->front()))) {
+         (curr = curr.getBody()->front().dyn_cast<AffineForOp>())) {
     nestedLoops.push_back(curr);
     currBody = curr.getBody();
   }
diff --git a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
index b64dc53..753f7cf 100644
--- a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
+++ b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
@@ -234,7 +234,7 @@ void VectorizerTestPass::testComposeMaps(llvm::raw_ostream &outs) {
 static bool affineApplyOp(Operation &op) { return op.isa<AffineApplyOp>(); }
 
 static bool singleResultAffineApplyOpWithoutUses(Operation &op) {
-  auto app = dyn_cast<AffineApplyOp>(op);
+  auto app = op.dyn_cast<AffineApplyOp>();
   return app && app.use_empty();
 }
 
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index 9b8768a..025a653 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -839,8 +839,8 @@ static LogicalResult vectorizeAffineForOp(AffineForOp loop, int64_t step,
   loadAndStores.match(loop.getOperation(), &loadAndStoresMatches);
   for (auto ls : loadAndStoresMatches) {
     auto *opInst = ls.getMatchedOperation();
-    auto load = dyn_cast<LoadOp>(opInst);
-    auto store = dyn_cast<StoreOp>(opInst);
+    auto load = opInst->dyn_cast<LoadOp>();
+    auto store = opInst->dyn_cast<StoreOp>();
     LLVM_DEBUG(opInst->print(dbgs()));
     LogicalResult result =
         load ? vectorizeRootOrTerminal(loop.getInductionVar(), load, state)
@@ -982,7 +982,7 @@ static Value *vectorizeOperand(Value *operand, Operation *op,
     return nullptr;
   }
   // 3. vectorize constant.
-  if (auto constant = dyn_cast<ConstantOp>(operand->getDefiningOp())) {
+  if (auto constant = operand->getDefiningOp()->dyn_cast<ConstantOp>()) {
     return vectorizeConstant(
         op, constant,
         VectorType::get(state->strategy->vectorSizes, operand->getType()));
@@ -1012,7 +1012,7 @@ static Operation *vectorizeOneOperation(Operation *opInst,
   assert(!opInst->isa<VectorTransferWriteOp>() &&
          "vector.transfer_write cannot be further vectorized");
 
-  if (auto store = dyn_cast<StoreOp>(opInst)) {
+  if (auto store = opInst->dyn_cast<StoreOp>()) {
     auto *memRef = store.getMemRef();
     auto *value = store.getValueToStore();
     auto *vectorValue = vectorizeOperand(value, opInst, state);
diff --git a/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp b/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp
index 5c34ed1..ec566e2 100644
--- a/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp
+++ b/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp
@@ -161,8 +161,8 @@ static bool emitOneBuilder(const Record &record, raw_ostream &os) {
   }
 
   // Output the check and the rewritten builder string.
-  os << "if (auto op = dyn_cast<" << op.getQualCppClassName()
-     << ">(opInst)) {\n";
+  os << "if (auto op = opInst.dyn_cast<" << op.getQualCppClassName()
+     << ">()) {\n";
  os << bs.str() << builderStrRef << "\n";
  os << "  return false;\n";
  os << "}\n";
-- 
2.7.4
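
Note (editorial aside, not part of the patch): this rollback reinstates the
member-function cast API on mlir::Operation in place of the llvm::dyn_cast
free functions. A minimal sketch of what a call site looks like under the
restored API, using the standard DimOp (the same example the Operation.h doc
comments use); the inspect() function and its includes are hypothetical
illustration, not code from the patch:

  #include "mlir/IR/Operation.h"
  #include "mlir/StandardOps/Ops.h"

  void inspect(mlir::Operation *op) {
    // Member dyn_cast: yields a null (boolean-false) DimOp on failure,
    // whereas op->cast<mlir::DimOp>() would assert on a mismatch.
    if (auto dim = op->dyn_cast<mlir::DimOp>()) {
      (void)dim; // use the typed op here
    }
    // Free-function helper reintroduced by the patch for possibly-null input.
    if (auto dim = mlir::dyn_cast_or_null<mlir::DimOp>(op))
      (void)dim;
    // Combined null-and-type check, also reintroduced in Operation.h.
    if (mlir::isa_and_nonnull<mlir::DimOp>(op)) {
      // op is non-null and known to be a DimOp.
    }
  }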