From: River Riddle
Date: Sat, 11 May 2019 22:56:50 +0000 (-0700)
Subject: Add support for using llvm::dyn_cast/cast/isa for operation casts and replace...
X-Git-Tag: llvmorg-11-init~1466^2~1744
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=c5ecf9910a209d96ab768a205783871b4316d711;p=platform%2Fupstream%2Fllvm.git

Add support for using llvm::dyn_cast/cast/isa for operation casts and
replace usages of Operation::dyn_cast with llvm::dyn_cast.

--
PiperOrigin-RevId: 247780086
---

diff --git a/mlir/examples/Linalg/Linalg1/lib/Analysis.cpp b/mlir/examples/Linalg/Linalg1/lib/Analysis.cpp
index ecb6309..a7fba17 100644
--- a/mlir/examples/Linalg/Linalg1/lib/Analysis.cpp
+++ b/mlir/examples/Linalg/Linalg1/lib/Analysis.cpp
@@ -31,7 +31,7 @@ ViewOp linalg::getViewBaseViewOp(Value *view) {
   auto viewType = view->getType().dyn_cast<ViewType>();
   (void)viewType;
   assert(viewType.isa<ViewType>() && "expected a ViewType");
-  while (auto slice = view->getDefiningOp()->dyn_cast<SliceOp>()) {
+  while (auto slice = dyn_cast<SliceOp>(view->getDefiningOp())) {
     view = slice.getParentView();
     assert(viewType.isa<ViewType>() && "expected a ViewType");
   }
@@ -48,7 +48,7 @@ std::pair<Value *, unsigned> linalg::getViewRootIndexing(Value *view,
   (void)viewType;
   assert(viewType.isa<ViewType>() && "expected a ViewType");
   assert(dim < viewType.getRank() && "dim exceeds rank");
-  if (auto viewOp = view->getDefiningOp()->dyn_cast<ViewOp>())
+  if (auto viewOp = dyn_cast<ViewOp>(view->getDefiningOp()))
     return std::make_pair(viewOp.getIndexing(dim), dim);
 
   auto sliceOp = view->getDefiningOp()->cast<SliceOp>();
diff --git a/mlir/examples/Linalg/Linalg1/lib/Common.cpp b/mlir/examples/Linalg/Linalg1/lib/Common.cpp
index bfdc40a..278f9c5 100644
--- a/mlir/examples/Linalg/Linalg1/lib/Common.cpp
+++ b/mlir/examples/Linalg/Linalg1/lib/Common.cpp
@@ -40,7 +40,7 @@ linalg::common::LoopNestRangeBuilder::LoopNestRangeBuilder(
   assert(ivs.size() == indexings.size());
   for (unsigned i = 0, e = indexings.size(); i < e; ++i) {
     auto rangeOp =
-        indexings[i].getValue()->getDefiningOp()->dyn_cast<RangeOp>();
+        llvm::dyn_cast<RangeOp>(indexings[i].getValue()->getDefiningOp());
     if (!rangeOp) {
       continue;
     }
diff --git a/mlir/examples/Linalg/Linalg1/lib/Utils.cpp b/mlir/examples/Linalg/Linalg1/lib/Utils.cpp
index 372c08f..5bcebc7 100644
--- a/mlir/examples/Linalg/Linalg1/lib/Utils.cpp
+++ b/mlir/examples/Linalg/Linalg1/lib/Utils.cpp
@@ -33,7 +33,7 @@ using namespace linalg::intrinsics;
 
 unsigned linalg::getViewRank(Value *view) {
   assert(view->getType().isa<ViewType>() && "expected a ViewType");
-  if (auto viewOp = view->getDefiningOp()->dyn_cast<ViewOp>())
+  if (auto viewOp = dyn_cast<ViewOp>(view->getDefiningOp()))
     return viewOp.getRank();
   return view->getDefiningOp()->cast<SliceOp>().getRank();
 }
diff --git a/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp b/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp
index d1af750..83fd9ad 100644
--- a/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp
+++ b/mlir/examples/Linalg/Linalg2/lib/Transforms.cpp
@@ -43,7 +43,7 @@ using namespace linalg::intrinsics;
 // analyses. This builds the chain.
 static SmallVector<mlir::Value *, 8> getViewChain(mlir::Value *v) {
   assert(v->getType().isa<ViewType>() && "ViewType expected");
-  if (v->getDefiningOp()->dyn_cast<ViewOp>()) {
+  if (v->getDefiningOp()->isa<ViewOp>()) {
     return SmallVector<mlir::Value *, 8>{v};
   }
 
@@ -53,7 +53,7 @@ static SmallVector<mlir::Value *, 8> getViewChain(mlir::Value *v) {
     tmp.push_back(v);
     v = sliceOp.getParentView();
   } while (!v->getType().isa<ViewType>());
-  assert(v->getDefiningOp()->cast<ViewOp>() && "must be a ViewOp");
+  assert(v->getDefiningOp()->isa<ViewOp>() && "must be a ViewOp");
   tmp.push_back(v);
   return SmallVector<mlir::Value *, 8>(tmp.rbegin(), tmp.rend());
 }
diff --git a/mlir/examples/Linalg/Linalg3/include/linalg3/TensorOps-inl.h b/mlir/examples/Linalg/Linalg3/include/linalg3/TensorOps-inl.h
index 9339d73..3090f29 100644
--- a/mlir/examples/Linalg/Linalg3/include/linalg3/TensorOps-inl.h
+++ b/mlir/examples/Linalg/Linalg3/include/linalg3/TensorOps-inl.h
@@ -91,7 +91,7 @@ inline llvm::SmallVector<mlir::Value *, 8>
 extractRangesFromViewOrSliceOp(mlir::Value *view) {
   // This expects a viewType which must come from either ViewOp or SliceOp.
   assert(view->getType().isa<ViewType>() && "expected ViewType");
-  if (auto viewOp = view->getDefiningOp()->dyn_cast<ViewOp>())
+  if (auto viewOp = llvm::dyn_cast<ViewOp>(view->getDefiningOp()))
     return viewOp.getRanges();
 
   auto sliceOp = view->getDefiningOp()->cast<SliceOp>();
diff --git a/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp b/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
index 42999ae..bce7f58 100644
--- a/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
+++ b/mlir/examples/Linalg/Linalg3/lib/Transforms.cpp
@@ -46,9 +46,9 @@ void linalg::composeSliceOps(mlir::Function *f) {
 
 void linalg::lowerToFinerGrainedTensorContraction(mlir::Function *f) {
   f->walk([](Operation *op) {
-    if (auto matmulOp = op->dyn_cast<MatmulOp>()) {
+    if (auto matmulOp = dyn_cast<MatmulOp>(op)) {
       matmulOp.writeAsFinerGrainTensorContraction();
-    } else if (auto matvecOp = op->dyn_cast<MatvecOp>()) {
+    } else if (auto matvecOp = dyn_cast<MatvecOp>(op)) {
       matvecOp.writeAsFinerGrainTensorContraction();
     } else {
       return;
@@ -205,11 +205,11 @@ writeContractionAsLoops(ContractionOp contraction) {
 
 llvm::Optional<SmallVector<mlir::AffineForOp, 4>>
 linalg::writeAsLoops(Operation *op) {
-  if (auto matmulOp = op->dyn_cast<MatmulOp>()) {
+  if (auto matmulOp = dyn_cast<MatmulOp>(op)) {
     return writeContractionAsLoops(matmulOp);
-  } else if (auto matvecOp = op->dyn_cast<MatvecOp>()) {
+  } else if (auto matvecOp = dyn_cast<MatvecOp>(op)) {
     return writeContractionAsLoops(matvecOp);
-  } else if (auto dotOp = op->dyn_cast<DotOp>()) {
+  } else if (auto dotOp = dyn_cast<DotOp>(op)) {
     return writeContractionAsLoops(dotOp);
   }
   return llvm::None;
@@ -276,7 +276,7 @@ PatternMatchResult
 Rewriter<linalg::LoadOp>::matchAndRewrite(Operation *op,
                                           PatternRewriter &rewriter) const {
   auto load = op->cast<linalg::LoadOp>();
-  SliceOp slice = load.getView()->getDefiningOp()->dyn_cast<SliceOp>();
+  SliceOp slice = dyn_cast<SliceOp>(load.getView()->getDefiningOp());
   ViewOp view = slice ? emitAndReturnFullyComposedView(slice.getResult())
                       : load.getView()->getDefiningOp()->cast<ViewOp>();
   ScopedContext scope(FuncBuilder(load), load.getLoc());
@@ -291,7 +291,7 @@ PatternMatchResult
 Rewriter<linalg::StoreOp>::matchAndRewrite(Operation *op,
                                            PatternRewriter &rewriter) const {
   auto store = op->cast<linalg::StoreOp>();
-  SliceOp slice = store.getView()->getDefiningOp()->dyn_cast<SliceOp>();
+  SliceOp slice = dyn_cast<SliceOp>(store.getView()->getDefiningOp());
   ViewOp view = slice ?
                           emitAndReturnFullyComposedView(slice.getResult())
                       : store.getView()->getDefiningOp()->cast<ViewOp>();
   ScopedContext scope(FuncBuilder(store), store.getLoc());
diff --git a/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp b/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp
index 05865e9..6771257 100644
--- a/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp
+++ b/mlir/examples/Linalg/Linalg4/lib/Transforms.cpp
@@ -52,8 +52,8 @@ void linalg::lowerToTiledLoops(mlir::Function *f,
 }
 
 static bool isZeroIndex(Value *v) {
-  return v->getDefiningOp() && v->getDefiningOp()->isa<ConstantIndexOp>() &&
-         v->getDefiningOp()->dyn_cast<ConstantIndexOp>().getValue() == 0;
+  return isa_and_nonnull<ConstantIndexOp>(v->getDefiningOp()) &&
+         cast<ConstantIndexOp>(v->getDefiningOp()).getValue() == 0;
 }
 
 template <class ConcreteOp>
@@ -178,11 +178,11 @@ writeContractionAsTiledViews(TensorContractionBase<ConcreteOp> &contraction,
 
 llvm::Optional<SmallVector<mlir::AffineForOp, 8>>
 linalg::writeAsTiledViews(Operation *op, ArrayRef<Value *> tileSizes) {
-  if (auto matmulOp = op->dyn_cast<MatmulOp>()) {
+  if (auto matmulOp = dyn_cast<MatmulOp>(op)) {
     return writeContractionAsTiledViews(matmulOp, tileSizes);
-  } else if (auto matvecOp = op->dyn_cast<MatvecOp>()) {
+  } else if (auto matvecOp = dyn_cast<MatvecOp>(op)) {
     return writeContractionAsTiledViews(matvecOp, tileSizes);
-  } else if (auto dotOp = op->dyn_cast<DotOp>()) {
+  } else if (auto dotOp = dyn_cast<DotOp>(op)) {
     return writeContractionAsTiledViews(dotOp, tileSizes);
   }
   return llvm::None;
@@ -190,11 +190,11 @@ linalg::writeAsTiledViews(Operation *op, ArrayRef<Value *> tileSizes) {
 
 void linalg::lowerToTiledViews(mlir::Function *f, ArrayRef<Value *> tileSizes) {
   f->walk([tileSizes](Operation *op) {
-    if (auto matmulOp = op->dyn_cast<MatmulOp>()) {
+    if (auto matmulOp = dyn_cast<MatmulOp>(op)) {
       writeAsTiledViews(matmulOp, tileSizes);
-    } else if (auto matvecOp = op->dyn_cast<MatvecOp>()) {
+    } else if (auto matvecOp = dyn_cast<MatvecOp>(op)) {
       writeAsTiledViews(matvecOp, tileSizes);
-    } else if (auto dotOp = op->dyn_cast<DotOp>()) {
+    } else if (auto dotOp = dyn_cast<DotOp>(op)) {
       writeAsTiledViews(dotOp, tileSizes);
     } else {
       return;
diff --git a/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
index a11c882..c9f98e7 100644
--- a/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch4/mlir/ShapeInferencePass.cpp
@@ -238,13 +238,13 @@ public:
       LLVM_DEBUG(llvm::dbgs() << "Inferring shape for: " << *op << "\n");
 
       // The add operation is trivial: propagate the input type as is.
-      if (auto addOp = op->dyn_cast<AddOp>()) {
+      if (auto addOp = llvm::dyn_cast<AddOp>(op)) {
        op->getResult(0)->setType(op->getOperand(0)->getType());
         continue;
       }
 
       // Transpose is easy: just invert the dimensions.
-      if (auto transpose = op->dyn_cast<TransposeOp>()) {
+      if (auto transpose = llvm::dyn_cast<TransposeOp>(op)) {
         SmallVector<int64_t, 2> dims;
         auto arrayTy = transpose.getOperand()->getType().cast<ToyArrayType>();
         dims.insert(dims.end(), arrayTy.getShape().begin(),
@@ -259,7 +259,7 @@ public:
       // catch it but shape inference earlier in the pass could generate an
       // invalid IR (from an invalid Toy input of course) and we wouldn't want
       // to crash here.
-      if (auto mulOp = op->dyn_cast<MulOp>()) {
+      if (auto mulOp = llvm::dyn_cast<MulOp>(op)) {
         auto lhs = mulOp.getLHS()->getType().cast<ToyArrayType>();
         auto rhs = mulOp.getRHS()->getType().cast<ToyArrayType>();
         auto lhsRank = lhs.getShape().size();
@@ -291,7 +291,7 @@ public:
       // for this function, queue the callee in the inter-procedural work list,
       // and return. The current function stays in the work list and will
       // restart after the callee is processed.
-      if (auto callOp = op->dyn_cast<GenericCallOp>()) {
+      if (auto callOp = llvm::dyn_cast<GenericCallOp>(op)) {
         auto calleeName = callOp.getCalleeName();
         auto *callee = getModule().getNamedFunction(calleeName);
         if (!callee) {
diff --git a/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp b/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp
index f3e8ff0..942ce86 100644
--- a/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp
+++ b/mlir/examples/toy/Ch4/mlir/ToyCombine.cpp
@@ -53,7 +53,7 @@ struct SimplifyRedundantTranspose : public mlir::RewritePattern {
     // Look through the input of the current transpose.
     mlir::Value *transposeInput = transpose.getOperand();
     TransposeOp transposeInputOp =
-        mlir::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
+        llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
     // If the input is defined by another Transpose, bingo!
     if (!transposeInputOp)
       return matchFailure();
@@ -75,7 +75,7 @@ struct SimplifyReshapeConstant : public mlir::RewritePattern {
                   mlir::PatternRewriter &rewriter) const override {
     ReshapeOp reshape = op->cast<ReshapeOp>();
     // Look through the input of the current reshape.
-    ConstantOp constantOp = mlir::dyn_cast_or_null<ConstantOp>(
+    ConstantOp constantOp = llvm::dyn_cast_or_null<ConstantOp>(
         reshape.getOperand()->getDefiningOp());
     // If the input is defined by another constant, bingo!
     if (!constantOp)
diff --git a/mlir/examples/toy/Ch5/mlir/LateLowering.cpp b/mlir/examples/toy/Ch5/mlir/LateLowering.cpp
index 4ef62d3..534b5cb 100644
--- a/mlir/examples/toy/Ch5/mlir/LateLowering.cpp
+++ b/mlir/examples/toy/Ch5/mlir/LateLowering.cpp
@@ -366,7 +366,7 @@ struct LateLoweringPass : public ModulePass<LateLoweringPass> {
     // First patch calls type to return memref instead of ToyArray
     for (auto &function : getModule()) {
       function.walk([&](Operation *op) {
-        auto callOp = op->dyn_cast<CallOp>();
+        auto callOp = dyn_cast<CallOp>(op);
         if (!callOp)
           return;
         if (!callOp.getNumResults())
@@ -382,14 +382,14 @@ struct LateLoweringPass : public ModulePass<LateLoweringPass> {
     for (auto &function : getModule()) {
       function.walk([&](Operation *op) {
         // Turns toy.alloc into sequence of alloc/dealloc (later malloc/free).
-        if (auto allocOp = op->dyn_cast<toy::AllocOp>()) {
+        if (auto allocOp = dyn_cast<toy::AllocOp>(op)) {
           auto result = allocTensor(allocOp);
           allocOp.replaceAllUsesWith(result);
           allocOp.erase();
           return;
         }
         // Eliminate all type.cast before lowering to LLVM.
-        if (auto typeCastOp = op->dyn_cast<toy::TypeCastOp>()) {
+        if (auto typeCastOp = dyn_cast<toy::TypeCastOp>(op)) {
           typeCastOp.replaceAllUsesWith(typeCastOp.getOperand());
           typeCastOp.erase();
           return;
@@ -429,7 +429,7 @@ struct LateLoweringPass : public ModulePass<LateLoweringPass> {
     // Insert a `dealloc` operation right before the `return` operations, unless
     // it is returned itself in which case the caller is responsible for it.
     builder.getFunction()->walk([&](Operation *op) {
-      auto returnOp = op->dyn_cast<ReturnOp>();
+      auto returnOp = dyn_cast<ReturnOp>(op);
       if (!returnOp)
         return;
       if (returnOp.getNumOperands() && returnOp.getOperand(0) == alloc)
diff --git a/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp b/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
index a083e62..4e17b23 100644
--- a/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
+++ b/mlir/examples/toy/Ch5/mlir/ShapeInferencePass.cpp
@@ -238,7 +238,7 @@ public:
       LLVM_DEBUG(llvm::dbgs() << "Inferring shape for: " << *op << "\n");
 
       // The add operation is trivial: propagate the input type as is.
-      if (auto addOp = op->dyn_cast<AddOp>()) {
+      if (auto addOp = llvm::dyn_cast<AddOp>(op)) {
         op->getResult(0)->setType(op->getOperand(0)->getType());
         continue;
       }
@@ -261,7 +261,7 @@ public:
       // catch it but shape inference earlier in the pass could generate an
       // invalid IR (from an invalid Toy input of course) and we wouldn't want
       // to crash here.
-      if (auto mulOp = op->dyn_cast<MulOp>()) {
+      if (auto mulOp = llvm::dyn_cast<MulOp>(op)) {
         auto lhs = mulOp.getLHS()->getType().cast<ToyArrayType>();
         auto rhs = mulOp.getRHS()->getType().cast<ToyArrayType>();
         auto lhsRank = lhs.getShape().size();
@@ -295,7 +295,7 @@ public:
       // for this function, queue the callee in the inter-procedural work list,
       // and return. The current function stays in the work list and will
       // restart after the callee is processed.
-      if (auto callOp = op->dyn_cast<GenericCallOp>()) {
+      if (auto callOp = llvm::dyn_cast<GenericCallOp>(op)) {
         auto calleeName = callOp.getCalleeName();
         auto *callee = getModule().getNamedFunction(calleeName);
         if (!callee) {
diff --git a/mlir/include/mlir/EDSC/Builders.h b/mlir/include/mlir/EDSC/Builders.h
index 5d23488..39302f6 100644
--- a/mlir/include/mlir/EDSC/Builders.h
+++ b/mlir/include/mlir/EDSC/Builders.h
@@ -439,7 +439,7 @@ ValueHandle ValueHandle::create(Args... args) {
   if (op->getNumResults() == 1) {
     return ValueHandle(op->getResult(0));
   } else if (op->getNumResults() == 0) {
-    if (auto f = op->dyn_cast<AffineForOp>()) {
+    if (auto f = dyn_cast<AffineForOp>(op)) {
       return ValueHandle(f.getInductionVar());
     }
   }
diff --git a/mlir/include/mlir/IR/Builders.h b/mlir/include/mlir/IR/Builders.h
index 1ee6c48..7f182e8 100644
--- a/mlir/include/mlir/IR/Builders.h
+++ b/mlir/include/mlir/IR/Builders.h
@@ -271,7 +271,7 @@ public:
     OperationState state(getContext(), location, OpTy::getOperationName());
     OpTy::build(this, &state, args...);
     auto *op = createOperation(state);
-    auto result = op->dyn_cast<OpTy>();
+    auto result = dyn_cast<OpTy>(op);
     assert(result && "Builder didn't return the right type");
     return result;
   }
diff --git a/mlir/include/mlir/IR/Function.h b/mlir/include/mlir/IR/Function.h
index 0770d2c..d4b85b5 100644
--- a/mlir/include/mlir/IR/Function.h
+++ b/mlir/include/mlir/IR/Function.h
@@ -116,7 +116,7 @@ public:
   /// Specialization of walk to only visit operations of 'OpTy'.
   template <typename OpTy> void walk(std::function<void(OpTy)> callback) {
     walk([&](Operation *opInst) {
-      if (auto op = opInst->dyn_cast<OpTy>())
+      if (auto op = dyn_cast<OpTy>(opInst))
         callback(op);
     });
   }
diff --git a/mlir/include/mlir/IR/OpDefinition.h b/mlir/include/mlir/IR/OpDefinition.h
index b80e8ac..2eff412 100644
--- a/mlir/include/mlir/IR/OpDefinition.h
+++ b/mlir/include/mlir/IR/OpDefinition.h
@@ -792,7 +792,7 @@ public:
   /// This is the hook used by the AsmPrinter to emit this to the .mlir file.
   /// Op implementations should provide a print method.
   static void printAssembly(Operation *op, OpAsmPrinter *p) {
-    auto opPointer = op->dyn_cast<ConcreteType>();
+    auto opPointer = dyn_cast<ConcreteType>(op);
     assert(opPointer &&
            "op's name does not match name of concrete type instantiated with");
     opPointer.print(p);
@@ -825,11 +825,13 @@ public:
   /// This is a public constructor. Any op can be initialized to null.
   explicit Op() : OpState(nullptr) {}
+  Op(std::nullptr_t) : OpState(nullptr) {}
 
-protected:
-  /// This is a private constructor only accessible through the
-  /// Operation::cast family of methods.
-  explicit Op(Operation *state) : OpState(state) {}
+  /// This is a public constructor to enable access via the llvm::cast family
+  /// of methods. This should not be used directly.
+  explicit Op(Operation *state) : OpState(state) {
+    assert(!state || isa<ConcreteType>(state));
+  }
 
   friend class Operation;
 
 private:
diff --git a/mlir/include/mlir/IR/Operation.h b/mlir/include/mlir/IR/Operation.h
index 54e49b7..31ec8ea 100644
--- a/mlir/include/mlir/IR/Operation.h
+++ b/mlir/include/mlir/IR/Operation.h
@@ -389,14 +389,6 @@ public:
   // Conversions to declared operations like DimOp
   //===--------------------------------------------------------------------===//
 
-  /// The dyn_cast methods perform a dynamic cast from an Operation to a typed
-  /// Op like DimOp. This returns a null Op on failure.
-  template <typename OpClass> OpClass dyn_cast() {
-    if (isa<OpClass>())
-      return cast<OpClass>();
-    return OpClass();
-  }
-
   /// The cast methods perform a cast from an Operation to a typed Op like
   /// DimOp. This aborts if the parameter to the template isn't an instance of
   /// the template type argument.
@@ -417,10 +409,10 @@ public:
   /// including this one.
   void walk(const std::function<void(Operation *)> &callback);
 
-  /// Specialization of walk to only visit operations of 'OpTy'.
-  template <typename OpTy> void walk(std::function<void(OpTy)> callback) {
+  /// Specialization of walk to only visit operations of 'T'.
+  template <typename T> void walk(std::function<void(T)> callback) {
     walk([&](Operation *op) {
-      if (auto derivedOp = op->dyn_cast<OpTy>())
+      if (auto derivedOp = dyn_cast<T>(op))
         callback(derivedOp);
     });
   }
@@ -534,17 +526,6 @@ inline auto Operation::getOperands() -> operand_range {
   return {operand_begin(), operand_end()};
 }
 
-/// Provide dyn_cast_or_null functionality for Operation casts.
-template <typename T> T dyn_cast_or_null(Operation *op) {
-  return op ? op->dyn_cast<T>() : T();
-}
-
-/// Provide isa_and_nonnull functionality for Operation casts, i.e. if the
-/// operation is non-null and a class of 'T'.
-template <typename T> bool isa_and_nonnull(Operation *op) {
-  return op && op->isa<T>();
-}
-
 /// This class implements the result iterators for the Operation class
 /// in terms of getResult(idx).
 class ResultIterator final
@@ -598,4 +579,30 @@ inline auto Operation::getResultTypes()
 
 } // end namespace mlir
 
+namespace llvm {
+/// Provide isa functionality for operation casts.
+template <typename T> struct isa_impl<T, ::mlir::Operation> {
+  static inline bool doit(const ::mlir::Operation &op) {
+    return T::classof(const_cast<::mlir::Operation *>(&op));
+  }
+};
+
+/// Provide specializations for operation casts as the resulting T is value
+/// typed.
+template <typename T> struct cast_retty_impl<T, ::mlir::Operation *> {
+  using ret_type = T;
+};
+template <typename T> struct cast_retty_impl<T, ::mlir::Operation> {
+  using ret_type = T;
+};
+template <class T>
+struct cast_convert_val<T, ::mlir::Operation, ::mlir::Operation> {
+  static T doit(::mlir::Operation &val) { return T(&val); }
+};
+template <class T>
+struct cast_convert_val<T, ::mlir::Operation *, ::mlir::Operation *> {
+  static T doit(::mlir::Operation *val) { return T(val); }
+};
+} // end namespace llvm
+
 #endif // MLIR_IR_OPERATION_H
diff --git a/mlir/include/mlir/IR/PatternMatch.h b/mlir/include/mlir/IR/PatternMatch.h
index 3b02ed5..51528c1 100644
--- a/mlir/include/mlir/IR/PatternMatch.h
+++ b/mlir/include/mlir/IR/PatternMatch.h
@@ -215,7 +215,7 @@ public:
     OperationState state(getContext(), location, OpTy::getOperationName());
     OpTy::build(this, &state, args...);
     auto *op = createOperation(state);
-    auto result = op->dyn_cast<OpTy>();
+    auto result = dyn_cast<OpTy>(op);
     assert(result && "Builder didn't return the right type");
     return result;
   }
@@ -231,7 +231,7 @@ public:
 
     // If the Operation we produce is valid, return it.
     if (!OpTy::verifyInvariants(op)) {
-      auto result = op->dyn_cast<OpTy>();
+      auto result = dyn_cast<OpTy>(op);
       assert(result && "Builder didn't return the right type");
       return result;
     }
diff --git a/mlir/include/mlir/Support/LLVM.h b/mlir/include/mlir/Support/LLVM.h
index 031dceb..6676ad0 100644
--- a/mlir/include/mlir/Support/LLVM.h
+++ b/mlir/include/mlir/Support/LLVM.h
@@ -69,6 +69,7 @@ using llvm::cast_or_null;
 using llvm::dyn_cast;
 using llvm::dyn_cast_or_null;
 using llvm::isa;
+using llvm::isa_and_nonnull;
 
 // Containers.
 using llvm::ArrayRef;
diff --git a/mlir/lib/AffineOps/AffineOps.cpp b/mlir/lib/AffineOps/AffineOps.cpp
index 51209da..2dfed93 100644
--- a/mlir/lib/AffineOps/AffineOps.cpp
+++ b/mlir/lib/AffineOps/AffineOps.cpp
@@ -61,11 +61,11 @@ bool mlir::isValidDim(Value *value) {
     if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
       return true;
     // Affine apply operation is ok if all of its operands are ok.
-    if (auto applyOp = op->dyn_cast<AffineApplyOp>())
+    if (auto applyOp = dyn_cast<AffineApplyOp>(op))
       return applyOp.isValidDim();
     // The dim op is okay if its operand memref/tensor is defined at the top
     // level.
-    if (auto dimOp = op->dyn_cast<DimOp>())
+    if (auto dimOp = dyn_cast<DimOp>(op))
       return isTopLevelSymbol(dimOp.getOperand());
     return false;
   }
@@ -86,11 +86,11 @@ bool mlir::isValidSymbol(Value *value) {
     if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
       return true;
     // Affine apply operation is ok if all of its operands are ok.
-    if (auto applyOp = op->dyn_cast<AffineApplyOp>())
+    if (auto applyOp = dyn_cast<AffineApplyOp>(op))
       return applyOp.isValidSymbol();
     // The dim op is okay if its operand memref/tensor is defined at the top
     // level.
-    if (auto dimOp = op->dyn_cast<DimOp>())
+    if (auto dimOp = dyn_cast<DimOp>(op))
       return isTopLevelSymbol(dimOp.getOperand());
     return false;
   }
diff --git a/mlir/lib/Analysis/LoopAnalysis.cpp b/mlir/lib/Analysis/LoopAnalysis.cpp
index 78caa4c..60f2b14 100644
--- a/mlir/lib/Analysis/LoopAnalysis.cpp
+++ b/mlir/lib/Analysis/LoopAnalysis.cpp
@@ -320,8 +320,8 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
   loadAndStores.match(forOp, &loadAndStoresMatched);
   for (auto ls : loadAndStoresMatched) {
     auto *op = ls.getMatchedOperation();
-    auto load = op->dyn_cast<LoadOp>();
-    auto store = op->dyn_cast<StoreOp>();
+    auto load = dyn_cast<LoadOp>(op);
+    auto store = dyn_cast<StoreOp>(op);
     // Only scalar types are considered vectorizable, all load/store must be
     // vectorizable for a loop to qualify as vectorizable.
     // TODO(ntv): ponder whether we want to be more general here.
@@ -338,8 +338,8 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
 
 bool mlir::isVectorizableLoopBody(AffineForOp loop, int *memRefDim) {
   VectorizableOpFun fun([memRefDim](AffineForOp loop, Operation &op) {
-    auto load = op.dyn_cast<LoadOp>();
-    auto store = op.dyn_cast<StoreOp>();
+    auto load = dyn_cast<LoadOp>(op);
+    auto store = dyn_cast<StoreOp>(op);
     return load ?
                 isContiguousAccess(loop.getInductionVar(), load, memRefDim)
                 : isContiguousAccess(loop.getInductionVar(), store, memRefDim);
   });
diff --git a/mlir/lib/Analysis/MemRefBoundCheck.cpp b/mlir/lib/Analysis/MemRefBoundCheck.cpp
index 0fb8862..4e23441 100644
--- a/mlir/lib/Analysis/MemRefBoundCheck.cpp
+++ b/mlir/lib/Analysis/MemRefBoundCheck.cpp
@@ -48,9 +48,9 @@ FunctionPassBase *mlir::createMemRefBoundCheckPass() {
 
 void MemRefBoundCheck::runOnFunction() {
   getFunction().walk([](Operation *opInst) {
-    if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
+    if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
       boundCheckLoadOrStoreOp(loadOp);
-    } else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
+    } else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
       boundCheckLoadOrStoreOp(storeOp);
     }
     // TODO(bondhugula): do this for DMA ops as well.
diff --git a/mlir/lib/Analysis/SliceAnalysis.cpp b/mlir/lib/Analysis/SliceAnalysis.cpp
index bce000a..155a2bb 100644
--- a/mlir/lib/Analysis/SliceAnalysis.cpp
+++ b/mlir/lib/Analysis/SliceAnalysis.cpp
@@ -50,7 +50,7 @@ static void getForwardSliceImpl(Operation *op,
     return;
   }
 
-  if (auto forOp = op->dyn_cast<AffineForOp>()) {
+  if (auto forOp = dyn_cast<AffineForOp>(op)) {
     for (auto &u : forOp.getInductionVar()->getUses()) {
       auto *ownerInst = u.getOwner();
       if (forwardSlice->count(ownerInst) == 0) {
diff --git a/mlir/lib/Analysis/Utils.cpp b/mlir/lib/Analysis/Utils.cpp
index 1eaab67..8d963e4 100644
--- a/mlir/lib/Analysis/Utils.cpp
+++ b/mlir/lib/Analysis/Utils.cpp
@@ -44,7 +44,7 @@ void mlir::getLoopIVs(Operation &op, SmallVectorImpl<AffineForOp> *loops) {
   AffineForOp currAffineForOp;
   // Traverse up the hierarchy collecing all 'affine.for' operation while
   // skipping over 'affine.if' operations.
-  while (currOp && ((currAffineForOp = currOp->dyn_cast<AffineForOp>()) ||
+  while (currOp && ((currAffineForOp = dyn_cast<AffineForOp>(currOp)) ||
                     currOp->isa<AffineIfOp>())) {
     if (currAffineForOp)
       loops->push_back(currAffineForOp);
@@ -239,7 +239,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
     assert(isValidSymbol(symbol));
     // Check if the symbol is a constant.
     if (auto *op = symbol->getDefiningOp()) {
-      if (auto constOp = op->dyn_cast<ConstantIndexOp>()) {
+      if (auto constOp = dyn_cast<ConstantIndexOp>(op)) {
         cst.setIdToConstant(*symbol, constOp.getValue());
       }
     }
@@ -467,7 +467,7 @@ static Operation *getInstAtPosition(ArrayRef<unsigned> positions,
   }
   if (level == positions.size() - 1)
     return &op;
-  if (auto childAffineForOp = op.dyn_cast<AffineForOp>())
+  if (auto childAffineForOp = dyn_cast<AffineForOp>(op))
     return getInstAtPosition(positions, level + 1,
                              childAffineForOp.getBody());
 
@@ -633,7 +633,7 @@ mlir::insertBackwardComputationSlice(Operation *srcOpInst, Operation *dstOpInst,
 // Constructs MemRefAccess populating it with the memref, its indices and
 // opinst from 'loadOrStoreOpInst'.
 MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) {
-  if (auto loadOp = loadOrStoreOpInst->dyn_cast<LoadOp>()) {
+  if (auto loadOp = dyn_cast<LoadOp>(loadOrStoreOpInst)) {
     memref = loadOp.getMemRef();
     opInst = loadOrStoreOpInst;
     auto loadMemrefType = loadOp.getMemRefType();
@@ -643,7 +643,7 @@ MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) {
     }
   } else {
     assert(loadOrStoreOpInst->isa<StoreOp>() && "load/store op expected");
-    auto storeOp = loadOrStoreOpInst->dyn_cast<StoreOp>();
+    auto storeOp = dyn_cast<StoreOp>(loadOrStoreOpInst);
     opInst = loadOrStoreOpInst;
     memref = storeOp.getMemRef();
     auto storeMemrefType = storeOp.getMemRefType();
@@ -750,7 +750,7 @@ Optional<int64_t> mlir::getMemoryFootprintBytes(AffineForOp forOp,
 void mlir::getSequentialLoops(
     AffineForOp forOp, llvm::SmallDenseSet<Value *, 8> *sequentialLoops) {
   forOp.getOperation()->walk([&](Operation *op) {
-    if (auto innerFor = op->dyn_cast<AffineForOp>())
+    if (auto innerFor = dyn_cast<AffineForOp>(op))
       if (!isLoopParallel(innerFor))
         sequentialLoops->insert(innerFor.getInductionVar());
   });
diff --git a/mlir/lib/Analysis/VectorAnalysis.cpp b/mlir/lib/Analysis/VectorAnalysis.cpp
index b45ac00..8fecf05 100644
--- a/mlir/lib/Analysis/VectorAnalysis.cpp
+++ b/mlir/lib/Analysis/VectorAnalysis.cpp
@@ -152,7 +152,7 @@ static SetVector<Operation *> getParentsOfType(Operation *op) {
   SetVector<Operation *> res;
   auto *current = op;
   while (auto *parent = current->getParentOp()) {
-    if (auto typedParent = parent->template dyn_cast<T>()) {
+    if (auto typedParent = dyn_cast<T>(parent)) {
       assert(res.count(parent) == 0 && "Already inserted");
       res.insert(parent);
     }
@@ -177,7 +177,7 @@ AffineMap mlir::makePermutationMap(
     }
   }
 
-  if (auto load = op->dyn_cast<LoadOp>()) {
+  if (auto load = dyn_cast<LoadOp>(op)) {
     return ::makePermutationMap(load.getIndices(), enclosingLoopToVectorDim);
   }
 
@@ -198,10 +198,10 @@ bool mlir::matcher::operatesOnSuperVectorsOf(Operation &op,
   /// do not have to special case. Maybe a trait, or just a method, unclear atm.
   bool mustDivide = false;
   VectorType superVectorType;
-  if (auto read = op.dyn_cast<VectorTransferReadOp>()) {
+  if (auto read = dyn_cast<VectorTransferReadOp>(op)) {
     superVectorType = read.getResultType();
     mustDivide = true;
-  } else if (auto write = op.dyn_cast<VectorTransferWriteOp>()) {
+  } else if (auto write = dyn_cast<VectorTransferWriteOp>(op)) {
     superVectorType = write.getVectorType();
     mustDivide = true;
   } else if (op.getNumResults() == 0) {
diff --git a/mlir/lib/EDSC/Builders.cpp b/mlir/lib/EDSC/Builders.cpp
index 610c8b6..2c91177 100644
--- a/mlir/lib/EDSC/Builders.cpp
+++ b/mlir/lib/EDSC/Builders.cpp
@@ -100,7 +100,7 @@ ValueHandle ValueHandle::create(StringRef name, ArrayRef<ValueHandle> operands,
   if (op->getNumResults() == 1) {
     return ValueHandle(op->getResult(0));
   }
-  if (auto f = op->dyn_cast<AffineForOp>()) {
+  if (auto f = dyn_cast<AffineForOp>(op)) {
     return ValueHandle(f.getInductionVar());
   }
   llvm_unreachable("unsupported operation, use an OperationHandle instead");
@@ -147,8 +147,8 @@ static llvm::Optional<ValueHandle> emitStaticFor(ArrayRef<ValueHandle> lbs,
   if (!lbDef || !ubDef)
     return llvm::Optional<ValueHandle>();
 
-  auto lbConst = lbDef->dyn_cast<ConstantIndexOp>();
-  auto ubConst = ubDef->dyn_cast<ConstantIndexOp>();
+  auto lbConst = dyn_cast<ConstantIndexOp>(lbDef);
+  auto ubConst = dyn_cast<ConstantIndexOp>(ubDef);
   if (!lbConst || !ubConst)
     return llvm::Optional<ValueHandle>();
 
diff --git a/mlir/lib/Linalg/Transforms/Tiling.cpp b/mlir/lib/Linalg/Transforms/Tiling.cpp
index 434f720..6e20542a 100644
--- a/mlir/lib/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Linalg/Transforms/Tiling.cpp
@@ -319,11 +319,11 @@ static LogicalResult tileLinalgOp(LinalgOp &op, ArrayRef<Value *> tileSizes,
 
 // TODO(ntv) expose as a primitive for other passes.
 static LogicalResult tileLinalgOp(Operation *op, ArrayRef<Value *> tileSizes,
                                   PerFunctionState &state) {
-  if (auto matmulOp = op->dyn_cast<MatmulOp>()) {
+  if (auto matmulOp = dyn_cast<MatmulOp>(op)) {
     return tileLinalgOp(matmulOp, tileSizes, state);
-  } else if (auto matvecOp = op->dyn_cast<MatvecOp>()) {
+  } else if (auto matvecOp = dyn_cast<MatvecOp>(op)) {
     return tileLinalgOp(matvecOp, tileSizes, state);
-  } else if (auto dotOp = op->dyn_cast<DotOp>()) {
+  } else if (auto dotOp = dyn_cast<DotOp>(op)) {
     return tileLinalgOp(dotOp, tileSizes, state);
   }
   return failure();
diff --git a/mlir/lib/Linalg/Utils/Utils.cpp b/mlir/lib/Linalg/Utils/Utils.cpp
index 4b77ece..98cf4b7 100644
--- a/mlir/lib/Linalg/Utils/Utils.cpp
+++ b/mlir/lib/Linalg/Utils/Utils.cpp
@@ -68,9 +68,9 @@ ValueHandle LoopNestRangeBuilder::LoopNestRangeBuilder::operator()(
 
 SmallVector<Value *, 8> mlir::getRanges(Operation *op) {
   SmallVector<Value *, 8> res;
-  if (auto view = op->dyn_cast<ViewOp>()) {
+  if (auto view = dyn_cast<ViewOp>(op)) {
     res.append(view.getIndexings().begin(), view.getIndexings().end());
-  } else if (auto slice = op->dyn_cast<SliceOp>()) {
+  } else if (auto slice = dyn_cast<SliceOp>(op)) {
     for (auto *i : slice.getIndexings())
       if (i->getType().isa<RangeType>())
         res.push_back(i);
@@ -100,7 +100,7 @@ SmallVector<Value *, 8> mlir::getRanges(Operation *op) {
 Value *mlir::createOrReturnView(FuncBuilder *b, Location loc,
                                 Operation *viewDefiningOp,
                                 ArrayRef<Value *> ranges) {
-  if (auto view = viewDefiningOp->dyn_cast<ViewOp>()) {
+  if (auto view = dyn_cast<ViewOp>(viewDefiningOp)) {
     auto indexings = view.getIndexings();
     if (std::equal(indexings.begin(), indexings.end(), ranges.begin()))
       return view.getResult();
diff --git a/mlir/lib/StandardOps/Ops.cpp b/mlir/lib/StandardOps/Ops.cpp
index 05e3b13..bc68a78 100644
--- a/mlir/lib/StandardOps/Ops.cpp
+++ b/mlir/lib/StandardOps/Ops.cpp
@@ -134,7 +134,7 @@ struct MemRefCastFolder : public RewritePattern {
   void rewrite(Operation *op, PatternRewriter &rewriter) const override {
     for (unsigned i = 0, e = op->getNumOperands(); i != e; ++i)
       if (auto *memref = op->getOperand(i)->getDefiningOp())
-        if (auto cast = memref->dyn_cast<MemRefCastOp>())
+        if (auto cast = dyn_cast<MemRefCastOp>(memref))
          op->setOperand(i, cast.getOperand());
     rewriter.updatedRootInPlace(op);
   }
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index 8a9c649..597efc3 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -199,11 +199,11 @@ bool ModuleTranslation::convertOperation(Operation &opInst,
 
   // Emit branches. We need to look up the remapped blocks and ignore the block
   // arguments that were transformed into PHI nodes.
-  if (auto brOp = opInst.dyn_cast<LLVM::BrOp>()) {
+  if (auto brOp = dyn_cast<LLVM::BrOp>(opInst)) {
     builder.CreateBr(blockMapping[brOp.getSuccessor(0)]);
     return false;
   }
-  if (auto condbrOp = opInst.dyn_cast<LLVM::CondBrOp>()) {
+  if (auto condbrOp = dyn_cast<LLVM::CondBrOp>(opInst)) {
     builder.CreateCondBr(valueMapping.lookup(condbrOp.getOperand(0)),
                          blockMapping[condbrOp.getSuccessor(0)],
                          blockMapping[condbrOp.getSuccessor(1)]);
@@ -264,7 +264,7 @@ static Value *getPHISourceValue(Block *current, Block *pred,
 
   // For conditional branches, we need to check if the current block is reached
   // through the "true" or the "false" branch and take the relevant operands.
-  auto condBranchOp = terminator.dyn_cast<LLVM::CondBrOp>();
+  auto condBranchOp = dyn_cast<LLVM::CondBrOp>(terminator);
   assert(condBranchOp &&
          "only branch operations can be terminators of a block that "
          "has successors");
diff --git a/mlir/lib/Transforms/DmaGeneration.cpp b/mlir/lib/Transforms/DmaGeneration.cpp
index 10f47fe..937399c 100644
--- a/mlir/lib/Transforms/DmaGeneration.cpp
+++ b/mlir/lib/Transforms/DmaGeneration.cpp
@@ -173,11 +173,11 @@ static void getMultiLevelStrides(const MemRefRegion &region,
 static bool getFullMemRefAsRegion(Operation *opInst, unsigned numParamLoopIVs,
                                   MemRefRegion *region) {
   unsigned rank;
-  if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
+  if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
     rank = loadOp.getMemRefType().getRank();
     region->memref = loadOp.getMemRef();
     region->setWrite(false);
-  } else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
+  } else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
     rank = storeOp.getMemRefType().getRank();
     region->memref = storeOp.getMemRef();
     region->setWrite(true);
@@ -483,7 +483,7 @@ bool DmaGeneration::runOnBlock(Block *block) {
   });
 
   for (auto it = curBegin; it != block->end(); ++it) {
-    if (auto forOp = it->dyn_cast<AffineForOp>()) {
+    if (auto forOp = dyn_cast<AffineForOp>(&*it)) {
       // Returns true if the footprint is known to exceed capacity.
       auto exceedsCapacity = [&](AffineForOp forOp) {
         Optional<int64_t> footprint =
@@ -607,10 +607,10 @@ uint64_t DmaGeneration::runOnBlock(Block::iterator begin, Block::iterator end) {
   // Walk this range of operations to gather all memory regions.
   block->walk(begin, end, [&](Operation *opInst) {
     // Gather regions to allocate to buffers in faster memory space.
-    if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
+    if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
       if (loadOp.getMemRefType().getMemorySpace() != slowMemorySpace)
         return;
-    } else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
+    } else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
      if (storeOp.getMemRefType().getMemorySpace() != slowMemorySpace)
         return;
     } else {
@@ -739,7 +739,7 @@ uint64_t DmaGeneration::runOnBlock(Block::iterator begin, Block::iterator end) {
   // For a range of operations, a note will be emitted at the caller.
   AffineForOp forOp;
   uint64_t sizeInKib = llvm::divideCeil(totalDmaBuffersSizeInBytes, 1024);
-  if (llvm::DebugFlag && (forOp = begin->dyn_cast<AffineForOp>())) {
+  if (llvm::DebugFlag && (forOp = dyn_cast<AffineForOp>(&*begin))) {
     forOp.emitRemark()
         << sizeInKib
         << " KiB of DMA buffers in fast memory space for this block\n";
diff --git a/mlir/lib/Transforms/LoopFusion.cpp b/mlir/lib/Transforms/LoopFusion.cpp
index 796d216..1c4a4d1 100644
--- a/mlir/lib/Transforms/LoopFusion.cpp
+++ b/mlir/lib/Transforms/LoopFusion.cpp
@@ -644,7 +644,7 @@ bool MemRefDependenceGraph::init(Function &f) {
 
   DenseMap<Operation *, unsigned> forToNodeMap;
   for (auto &op : f.front()) {
-    if (auto forOp = op.dyn_cast<AffineForOp>()) {
+    if (auto forOp = dyn_cast<AffineForOp>(op)) {
       // Create graph node 'id' to represent top-level 'forOp' and record
       // all loads and store accesses it contains.
       LoopNestStateCollector collector;
@@ -666,14 +666,14 @@ bool MemRefDependenceGraph::init(Function &f) {
       }
       forToNodeMap[&op] = node.id;
       nodes.insert({node.id, node});
-    } else if (auto loadOp = op.dyn_cast<LoadOp>()) {
+    } else if (auto loadOp = dyn_cast<LoadOp>(op)) {
       // Create graph node for top-level load op.
       Node node(nextNodeId++, &op);
       node.loads.push_back(&op);
       auto *memref = op.cast<LoadOp>().getMemRef();
       memrefAccesses[memref].insert(node.id);
       nodes.insert({node.id, node});
-    } else if (auto storeOp = op.dyn_cast<StoreOp>()) {
+    } else if (auto storeOp = dyn_cast<StoreOp>(op)) {
       // Create graph node for top-level store op.
       Node node(nextNodeId++, &op);
       node.stores.push_back(&op);
@@ -2125,7 +2125,7 @@ public:
     auto *fn = dstNode->op->getFunction();
     for (unsigned i = 0, e = fn->getNumArguments(); i != e; ++i) {
       for (auto &use : fn->getArgument(i)->getUses()) {
-        if (auto loadOp = use.getOwner()->dyn_cast<LoadOp>()) {
+        if (auto loadOp = dyn_cast<LoadOp>(use.getOwner())) {
           // Gather loops surrounding 'use'.
           SmallVector<AffineForOp, 4> loops;
           getLoopIVs(*use.getOwner(), &loops);
diff --git a/mlir/lib/Transforms/LoopTiling.cpp b/mlir/lib/Transforms/LoopTiling.cpp
index ce42a5e..28e13d8 100644
--- a/mlir/lib/Transforms/LoopTiling.cpp
+++ b/mlir/lib/Transforms/LoopTiling.cpp
@@ -273,7 +273,7 @@ static void getTileableBands(Function &f,
 
   for (auto &block : f)
     for (auto &op : block)
-      if (auto forOp = op.dyn_cast<AffineForOp>())
+      if (auto forOp = dyn_cast<AffineForOp>(op))
         getMaximalPerfectLoopNest(forOp);
 }
 
diff --git a/mlir/lib/Transforms/LoopUnrollAndJam.cpp b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
index 366a7ed..0a23295 100644
--- a/mlir/lib/Transforms/LoopUnrollAndJam.cpp
+++ b/mlir/lib/Transforms/LoopUnrollAndJam.cpp
@@ -92,7 +92,7 @@ void LoopUnrollAndJam::runOnFunction() {
   // unroll-and-jammed by this pass. However, runOnAffineForOp can be called on
   // any for operation.
   auto &entryBlock = getFunction().front();
-  if (auto forOp = entryBlock.front().dyn_cast<AffineForOp>())
+  if (auto forOp = dyn_cast<AffineForOp>(entryBlock.front()))
     runOnAffineForOp(forOp);
 }
 
diff --git a/mlir/lib/Transforms/LowerAffine.cpp b/mlir/lib/Transforms/LowerAffine.cpp
index dc389c8..1ffe5e3 100644
--- a/mlir/lib/Transforms/LowerAffine.cpp
+++ b/mlir/lib/Transforms/LowerAffine.cpp
@@ -620,10 +620,10 @@ void LowerAffinePass::runOnFunction() {
 
   // Rewrite all of the ifs and fors. We walked the operations in postorders,
   // so we know that we will rewrite them in the reverse order.
   for (auto *op : llvm::reverse(instsToRewrite)) {
-    if (auto ifOp = op->dyn_cast<AffineIfOp>()) {
+    if (auto ifOp = dyn_cast<AffineIfOp>(op)) {
       if (lowerAffineIf(ifOp))
         return signalPassFailure();
-    } else if (auto forOp = op->dyn_cast<AffineForOp>()) {
+    } else if (auto forOp = dyn_cast<AffineForOp>(op)) {
      if (lowerAffineFor(forOp))
        return signalPassFailure();
     } else if (lowerAffineApply(op->cast<AffineApplyOp>())) {
diff --git a/mlir/lib/Transforms/MaterializeVectors.cpp b/mlir/lib/Transforms/MaterializeVectors.cpp
index 2f06a9a..28dfb22 100644
--- a/mlir/lib/Transforms/MaterializeVectors.cpp
+++ b/mlir/lib/Transforms/MaterializeVectors.cpp
@@ -556,12 +556,12 @@ static bool instantiateMaterialization(Operation *op,
   if (op->getNumRegions() != 0)
     return op->emitError("NYI path Op with region"), true;
 
-  if (auto write = op->dyn_cast<VectorTransferWriteOp>()) {
+  if (auto write = dyn_cast<VectorTransferWriteOp>(op)) {
     auto *clone = instantiate(&b, write, state->hwVectorType,
                               state->hwVectorInstance, state->substitutionsMap);
     return clone == nullptr;
   }
-  if (auto read = op->dyn_cast<VectorTransferReadOp>()) {
+  if (auto read = dyn_cast<VectorTransferReadOp>(op)) {
     auto *clone = instantiate(&b, read, state->hwVectorType,
                               state->hwVectorInstance, state->substitutionsMap);
     if (!clone) {
diff --git a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
index a63d462..94df936 100644
--- a/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
+++ b/mlir/lib/Transforms/MemRefDataFlowOpt.cpp
@@ -103,7 +103,7 @@ void MemRefDataFlowOpt::forwardStoreToLoad(LoadOp loadOp) {
   SmallVector<Operation *, 8> storeOps;
   unsigned minSurroundingLoops = getNestingDepth(*loadOpInst);
   for (auto &use : loadOp.getMemRef()->getUses()) {
-    auto storeOp = use.getOwner()->dyn_cast<StoreOp>();
+    auto storeOp = dyn_cast<StoreOp>(use.getOwner());
     if (!storeOp)
       continue;
     auto *storeOpInst = storeOp.getOperation();
diff --git a/mlir/lib/Transforms/PipelineDataTransfer.cpp b/mlir/lib/Transforms/PipelineDataTransfer.cpp
index 66fbf4a..0da97f7 100644
--- a/mlir/lib/Transforms/PipelineDataTransfer.cpp
+++ b/mlir/lib/Transforms/PipelineDataTransfer.cpp
@@ -181,7 +181,7 @@ static void findMatchingStartFinishInsts(
   // Collect outgoing DMA operations - needed to check for dependences below.
   SmallVector<DmaStartOp, 4> outgoingDmaOps;
   for (auto &op : *forOp.getBody()) {
-    auto dmaStartOp = op.dyn_cast<DmaStartOp>();
+    auto dmaStartOp = dyn_cast<DmaStartOp>(op);
     if (dmaStartOp && dmaStartOp.isSrcMemorySpaceFaster())
       outgoingDmaOps.push_back(dmaStartOp);
   }
@@ -193,7 +193,7 @@ static void findMatchingStartFinishInsts(
       dmaFinishInsts.push_back(&op);
       continue;
     }
-    auto dmaStartOp = op.dyn_cast<DmaStartOp>();
+    auto dmaStartOp = dyn_cast<DmaStartOp>(op);
     if (!dmaStartOp)
       continue;
 
diff --git a/mlir/lib/Transforms/TestConstantFold.cpp b/mlir/lib/Transforms/TestConstantFold.cpp
index 0990d7a..ec1e971 100644
--- a/mlir/lib/Transforms/TestConstantFold.cpp
+++ b/mlir/lib/Transforms/TestConstantFold.cpp
@@ -48,7 +48,7 @@ void TestConstantFold::foldOperation(Operation *op,
   }
   // If this op is a constant that are used and cannot be de-duplicated,
   // remember it for cleanup later.
-  else if (auto constant = op->dyn_cast<ConstantOp>()) {
+  else if (auto constant = dyn_cast<ConstantOp>(op)) {
     existingConstants.push_back(op);
   }
 }
diff --git a/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp b/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp
index fc8209b..b907840 100644
--- a/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp
+++ b/mlir/lib/Transforms/Utils/ConstantFoldUtils.cpp
@@ -40,7 +40,7 @@ bool ConstantFoldHelper::tryToConstantFold(
   // into the value it contains. We need to consider constants before the
   // constant folding logic to avoid re-creating the same constant later.
   // TODO: Extend to support dialect-specific constant ops.
-  if (auto constant = op->dyn_cast<ConstantOp>()) {
+  if (auto constant = dyn_cast<ConstantOp>(op)) {
     // If this constant is dead, update bookkeeping and signal the caller.
     if (constant.use_empty()) {
       notifyRemoval(op);
diff --git a/mlir/lib/Transforms/Utils/LoopUtils.cpp b/mlir/lib/Transforms/Utils/LoopUtils.cpp
index a10e4a1..7fbb48e 100644
--- a/mlir/lib/Transforms/Utils/LoopUtils.cpp
+++ b/mlir/lib/Transforms/Utils/LoopUtils.cpp
@@ -363,7 +363,7 @@ void mlir::getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
   nestedLoops.push_back(curr);
   auto *currBody = curr.getBody();
   while (currBody->begin() == std::prev(currBody->end(), 2) &&
-         (curr = curr.getBody()->front().dyn_cast<AffineForOp>())) {
+         (curr = dyn_cast<AffineForOp>(curr.getBody()->front()))) {
     nestedLoops.push_back(curr);
     currBody = curr.getBody();
   }
diff --git a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
index 753f7cf..b64dc53 100644
--- a/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
+++ b/mlir/lib/Transforms/Vectorization/VectorizerTestPass.cpp
@@ -234,7 +234,7 @@ void VectorizerTestPass::testComposeMaps(llvm::raw_ostream &outs) {
 static bool affineApplyOp(Operation &op) { return op.isa<AffineApplyOp>(); }
 
 static bool singleResultAffineApplyOpWithoutUses(Operation &op) {
-  auto app = op.dyn_cast<AffineApplyOp>();
+  auto app = dyn_cast<AffineApplyOp>(op);
   return app && app.use_empty();
 }
 
diff --git a/mlir/lib/Transforms/Vectorize.cpp b/mlir/lib/Transforms/Vectorize.cpp
index 025a653..9b8768a 100644
--- a/mlir/lib/Transforms/Vectorize.cpp
+++ b/mlir/lib/Transforms/Vectorize.cpp
@@ -839,8 +839,8 @@ static LogicalResult vectorizeAffineForOp(AffineForOp loop, int64_t step,
   loadAndStores.match(loop.getOperation(), &loadAndStoresMatches);
   for (auto ls : loadAndStoresMatches) {
     auto *opInst = ls.getMatchedOperation();
-    auto load = opInst->dyn_cast<LoadOp>();
-    auto store = opInst->dyn_cast<StoreOp>();
+    auto load = dyn_cast<LoadOp>(opInst);
+    auto store = dyn_cast<StoreOp>(opInst);
     LLVM_DEBUG(opInst->print(dbgs()));
     LogicalResult result =
         load ? vectorizeRootOrTerminal(loop.getInductionVar(), load, state)
             : vectorizeRootOrTerminal(loop.getInductionVar(), store, state);
@@ -982,7 +982,7 @@ static Value *vectorizeOperand(Value *operand, Operation *op,
     return nullptr;
   }
   // 3. vectorize constant.
-  if (auto constant = operand->getDefiningOp()->dyn_cast<ConstantOp>()) {
+  if (auto constant = dyn_cast<ConstantOp>(operand->getDefiningOp())) {
     return vectorizeConstant(
         op, constant,
         VectorType::get(state->strategy->vectorSizes, operand->getType()));
@@ -1012,7 +1012,7 @@ static Operation *vectorizeOneOperation(Operation *opInst,
   assert(!opInst->isa<VectorTransferWriteOp>() &&
          "vector.transfer_write cannot be further vectorized");
 
-  if (auto store = opInst->dyn_cast<StoreOp>()) {
+  if (auto store = dyn_cast<StoreOp>(opInst)) {
     auto *memRef = store.getMemRef();
     auto *value = store.getValueToStore();
     auto *vectorValue = vectorizeOperand(value, opInst, state);
diff --git a/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp b/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp
index ec566e2..5c34ed1 100644
--- a/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp
+++ b/mlir/tools/mlir-tblgen/LLVMIRConversionGen.cpp
@@ -161,8 +161,8 @@ static bool emitOneBuilder(const Record &record, raw_ostream &os) {
   }
 
   // Output the check and the rewritten builder string.
-  os << "if (auto op = opInst.dyn_cast<" << op.getQualCppClassName()
-     << ">()) {\n";
+  os << "if (auto op = dyn_cast<" << op.getQualCppClassName()
+     << ">(opInst)) {\n";
  os << bs.str() << builderStrRef << "\n";
  os << " return false;\n";
  os << "}\n";
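
--

Usage sketch: the llvm::isa_impl / cast_retty_impl / cast_convert_val specializations added in Operation.h make the standard LLVM casting functions (and the _or_null / _and_nonnull variants re-exported through Support/LLVM.h) apply directly to Operation *, producing value-typed ops, so a failed dyn_cast yields a null op object rather than a null pointer. A minimal sketch, assuming the standard-dialect DimOp and ConstantIndexOp and the header layout at this revision:

    #include "mlir/IR/Operation.h"
    #include "mlir/StandardOps/Ops.h" // assumed path of DimOp/ConstantIndexOp here

    using namespace mlir;

    void castExamples(Operation *op, Operation *maybeNull) {
      // dyn_cast returns a DimOp by value; it converts to false on failure.
      if (auto dim = dyn_cast<DimOp>(op))
        (void)dim.getOperand();

      // isa/cast follow the usual LLVM conventions; cast asserts on a mismatch.
      if (isa<DimOp>(op)) {
        DimOp dim = cast<DimOp>(op);
        (void)dim;
      }

      // The _or_null / _and_nonnull forms tolerate a null Operation *.
      if (auto cst = dyn_cast_or_null<ConstantIndexOp>(maybeNull))
        (void)cst.getValue();
      (void)isa_and_nonnull<ConstantIndexOp>(maybeNull);
    }

This mirrors the rewrites above, e.g. op->dyn_cast<LoadOp>() becoming dyn_cast<LoadOp>(op), with no change in behavior.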