private:
LogicalResult (*implFn)(OpType, PatternRewriter &rewriter);
};
- insert(std::make_unique<FnPattern>(std::move(implFn), getContext()));
+ add(std::make_unique<FnPattern>(std::move(implFn), getContext()));
return *this;
}
/// Collects the GPU WMMA-op lowering patterns into `patterns`, converting
/// result/operand types with `converter`.
void mlir::populateGpuWMMAToNVVMConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns) {
  patterns.add<WmmaLoadOpToNVVMLowering, WmmaMmaOpToNVVMLowering,
               WmmaStoreOpToNVVMLowering, WmmaConstantOpToNVVMLowering,
               WmmaElementwiseOpToNVVMLowering>(converter);
}
/// Registers the canonicalization patterns for arith.addi into `patterns`.
void arith::AddIOp::getCanonicalizationPatterns(
    RewritePatternSet &patterns, MLIRContext *context) {
  patterns.add<AddIAddConstant, AddISubConstantRHS, AddISubConstantLHS>(
      context);
}
/// Registers the canonicalization patterns for arith.subi into `patterns`.
void arith::SubIOp::getCanonicalizationPatterns(
    RewritePatternSet &patterns, MLIRContext *context) {
  patterns
      .add<SubIRHSAddConstant, SubILHSAddConstant, SubIRHSSubConstantRHS,
           SubIRHSSubConstantLHS, SubILHSSubConstantRHS, SubILHSSubConstantLHS>(
          context);
}
//===----------------------------------------------------------------------===//
/// Registers the canonicalization patterns for arith.xori into `patterns`.
void arith::XOrIOp::getCanonicalizationPatterns(
    RewritePatternSet &patterns, MLIRContext *context) {
  patterns.add<XOrINotCmpI>(context);
}
//===----------------------------------------------------------------------===//
/// Registers the canonicalization patterns for arith.extsi into `patterns`.
void arith::ExtSIOp::getCanonicalizationPatterns(
    RewritePatternSet &patterns, MLIRContext *context) {
  patterns.add<ExtSIOfExtUI>(context);
}
LogicalResult arith::ExtSIOp::verify() {
/// Registers the canonicalization patterns for arith.andi into `patterns`.
void arith::AndIOp::getCanonicalizationPatterns(
    RewritePatternSet &patterns, MLIRContext *context) {
  patterns.add<AndOfExtUI, AndOfExtSI>(context);
}
//===----------------------------------------------------------------------===//
/// Registers the canonicalization patterns for arith.ori into `patterns`.
void arith::OrIOp::getCanonicalizationPatterns(
    RewritePatternSet &patterns, MLIRContext *context) {
  patterns.add<OrOfExtUI, OrOfExtSI>(context);
}
//===----------------------------------------------------------------------===//
/// Registers the canonicalization patterns for arith.index_cast into
/// `patterns`.
void arith::IndexCastOp::getCanonicalizationPatterns(
    RewritePatternSet &patterns, MLIRContext *context) {
  patterns.add<IndexCastOfIndexCast, IndexCastOfExtSI>(context);
}
//===----------------------------------------------------------------------===//
/// Registers the canonicalization patterns for arith.bitcast into `patterns`.
void arith::BitcastOp::getCanonicalizationPatterns(
    RewritePatternSet &patterns, MLIRContext *context) {
  patterns.add<BitcastOfBitcast>(context);
}
//===----------------------------------------------------------------------===//
/// Registers the canonicalization patterns for arith.select into `results`.
void arith::SelectOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                  MLIRContext *context) {
  results.add<SelectI1Simplify, SelectToExtUI>(context);
}
OpFoldResult arith::SelectOp::fold(ArrayRef<Attribute> operands) {
/// Registers the canonicalization patterns for the clone op into `results`.
void CloneOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                          MLIRContext *context) {
  results.add<SimplifyClones>(context);
}
//===----------------------------------------------------------------------===//
/// Registers the canonicalization patterns for the tiled-loop op into
/// `results`, including dim-folding patterns for both tensor and memref dims.
void TiledLoopOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                              MLIRContext *context) {
  results.add<TiledLoopInputsFolder, TiledLoopResultsFolder,
              DimOfTiledLoopInsOutsFolder<tensor::DimOp>,
              DimOfTiledLoopInsOutsFolder<memref::DimOp>,
              DimOfTiledLoopResultFolder<tensor::DimOp>,
              DimOfTiledLoopResultFolder<memref::DimOp>>(context);
}
LogicalResult TiledLoopOp::fold(ArrayRef<Attribute>,
return false;
});
- patterns.insert<DetensorizeGenericOp>(typeConverter, context);
- patterns.insert<FunctionNonEntryBlockConversion>(context, typeConverter,
- blockArgsToDetensor);
+ patterns.add<DetensorizeGenericOp>(typeConverter, context);
+ patterns.add<FunctionNonEntryBlockConversion>(context, typeConverter,
+ blockArgsToDetensor);
// Since non-entry block arguments get detensorized, we also need to
// update the control flow inside the function to reflect the correct
// types.
/// Collects the subview-composition pattern into `patterns`.
void mlir::memref::populateComposeSubViewPatterns(
    RewritePatternSet &patterns, MLIRContext *context) {
  patterns.add<ComposeSubViewOpPattern>(context);
}
/// Registers the canonicalization patterns for the while op into `results`.
void WhileOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                          MLIRContext *context) {
  results.add<RemoveLoopInvariantArgsFromBeforeBlock,
              RemoveLoopInvariantValueYielded, WhileConditionTruth,
              WhileCmpCond, WhileUnusedResult>(context);
}
//===----------------------------------------------------------------------===//
RewritePatternSet &patterns) {
MLIRContext *ctx = patterns.getContext();
patterns
- .insert<AffineOpSCFCanonicalizationPattern<AffineMinOp, /*IsMin=*/true>,
- AffineOpSCFCanonicalizationPattern<AffineMaxOp, /*IsMin=*/false>,
- DimOfIterArgFolder<tensor::DimOp>,
- DimOfIterArgFolder<memref::DimOp>,
- DimOfLoopResultFolder<tensor::DimOp>,
- DimOfLoopResultFolder<memref::DimOp>>(ctx);
+ .add<AffineOpSCFCanonicalizationPattern<AffineMinOp, /*IsMin=*/true>,
+ AffineOpSCFCanonicalizationPattern<AffineMaxOp, /*IsMin=*/false>,
+ DimOfIterArgFolder<tensor::DimOp>, DimOfIterArgFolder<memref::DimOp>,
+ DimOfLoopResultFolder<tensor::DimOp>,
+ DimOfLoopResultFolder<memref::DimOp>>(ctx);
}
std::unique_ptr<Pass> mlir::createSCFForLoopCanonicalizationPass() {
/// Collects the branch-op-interface type-conversion pattern into `patterns`;
/// `shouldConvertBranchOperand` lets callers filter which operands convert.
void mlir::populateBranchOpInterfaceTypeConversionPattern(
    RewritePatternSet &patterns, TypeConverter &typeConverter,
    function_ref<bool(BranchOpInterface, int)> shouldConvertBranchOperand) {
  patterns.add<BranchOpInterfaceTypeConversion>(
      typeConverter, patterns.getContext(), shouldConvertBranchOperand);
}
/// Registers the canonicalization patterns for tosa.concat into `results`.
void ConcatOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                           MLIRContext *context) {
  results.add<ConcatOptimization>(context);
}
struct ReshapeReshapeOptimization : public OpRewritePattern<tosa::ReshapeOp> {
/// Registers the canonicalization patterns for tosa.reshape into `results`.
void ReshapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                            MLIRContext *context) {
  results.add<ReshapeReshapeOptimization>(context);
  results.add<ReshapeConstOptimization>(context);
}
struct ConstantTransposeOptimization
/// Registers the canonicalization patterns for tosa.transpose into `results`.
void TransposeOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                              MLIRContext *context) {
  results.add<ConstantTransposeOptimization>(context);
  results.add<NoOpOptimization>(context);
}
struct AddZeroOptimization : public OpRewritePattern<tosa::AddOp> {
/// Registers the canonicalization patterns for tosa.add into `results`.
void AddOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                        MLIRContext *context) {
  results.add<AddZeroOptimization>(context);
}
struct MulOneOptimization : public OpRewritePattern<tosa::MulOp> {
/// Registers the canonicalization patterns for tosa.mul into `results`.
void MulOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                        MLIRContext *context) {
  results.add<MulOneOptimization>(context);
}
struct MaterializePadValue : public OpRewritePattern<tosa::PadOp> {
/// Registers the canonicalization patterns for tosa.pad into `results`.
void PadOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                        MLIRContext *context) {
  results.add<MaterializePadValue>(context);
}
struct MaxPool2dIsNoOp : public OpRewritePattern<tosa::MaxPool2dOp> {
/// Registers the canonicalization patterns for tosa.max_pool2d into `results`.
void MaxPool2dOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                              MLIRContext *context) {
  results.add<MaxPool2dIsNoOp>(context);
}
struct ClampIsNoOp : public OpRewritePattern<tosa::ClampOp> {
/// Registers the canonicalization patterns for tosa.clamp into `results`.
void ClampOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                          MLIRContext *context) {
  results.add<ClampIsNoOp>(context);
  results.add<ClampClampOptimization>(context);
}
//===----------------------------------------------------------------------===//
/// Collects the conv2d decomposition pattern into `patterns`.
void mlir::tosa::populateTosaDecomposeConv2D(MLIRContext *ctx,
                                             RewritePatternSet &patterns) {
  patterns.add<Conv2DIsFullyConnected>(ctx);
}
/// Collects the depthwise-conv decomposition pattern into `patterns`.
void mlir::tosa::populateTosaDecomposeDepthwise(MLIRContext *ctx,
                                                RewritePatternSet &patterns) {
  patterns.add<DepthwiseConv2DIsMul>(ctx);
}
/// Collects the transpose-conv decomposition patterns into `patterns`.
void mlir::tosa::populateTosaDecomposeTransposeConv(
    MLIRContext *ctx, RewritePatternSet &patterns) {
  patterns.add<TransposeConvDilatedConverter>(ctx);
  patterns.add<TransposeConvStridedConverter>(ctx);
}
}
/// Registers this entity's canonicalization patterns into `results`.
void getCanonicalizationPatterns(RewritePatternSet &results) const override {
  results.add<DisabledPattern, EnabledPattern>(results.getContext());
}
};