From 2b61b7979eb59df579a3a4bf6fe768ddf2a556f4 Mon Sep 17 00:00:00 2001
From: River Riddle
Date: Thu, 24 Oct 2019 15:00:36 -0700
Subject: [PATCH] Convert the Canonicalize and CSE passes to generic Operation
 Passes.

This allows them to be used on other non-function, or even other
function-like, operations. The algorithms are already generic, so this is
simply changing the derived pass type. The majority of this change is
ensuring that the nesting of these passes remains the same, as the pass
manager no longer auto-nests them.

PiperOrigin-RevId: 276573038
---
 mlir/bindings/python/pybind.cpp                    |  4 +--
 mlir/examples/toy/Ch3/toyc.cpp                     |  2 +-
 mlir/examples/toy/Ch4/toyc.cpp                     |  7 ++--
 mlir/examples/toy/Ch5/toyc.cpp                     | 17 ++++++----
 mlir/examples/toy/Ch6/toyc.cpp                     | 17 ++++++----
 mlir/g3doc/Tutorials/Toy/Ch-3.md                   |  2 +-
 mlir/include/mlir/Pass/PassManager.h               |  6 ++++
 mlir/include/mlir/Transforms/Passes.h              | 10 ++----
 mlir/lib/Transforms/CSE.cpp                        | 21 ++++++------
 mlir/lib/Transforms/Canonicalizer.cpp              | 39 +++++++++-------------
 mlir/test/AffineOps/canonicalize.mlir              |  2 +-
 .../FxpMathOps/lower-uniform-real-math-addew.mlir  |  2 +-
 .../FxpMathOps/lower-uniform-real-math-mulew.mlir  |  2 +-
 mlir/test/Dialect/GPU/canonicalize.mlir            |  2 +-
 mlir/test/Dialect/LLVMIR/terminator.mlir           |  2 +-
 mlir/test/Dialect/QuantOps/canonicalize.mlir       |  2 +-
 mlir/test/Dialect/SPIRV/canonicalize.mlir          |  2 +-
 mlir/test/Pass/ir-printing.mlir                    | 10 +++---
 mlir/test/Pass/pass-timing.mlir                    |  8 ++---
 mlir/test/Pass/pipeline-parsing.mlir               |  2 +-
 mlir/test/Quantizer/matmul.mlir                    |  2 +-
 mlir/test/Transforms/canonicalize.mlir             |  2 +-
 mlir/test/Transforms/cse.mlir                      |  2 +-
 mlir/test/Transforms/test-canonicalize.mlir        |  2 +-
 mlir/test/lib/Transforms/TestConstantFold.cpp      |  5 ---
 25 files changed, 83 insertions(+), 89 deletions(-)

diff --git a/mlir/bindings/python/pybind.cpp b/mlir/bindings/python/pybind.cpp
index 40c5c05..61f42af 100644
--- a/mlir/bindings/python/pybind.cpp
+++ b/mlir/bindings/python/pybind.cpp
@@ -196,8 +196,8 @@ struct PythonMLIRModule {
   // transformations and codegen. -1 means ExecutionEngine default.
   void compile(int optLevel, int codegenOptLevel) {
     PassManager manager(module->getContext());
-    manager.addPass(mlir::createCanonicalizerPass());
-    manager.addPass(mlir::createCSEPass());
+    manager.addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());
+    manager.addNestedPass<mlir::FuncOp>(mlir::createCSEPass());
     manager.addPass(mlir::createLowerAffinePass());
     manager.addPass(mlir::createLowerToLLVMPass());
     if (failed(manager.run(*module))) {
diff --git a/mlir/examples/toy/Ch3/toyc.cpp b/mlir/examples/toy/Ch3/toyc.cpp
index 72e1c6f..7e62e13 100644
--- a/mlir/examples/toy/Ch3/toyc.cpp
+++ b/mlir/examples/toy/Ch3/toyc.cpp
@@ -124,7 +124,7 @@ int dumpMLIR() {
     applyPassManagerCLOptions(pm);
 
     // Add a run of the canonicalizer to optimize the mlir module.
-    pm.addPass(mlir::createCanonicalizerPass());
+    pm.addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());
     if (mlir::failed(pm.run(*module)))
       return 4;
   }
diff --git a/mlir/examples/toy/Ch4/toyc.cpp b/mlir/examples/toy/Ch4/toyc.cpp
index 3dd079d..d8e04d6 100644
--- a/mlir/examples/toy/Ch4/toyc.cpp
+++ b/mlir/examples/toy/Ch4/toyc.cpp
@@ -130,9 +130,10 @@ int dumpMLIR() {
 
     // Now that there is only one function, we can infer the shapes of each of
     // the operations.
-    pm.addPass(mlir::toy::createShapeInferencePass());
-    pm.addPass(mlir::createCanonicalizerPass());
-    pm.addPass(mlir::createCSEPass());
+    mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+    optPM.addPass(mlir::toy::createShapeInferencePass());
+    optPM.addPass(mlir::createCanonicalizerPass());
+    optPM.addPass(mlir::createCSEPass());
 
     if (mlir::failed(pm.run(*module)))
       return 4;
diff --git a/mlir/examples/toy/Ch5/toyc.cpp b/mlir/examples/toy/Ch5/toyc.cpp
index 9b47b43..54cbdf1 100644
--- a/mlir/examples/toy/Ch5/toyc.cpp
+++ b/mlir/examples/toy/Ch5/toyc.cpp
@@ -135,21 +135,24 @@ int dumpMLIR() {
 
     // Now that there is only one function, we can infer the shapes of each of
     // the operations.
-    pm.addPass(mlir::toy::createShapeInferencePass());
-    pm.addPass(mlir::createCanonicalizerPass());
-    pm.addPass(mlir::createCSEPass());
+    mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+    optPM.addPass(mlir::toy::createShapeInferencePass());
+    optPM.addPass(mlir::createCanonicalizerPass());
+    optPM.addPass(mlir::createCSEPass());
   }
 
   if (isLoweringToAffine) {
     // Partially lower the toy dialect with a few cleanups afterwards.
     pm.addPass(mlir::toy::createLowerToAffinePass());
-    pm.addPass(mlir::createCanonicalizerPass());
-    pm.addPass(mlir::createCSEPass());
+
+    mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+    optPM.addPass(mlir::createCanonicalizerPass());
+    optPM.addPass(mlir::createCSEPass());
 
     // Add optimizations if enabled.
     if (EnableOpt) {
-      pm.addPass(mlir::createLoopFusionPass());
-      pm.addPass(mlir::createMemRefDataFlowOptPass());
+      optPM.addPass(mlir::createLoopFusionPass());
+      optPM.addPass(mlir::createMemRefDataFlowOptPass());
     }
   }
 
diff --git a/mlir/examples/toy/Ch6/toyc.cpp b/mlir/examples/toy/Ch6/toyc.cpp
index 018dd0f..b7aed1b 100644
--- a/mlir/examples/toy/Ch6/toyc.cpp
+++ b/mlir/examples/toy/Ch6/toyc.cpp
@@ -149,21 +149,24 @@ int loadAndProcessMLIR(mlir::MLIRContext &context,
 
     // Now that there is only one function, we can infer the shapes of each of
     // the operations.
-    pm.addPass(mlir::toy::createShapeInferencePass());
-    pm.addPass(mlir::createCanonicalizerPass());
-    pm.addPass(mlir::createCSEPass());
+    mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+    optPM.addPass(mlir::toy::createShapeInferencePass());
+    optPM.addPass(mlir::createCanonicalizerPass());
+    optPM.addPass(mlir::createCSEPass());
   }
 
   if (isLoweringToAffine) {
     // Partially lower the toy dialect with a few cleanups afterwards.
     pm.addPass(mlir::toy::createLowerToAffinePass());
-    pm.addPass(mlir::createCanonicalizerPass());
-    pm.addPass(mlir::createCSEPass());
+
+    mlir::OpPassManager &optPM = pm.nest<mlir::FuncOp>();
+    optPM.addPass(mlir::createCanonicalizerPass());
+    optPM.addPass(mlir::createCSEPass());
 
     // Add optimizations if enabled.
     if (EnableOpt) {
-      pm.addPass(mlir::createLoopFusionPass());
-      pm.addPass(mlir::createMemRefDataFlowOptPass());
+      optPM.addPass(mlir::createLoopFusionPass());
+      optPM.addPass(mlir::createMemRefDataFlowOptPass());
     }
   }
 
diff --git a/mlir/g3doc/Tutorials/Toy/Ch-3.md b/mlir/g3doc/Tutorials/Toy/Ch-3.md
index 9b028f6..07f8c5c 100644
--- a/mlir/g3doc/Tutorials/Toy/Ch-3.md
+++ b/mlir/g3doc/Tutorials/Toy/Ch-3.md
@@ -123,7 +123,7 @@ similar way to LLVM:
 
 ```c++
   mlir::PassManager pm(module.getContext());
-  pm.addPass(mlir::createCanonicalizerPass());
+  pm.addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());
 ```
 
 Finally, we can try to run `toyc-ch3 test/transpose_transpose.toy -emit=mlir -opt`
diff --git a/mlir/include/mlir/Pass/PassManager.h b/mlir/include/mlir/Pass/PassManager.h
index 387b269..fa0788f 100644
--- a/mlir/include/mlir/Pass/PassManager.h
+++ b/mlir/include/mlir/Pass/PassManager.h
@@ -69,6 +69,12 @@ public:
   /// operation type, it must be the same type as this pass manager.
   void addPass(std::unique_ptr<Pass> pass);
 
+  /// Add the given pass to a nested pass manager for the given operation kind
+  /// `OpT`.
+  template <typename OpT> void addNestedPass(std::unique_ptr<Pass> pass) {
+    nest<OpT>().addPass(std::move(pass));
+  }
+
   /// Returns the number of passes held by this manager.
   size_t size() const;
diff --git a/mlir/include/mlir/Transforms/Passes.h b/mlir/include/mlir/Transforms/Passes.h
index 67ec4a3..c286e1a 100644
--- a/mlir/include/mlir/Transforms/Passes.h
+++ b/mlir/include/mlir/Transforms/Passes.h
@@ -35,17 +35,11 @@ class ModuleOp;
 class Pass;
 template <typename T> class OpPassBase;
 
-/// Creates a constant folding pass. Note that this pass solely provides simple
-/// top-down constant folding functionality; it is intended to be used for
-/// testing purpose. Use Canonicalizer pass, which exploits more simplification
-/// opportunities exposed by constant folding, for the general cases.
-std::unique_ptr<OpPassBase<FuncOp>> createTestConstantFoldPass();
-
 /// Creates an instance of the Canonicalizer pass.
-std::unique_ptr<OpPassBase<FuncOp>> createCanonicalizerPass();
+std::unique_ptr<Pass> createCanonicalizerPass();
 
 /// Creates a pass to perform common sub expression elimination.
-std::unique_ptr<OpPassBase<FuncOp>> createCSEPass();
+std::unique_ptr<Pass> createCSEPass();
 
 /// Creates a pass to vectorize loops, operations and data types using a
 /// target-independent, n-D super-vector abstraction.
diff --git a/mlir/lib/Transforms/CSE.cpp b/mlir/lib/Transforms/CSE.cpp
index 0e6dae6..c3d30bf 100644
--- a/mlir/lib/Transforms/CSE.cpp
+++ b/mlir/lib/Transforms/CSE.cpp
@@ -83,7 +83,7 @@ struct SimpleOperationInfo : public llvm::DenseMapInfo<Operation *> {
 
 namespace {
 /// Simple common sub-expression elimination.
-struct CSE : public FunctionPass<CSE> {
+struct CSE : public OperationPass<CSE> {
   CSE() = default;
   CSE(const CSE &) {}
 
@@ -119,7 +119,7 @@ struct CSE : public FunctionPass<CSE> {
   void simplifyRegion(ScopedMapTy &knownValues, DominanceInfo &domInfo,
                       Region &region);
 
-  void runOnFunction() override;
+  void runOnOperation() override;
 
 private:
   /// Operations marked as dead and to be erased.
@@ -238,11 +238,13 @@ void CSE::simplifyRegion(ScopedMapTy &knownValues, DominanceInfo &domInfo,
   }
 }
 
-void CSE::runOnFunction() {
-  /// A scoped hash table of defining operations within a function.
+void CSE::runOnOperation() {
+  /// A scoped hash table of defining operations within a region.
   ScopedMapTy knownValues;
-  simplifyRegion(knownValues, getAnalysis<DominanceInfo>(),
-                 getFunction().getBody());
+
+  DominanceInfo &domInfo = getAnalysis<DominanceInfo>();
+  for (Region &region : getOperation()->getRegions())
+    simplifyRegion(knownValues, domInfo, region);
 
   // If no operations were erased, then we mark all analyses as preserved.
   if (opsToErase.empty())
@@ -258,9 +260,6 @@ void CSE::runOnFunction() {
   markAnalysesPreserved<DominanceInfo>();
 }
 
-std::unique_ptr<OpPassBase<FuncOp>> mlir::createCSEPass() {
-  return std::make_unique<CSE>();
-}
+std::unique_ptr<Pass> mlir::createCSEPass() { return std::make_unique<CSE>(); }
 
-static PassRegistration<CSE>
-    pass("cse", "Eliminate common sub-expressions in functions");
+static PassRegistration<CSE> pass("cse", "Eliminate common sub-expressions");
diff --git a/mlir/lib/Transforms/Canonicalizer.cpp b/mlir/lib/Transforms/Canonicalizer.cpp
index 7e08d36..7dcdeb6 100644
--- a/mlir/lib/Transforms/Canonicalizer.cpp
+++ b/mlir/lib/Transforms/Canonicalizer.cpp
@@ -26,34 +26,27 @@
 #include "mlir/Transforms/Passes.h"
 using namespace mlir;
 
-//===----------------------------------------------------------------------===//
-// The actual Canonicalizer Pass.
-//===----------------------------------------------------------------------===//
-
 namespace {
-
-/// Canonicalize operations in functions.
-struct Canonicalizer : public FunctionPass<Canonicalizer> {
-  void runOnFunction() override;
+/// Canonicalize operations in nested regions.
+struct Canonicalizer : public OperationPass<Canonicalizer> {
+  void runOnOperation() override {
+    OwningRewritePatternList patterns;
+
+    // TODO: Instead of adding all known patterns from the whole system lazily
+    // add and cache the canonicalization patterns for ops we see in practice
+    // when building the worklist. For now, we just grab everything.
+    auto *context = &getContext();
+    for (auto *op : context->getRegisteredOperations())
+      op->getCanonicalizationPatterns(patterns, context);
+
+    Operation *op = getOperation();
+    applyPatternsGreedily(op->getRegions(), patterns);
+  }
 };
 } // end anonymous namespace
 
-void Canonicalizer::runOnFunction() {
-  OwningRewritePatternList patterns;
-  auto func = getFunction();
-
-  // TODO: Instead of adding all known patterns from the whole system lazily add
-  // and cache the canonicalization patterns for ops we see in practice when
-  // building the worklist. For now, we just grab everything.
-  auto *context = &getContext();
-  for (auto *op : context->getRegisteredOperations())
-    op->getCanonicalizationPatterns(patterns, context);
-
-  applyPatternsGreedily(func, patterns);
-}
-
 /// Create a Canonicalizer pass.
-std::unique_ptr<OpPassBase<FuncOp>> mlir::createCanonicalizerPass() {
+std::unique_ptr<Pass> mlir::createCanonicalizerPass() {
   return std::make_unique<Canonicalizer>();
 }
diff --git a/mlir/test/AffineOps/canonicalize.mlir b/mlir/test/AffineOps/canonicalize.mlir
index b8c00d9..e99a794 100644
--- a/mlir/test/AffineOps/canonicalize.mlir
+++ b/mlir/test/AffineOps/canonicalize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -split-input-file -pass-pipeline='func(canonicalize)' | FileCheck %s
 
 // Affine maps for test case: compose_affine_maps_1dto2d_no_symbols
 // CHECK-DAG: [[MAP0:#map[0-9]+]] = (d0) -> (d0 - 1)
diff --git a/mlir/test/Dialect/FxpMathOps/lower-uniform-real-math-addew.mlir b/mlir/test/Dialect/FxpMathOps/lower-uniform-real-math-addew.mlir
index 9d59721..0af8c5c 100644
--- a/mlir/test/Dialect/FxpMathOps/lower-uniform-real-math-addew.mlir
+++ b/mlir/test/Dialect/FxpMathOps/lower-uniform-real-math-addew.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -fxpmath-lower-uniform-real-math -canonicalize | FileCheck %s --dump-input=always
+// RUN: mlir-opt %s -split-input-file -fxpmath-lower-uniform-real-math -pass-pipeline='func(canonicalize)' | FileCheck %s --dump-input=always
 
 // -----
 // Verify lowering when operands and result have the same fixedpoint scale.
diff --git a/mlir/test/Dialect/FxpMathOps/lower-uniform-real-math-mulew.mlir b/mlir/test/Dialect/FxpMathOps/lower-uniform-real-math-mulew.mlir
index 9fc120f..85bcffd 100644
--- a/mlir/test/Dialect/FxpMathOps/lower-uniform-real-math-mulew.mlir
+++ b/mlir/test/Dialect/FxpMathOps/lower-uniform-real-math-mulew.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -fxpmath-lower-uniform-real-math -canonicalize -verify-diagnostics | FileCheck %s --dump-input=always
+// RUN: mlir-opt %s -split-input-file -fxpmath-lower-uniform-real-math -pass-pipeline='func(canonicalize)' -verify-diagnostics | FileCheck %s --dump-input=always
 
 // -----
 // Verify lowering when operands and result have the same fixedpoint scale.
diff --git a/mlir/test/Dialect/GPU/canonicalize.mlir b/mlir/test/Dialect/GPU/canonicalize.mlir
index 5b25cc9..8bb170c 100644
--- a/mlir/test/Dialect/GPU/canonicalize.mlir
+++ b/mlir/test/Dialect/GPU/canonicalize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -canonicalize %s | FileCheck %s
+// RUN: mlir-opt -pass-pipeline='func(canonicalize)' %s | FileCheck %s
 
 // CHECK-LABEL: @propagate_constant
 // CHECK-SAME: %[[arg1:.*]]: memref
diff --git a/mlir/test/Dialect/LLVMIR/terminator.mlir b/mlir/test/Dialect/LLVMIR/terminator.mlir
index 9a8f1aa..b8e8fcd 100644
--- a/mlir/test/Dialect/LLVMIR/terminator.mlir
+++ b/mlir/test/Dialect/LLVMIR/terminator.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -canonicalize %s | FileCheck %s
+// RUN: mlir-opt -pass-pipeline='func(canonicalize)' %s | FileCheck %s
 
 // verify that terminators survive the canonicalizer
 // CHECK-LABEL: @return
diff --git a/mlir/test/Dialect/QuantOps/canonicalize.mlir b/mlir/test/Dialect/QuantOps/canonicalize.mlir
index 7234381..f9fc4fc 100644
--- a/mlir/test/Dialect/QuantOps/canonicalize.mlir
+++ b/mlir/test/Dialect/QuantOps/canonicalize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -canonicalize | FileCheck %s --dump-input=fail
+// RUN: mlir-opt %s -split-input-file -pass-pipeline='func(canonicalize)' | FileCheck %s --dump-input=fail
 
 // -----
 // CHECK-LABEL: redundant_scast
diff --git a/mlir/test/Dialect/SPIRV/canonicalize.mlir b/mlir/test/Dialect/SPIRV/canonicalize.mlir
index 0b19e97..a9a6d0f 100644
--- a/mlir/test/Dialect/SPIRV/canonicalize.mlir
+++ b/mlir/test/Dialect/SPIRV/canonicalize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -split-input-file -pass-pipeline='func(canonicalize)' | FileCheck %s
 
 //===----------------------------------------------------------------------===//
 // spv.CompositeExtract
diff --git a/mlir/test/Pass/ir-printing.mlir b/mlir/test/Pass/ir-printing.mlir
index 27fecc3..b2a8005 100644
--- a/mlir/test/Pass/ir-printing.mlir
+++ b/mlir/test/Pass/ir-printing.mlir
@@ -1,8 +1,8 @@
-// RUN: mlir-opt %s -disable-pass-threading=true -cse -canonicalize -print-ir-before=cse -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE %s
-// RUN: mlir-opt %s -disable-pass-threading=true -cse -canonicalize -print-ir-before-all -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE_ALL %s
-// RUN: mlir-opt %s -disable-pass-threading=true -cse -canonicalize -print-ir-after=cse -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER %s
-// RUN: mlir-opt %s -disable-pass-threading=true -cse -canonicalize -print-ir-after-all -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER_ALL %s
-// RUN: mlir-opt %s -disable-pass-threading=true -cse -canonicalize -print-ir-before=cse -print-ir-module-scope -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE_MODULE %s
+// RUN: mlir-opt %s -disable-pass-threading=true -pass-pipeline='func(cse,canonicalize)' -print-ir-before=cse -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE %s
+// RUN: mlir-opt %s -disable-pass-threading=true -pass-pipeline='func(cse,canonicalize)' -print-ir-before-all -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE_ALL %s
+// RUN: mlir-opt %s -disable-pass-threading=true -pass-pipeline='func(cse,canonicalize)' -print-ir-after=cse -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER %s
+// RUN: mlir-opt %s -disable-pass-threading=true -pass-pipeline='func(cse,canonicalize)' -print-ir-after-all -o /dev/null 2>&1 | FileCheck -check-prefix=AFTER_ALL %s
+// RUN: mlir-opt %s -disable-pass-threading=true -pass-pipeline='func(cse,canonicalize)' -print-ir-before=cse -print-ir-module-scope -o /dev/null 2>&1 | FileCheck -check-prefix=BEFORE_MODULE %s
 
 func @foo() {
   return
diff --git a/mlir/test/Pass/pass-timing.mlir b/mlir/test/Pass/pass-timing.mlir
index e600806..db39ad6 100644
--- a/mlir/test/Pass/pass-timing.mlir
+++ b/mlir/test/Pass/pass-timing.mlir
@@ -1,7 +1,7 @@
-// RUN: mlir-opt %s -disable-pass-threading=true -verify-each=true -cse -canonicalize -cse -pass-timing -pass-timing-display=list 2>&1 | FileCheck -check-prefix=LIST %s
-// RUN: mlir-opt %s -disable-pass-threading=true -verify-each=true -cse -canonicalize -cse -pass-timing -pass-timing-display=pipeline 2>&1 | FileCheck -check-prefix=PIPELINE %s
-// RUN: mlir-opt %s -disable-pass-threading=false -verify-each=true -cse -canonicalize -cse -pass-timing -pass-timing-display=list 2>&1 | FileCheck -check-prefix=MT_LIST %s
-// RUN: mlir-opt %s -disable-pass-threading=false -verify-each=true -cse -canonicalize -cse -pass-timing -pass-timing-display=pipeline 2>&1 | FileCheck -check-prefix=MT_PIPELINE %s
+// RUN: mlir-opt %s -disable-pass-threading=true -verify-each=true -pass-pipeline='func(cse,canonicalize,cse)' -pass-timing -pass-timing-display=list 2>&1 | FileCheck -check-prefix=LIST %s
+// RUN: mlir-opt %s -disable-pass-threading=true -verify-each=true -pass-pipeline='func(cse,canonicalize,cse)' -pass-timing -pass-timing-display=pipeline 2>&1 | FileCheck -check-prefix=PIPELINE %s
+// RUN: mlir-opt %s -disable-pass-threading=false -verify-each=true -pass-pipeline='func(cse,canonicalize,cse)' -pass-timing -pass-timing-display=list 2>&1 | FileCheck -check-prefix=MT_LIST %s
+// RUN: mlir-opt %s -disable-pass-threading=false -verify-each=true -pass-pipeline='func(cse,canonicalize,cse)' -pass-timing -pass-timing-display=pipeline 2>&1 | FileCheck -check-prefix=MT_PIPELINE %s
 // RUN: mlir-opt %s -disable-pass-threading=false -verify-each=false -test-pm-nested-pipeline -pass-timing -pass-timing-display=pipeline 2>&1 | FileCheck -check-prefix=NESTED_MT_PIPELINE %s
 
 // LIST: Pass execution timing report
diff --git a/mlir/test/Pass/pipeline-parsing.mlir b/mlir/test/Pass/pipeline-parsing.mlir
index 5328899..118a87d 100644
--- a/mlir/test/Pass/pipeline-parsing.mlir
+++ b/mlir/test/Pass/pipeline-parsing.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -pass-pipeline='module(test-module-pass,func(test-function-pass)),func(test-function-pass)' -cse -pass-pipeline="func(canonicalize)" -verify-each=false -pass-timing -pass-timing-display=pipeline 2>&1 | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline='module(test-module-pass,func(test-function-pass)),func(test-function-pass)' -pass-pipeline="func(cse,canonicalize)" -verify-each=false -pass-timing -pass-timing-display=pipeline 2>&1 | FileCheck %s
 // RUN: mlir-opt %s -test-textual-pm-nested-pipeline -verify-each=false -pass-timing -pass-timing-display=pipeline 2>&1 | FileCheck %s --check-prefix=TEXTUAL_CHECK
 // RUN: not mlir-opt %s -pass-pipeline='module(test-module-pass' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_1 %s
 // RUN: not mlir-opt %s -pass-pipeline='module(test-module-pass))' 2>&1 | FileCheck --check-prefix=CHECK_ERROR_2 %s
diff --git a/mlir/test/Quantizer/matmul.mlir b/mlir/test/Quantizer/matmul.mlir
index 3ef9ed1..38d6c1e 100644
--- a/mlir/test/Quantizer/matmul.mlir
+++ b/mlir/test/Quantizer/matmul.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -quantizer-infer-quantized-types -quant-convert-const -quantizer-remove-instrumentation -canonicalize -split-input-file | FileCheck %s
+// RUN: mlir-opt %s -quantizer-infer-quantized-types -quant-convert-const -quantizer-remove-instrumentation -pass-pipeline='func(canonicalize)' -split-input-file | FileCheck %s
 
 // ----
 // A matmul without fused clamp or bias.
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index cecd666..fc8e92e 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline='func(canonicalize)' | FileCheck %s
 
 // CHECK-LABEL: func @test_subi_zero
 func @test_subi_zero(%arg0: i32) -> i32 {
diff --git a/mlir/test/Transforms/cse.mlir b/mlir/test/Transforms/cse.mlir
index 0e26834..8cc41e6 100644
--- a/mlir/test/Transforms/cse.mlir
+++ b/mlir/test/Transforms/cse.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -cse | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline='func(cse)' | FileCheck %s
 
 // CHECK-DAG: #map0 = (d0) -> (d0 mod 2)
 #map0 = (d0) -> (d0 mod 2)
diff --git a/mlir/test/Transforms/test-canonicalize.mlir b/mlir/test/Transforms/test-canonicalize.mlir
index fdbfd59..dfcc156 100644
--- a/mlir/test/Transforms/test-canonicalize.mlir
+++ b/mlir/test/Transforms/test-canonicalize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline='func(canonicalize)' | FileCheck %s
 
 // CHECK-LABEL: func @remove_op_with_inner_ops_pattern
 func @remove_op_with_inner_ops_pattern() {
diff --git a/mlir/test/lib/Transforms/TestConstantFold.cpp b/mlir/test/lib/Transforms/TestConstantFold.cpp
index 15ecaab..5a0e9ed 100644
--- a/mlir/test/lib/Transforms/TestConstantFold.cpp
+++ b/mlir/test/lib/Transforms/TestConstantFold.cpp
@@ -73,10 +73,5 @@ void TestConstantFold::runOnFunction() {
   }
 }
 
-/// Creates a constant folding pass.
-std::unique_ptr<OpPassBase<FuncOp>> mlir::createTestConstantFoldPass() {
-  return std::make_unique<TestConstantFold>();
-}
-
 static PassRegistration<TestConstantFold>
     pass("test-constant-fold", "Test operation constant folding");
-- 
2.7.4
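Downstream note: because the canonicalizer and CSE are now generic operation passes, the pass manager no longer nests them under functions automatically; a pipeline has to state that nesting itself. The sketch below is illustrative only and not part of the patch (the wrapper function name and the exact set of includes are assumptions); it shows the two equivalent ways the updated code above expresses the nesting, using the `nest<OpT>()` and `addNestedPass<OpT>()` APIs touched in PassManager.h. The textual equivalent used by the updated tests is `-pass-pipeline='func(cse,canonicalize)'`.

```c++
// Illustrative sketch (not part of the patch): building a pipeline that runs
// CSE and the canonicalizer on every function within a module, now that the
// nesting must be spelled out explicitly.
#include "mlir/IR/Function.h"
#include "mlir/IR/Module.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/Passes.h"

static mlir::LogicalResult runCleanupPipeline(mlir::ModuleOp module) {
  mlir::PassManager pm(module.getContext());

  // Option 1: nest an OpPassManager on FuncOp and add the passes to it.
  mlir::OpPassManager &funcPM = pm.nest<mlir::FuncOp>();
  funcPM.addPass(mlir::createCSEPass());
  funcPM.addPass(mlir::createCanonicalizerPass());

  // Option 2: the addNestedPass<OpT> helper added in PassManager.h, which
  // nests and adds the pass in a single call.
  pm.addNestedPass<mlir::FuncOp>(mlir::createCanonicalizerPass());

  return pm.run(module);
}
```

Either form preserves the per-function scheduling that the old function passes had implicitly.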