From 47f175b09b63ba340f0eb8c736945f36a5e11393 Mon Sep 17 00:00:00 2001
From: River Riddle
Date: Mon, 7 Mar 2022 13:56:38 -0800
Subject: [PATCH] [mlir] Update FuncOp conversion passes to Pass/InterfacePass

These passes generally don't rely on any special aspects of FuncOp, and moving
them allows these passes to be used in many more situations. The passes that
obviously weren't relying on invariants guaranteed by a "function" were updated
to be generic passes; the rest were updated to be FunctionOpInterface
InterfacePasses.

The test updates are NFC, switching from the implicit nesting form
(-pass -pass2) to the -pass-pipeline form (generic passes do not implicitly
nest as op-specific passes do).

Differential Revision: https://reviews.llvm.org/D121190
---
 .../Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.h | 6 +---
 .../ComplexToStandard/ComplexToStandard.h | 8 ++---
 mlir/include/mlir/Conversion/Passes.td | 42 +++++++++++-----------
 .../mlir/Conversion/SCFToGPU/SCFToGPUPass.h | 8 ++---
 .../Conversion/ShapeToStandard/ShapeToStandard.h | 4 +--
 .../mlir/Conversion/VectorToGPU/VectorToGPU.h | 9 +++--
 .../Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.cpp | 12 +++----
 .../ComplexToStandard/ComplexToStandard.cpp | 8 ++---
 mlir/lib/Conversion/PassDetail.h | 1 +
 mlir/lib/Conversion/SCFToGPU/SCFToGPUPass.cpp | 8 +++--
 .../ShapeToStandard/ConvertShapeConstraints.cpp | 3 +-
 .../TosaToLinalg/TosaToLinalgNamedPass.cpp | 2 +-
 .../Conversion/TosaToLinalg/TosaToLinalgPass.cpp | 2 +-
 mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp | 11 +++---
 mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp | 10 ++----
 .../Conversion/ArithmeticToLLVM/arith-to-llvm.mlir | 2 +-
 .../convert-nd-vector-to-llvmir.mlir | 2 +-
 .../ComplexToStandard/convert-to-standard.mlir | 2 +-
 .../ComplexToStandard/full-conversion.mlir | 2 +-
 mlir/test/Conversion/FuncToLLVM/func-memref.mlir | 4 +--
 mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir | 4 +--
 mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir | 2 +-
 .../Conversion/SCFToGPU/no_blocks_no_threads.mlir | 4 +--
 mlir/test/Conversion/SCFToGPU/step_one.mlir | 4 +--
 mlir/test/Conversion/SCFToGPU/step_positive.mlir | 2 +-
 .../ShapeToStandard/convert-shape-constraints.mlir | 2 +-
 .../TosaToLinalg/tosa-to-linalg-named.mlir | 2 +-
 .../Conversion/TosaToLinalg/tosa-to-linalg.mlir | 2 +-
 .../Conversion/VectorToGPU/vector-to-mma-ops.mlir | 2 +-
 .../VectorToSCF/tensor-transfer-ops.mlir | 2 +-
 .../VectorToSCF/unrolled-tensor-transfer-ops.mlir | 2 +-
 .../VectorToSCF/unrolled-vector-to-loops.mlir | 2 +-
 .../vector-to-scf-mask-and-permutation-map.mlir | 2 +-
 .../test/Conversion/VectorToSCF/vector-to-scf.mlir | 4 +--
 .../Dialect/Linalg/CPU/benchmark_matmul.mlir | 4 +--
 .../Linalg/CPU/test-comprehensive-bufferize.mlir | 6 ++--
 .../Standard/CPU/test-ceil-floor-pos-neg.mlir | 5 +--
 .../Dialect/Vector/CPU/test-transfer-read-1d.mlir | 8 ++---
 .../Dialect/Vector/CPU/test-transfer-read-2d.mlir | 8 ++---
 .../Dialect/Vector/CPU/test-transfer-read-3d.mlir | 8 ++---
 .../Dialect/Vector/CPU/test-transfer-read.mlir | 4 +--
 .../Dialect/Vector/CPU/test-transfer-to-loops.mlir | 4 +--
 .../Dialect/Vector/CPU/test-vector-distribute.mlir | 8 ++---
 mlir/test/Target/LLVMIR/arm-neon-2d.mlir | 2 +-
 mlir/test/Target/LLVMIR/vector-to-llvm-ir.mlir | 2 +-
 mlir/test/mlir-cpu-runner/async-error.mlir | 12 +------
 mlir/test/mlir-cpu-runner/async-group.mlir | 8 +----
 mlir/test/mlir-cpu-runner/async-value.mlir | 10 +-----
 mlir/test/mlir-cpu-runner/async.mlir | 12 +------
 mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir | 2 +-
mlir/test/mlir-cpu-runner/copy.mlir | 2 +- mlir/test/mlir-cpu-runner/global-memref.mlir | 2 +- .../mlir-cpu-runner/math-polynomial-approx.mlir | 7 +--- .../mlir-cpu-runner/memref-reinterpret-cast.mlir | 2 +- mlir/test/mlir-cpu-runner/memref-reshape.mlir | 2 +- mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir | 2 +- mlir/test/mlir-cpu-runner/unranked-memref.mlir | 8 +---- mlir/test/mlir-cpu-runner/utils.mlir | 8 ++--- mlir/test/mlir-opt/async.mlir | 12 +------ 59 files changed, 126 insertions(+), 204 deletions(-) diff --git a/mlir/include/mlir/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.h b/mlir/include/mlir/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.h index a27e91f..5250931 100644 --- a/mlir/include/mlir/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.h +++ b/mlir/include/mlir/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.h @@ -12,10 +12,6 @@ #include "mlir/Pass/Pass.h" namespace mlir { -class FuncOp; -template -class OperationPass; - /// Populates patterns for the lowering of Arm NEON 2D ops to intrinsics. /// See createConvertArmNeon2dToIntrPass. void populateConvertArmNeon2dToIntrPatterns(RewritePatternSet &patterns); @@ -23,7 +19,7 @@ void populateConvertArmNeon2dToIntrPatterns(RewritePatternSet &patterns); /// Creates a pass to lower Arm NEON 2D ops to intrinsics, i.e. /// equivalent ops operating on flattened 1D vectors and mapping more /// directly to the corresponding Arm NEON instruction. -std::unique_ptr> createConvertArmNeon2dToIntrPass(); +std::unique_ptr createConvertArmNeon2dToIntrPass(); } // namespace mlir diff --git a/mlir/include/mlir/Conversion/ComplexToStandard/ComplexToStandard.h b/mlir/include/mlir/Conversion/ComplexToStandard/ComplexToStandard.h index 285881b..b0e69db 100644 --- a/mlir/include/mlir/Conversion/ComplexToStandard/ComplexToStandard.h +++ b/mlir/include/mlir/Conversion/ComplexToStandard/ComplexToStandard.h @@ -10,19 +10,15 @@ #include -#include "mlir/Transforms/DialectConversion.h" - namespace mlir { -class FuncOp; class RewritePatternSet; -template -class OperationPass; +class Pass; /// Populate the given list with patterns that convert from Complex to Standard. void populateComplexToStandardConversionPatterns(RewritePatternSet &patterns); /// Create a pass to convert Complex operations to the Standard dialect. -std::unique_ptr> createConvertComplexToStandardPass(); +std::unique_ptr createConvertComplexToStandardPass(); } // namespace mlir diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td index 9864d04..f4d0d27 100644 --- a/mlir/include/mlir/Conversion/Passes.td +++ b/mlir/include/mlir/Conversion/Passes.td @@ -77,7 +77,7 @@ def ConvertAffineToStandard : Pass<"lower-affine"> { // ArithmeticToLLVM //===----------------------------------------------------------------------===// -def ConvertArithmeticToLLVM : Pass<"convert-arith-to-llvm", "FuncOp"> { +def ConvertArithmeticToLLVM : Pass<"convert-arith-to-llvm"> { let summary = "Convert Arithmetic dialect to LLVM dialect"; let description = [{ This pass converts supported Arithmetic ops to LLVM dialect instructions. 
@@ -108,6 +108,16 @@ def ConvertArithmeticToSPIRV : Pass<"convert-arith-to-spirv", "ModuleOp"> { } //===----------------------------------------------------------------------===// +// ArmNeon2dToIntr +//===----------------------------------------------------------------------===// + +def ConvertArmNeon2dToIntr : Pass<"arm-neon-2d-to-intr"> { + let summary = "Convert Arm NEON structured ops to intrinsics"; + let constructor = "mlir::createConvertArmNeon2dToIntrPass()"; + let dependentDialects = ["arm_neon::ArmNeonDialect", "vector::VectorDialect"]; +} + +//===----------------------------------------------------------------------===// // AsyncToLLVM //===----------------------------------------------------------------------===// @@ -174,7 +184,7 @@ def ConvertComplexToLLVM : Pass<"convert-complex-to-llvm", "ModuleOp"> { // ComplexToStandard //===----------------------------------------------------------------------===// -def ConvertComplexToStandard : Pass<"convert-complex-to-standard", "FuncOp"> { +def ConvertComplexToStandard : Pass<"convert-complex-to-standard"> { let summary = "Convert Complex dialect to standard dialect"; let constructor = "mlir::createConvertComplexToStandardPass()"; let dependentDialects = ["math::MathDialect"]; @@ -444,7 +454,7 @@ def ConvertMathToLibm : Pass<"convert-math-to-libm", "ModuleOp"> { // MathToLLVM //===----------------------------------------------------------------------===// -def ConvertMathToLLVM : Pass<"convert-math-to-llvm", "FuncOp"> { +def ConvertMathToLLVM : Pass<"convert-math-to-llvm"> { let summary = "Convert Math dialect to LLVM dialect"; let description = [{ This pass converts supported Math ops to LLVM dialect intrinsics. @@ -605,7 +615,8 @@ def SCFToSPIRV : Pass<"convert-scf-to-spirv", "ModuleOp"> { // SCFToGPU //===----------------------------------------------------------------------===// -def ConvertAffineForToGPU : Pass<"convert-affine-for-to-gpu", "FuncOp"> { +def ConvertAffineForToGPU + : InterfacePass<"convert-affine-for-to-gpu", "FunctionOpInterface"> { let summary = "Convert top-level AffineFor Ops to GPU kernels"; let constructor = "mlir::createAffineForToGPUPass()"; let dependentDialects = ["gpu::GPUDialect"]; @@ -636,7 +647,7 @@ def ConvertShapeToStandard : Pass<"convert-shape-to-std", "ModuleOp"> { ]; } -def ConvertShapeConstraints: Pass<"convert-shape-constraints", "FuncOp"> { +def ConvertShapeConstraints : Pass<"convert-shape-constraints"> { let summary = "Convert shape constraint operations to the standard dialect"; let description = [{ This pass eliminates shape constraints from the program, converting them to @@ -685,7 +696,8 @@ def ConvertTensorToSPIRV : Pass<"convert-tensor-to-spirv", "ModuleOp"> { // TosaToLinalg //===----------------------------------------------------------------------===// -def TosaToLinalg : Pass<"tosa-to-linalg", "FuncOp"> { +def TosaToLinalg + : InterfacePass<"tosa-to-linalg", "FunctionOpInterface"> { let summary = "Lower TOSA to LinAlg on tensors"; let description = [{ Pass that converts TOSA operations to the equivalent operations using the @@ -699,7 +711,8 @@ def TosaToLinalg : Pass<"tosa-to-linalg", "FuncOp"> { // TosaToLinalgNamed //===----------------------------------------------------------------------===// -def TosaToLinalgNamed : Pass<"tosa-to-linalg-named", "FuncOp"> { +def TosaToLinalgNamed + : InterfacePass<"tosa-to-linalg-named", "FunctionOpInterface"> { let summary = "Lower TOSA to LinAlg named operations"; let description = [{ Pass that converts TOSA operations to the equivalent 
operations using the @@ -746,7 +759,7 @@ def TosaToStandard : Pass<"tosa-to-standard"> { // VectorToGPU //===----------------------------------------------------------------------===// -def ConvertVectorToGPU : Pass<"convert-vector-to-gpu", "FuncOp"> { +def ConvertVectorToGPU : Pass<"convert-vector-to-gpu"> { let summary = "Lower the operations from the vector dialect into the GPU " "dialect"; let constructor = "mlir::createConvertVectorToGPUPass()"; @@ -760,7 +773,7 @@ def ConvertVectorToGPU : Pass<"convert-vector-to-gpu", "FuncOp"> { // VectorToSCF //===----------------------------------------------------------------------===// -def ConvertVectorToSCF : Pass<"convert-vector-to-scf", "FuncOp"> { +def ConvertVectorToSCF : Pass<"convert-vector-to-scf"> { let summary = "Lower the operations from the vector dialect into the SCF " "dialect"; let constructor = "mlir::createConvertVectorToSCFPass()"; @@ -850,15 +863,4 @@ def ConvertVectorToSPIRV : Pass<"convert-vector-to-spirv", "ModuleOp"> { let dependentDialects = ["spirv::SPIRVDialect"]; } -//===----------------------------------------------------------------------===// -// ArmNeon2dToIntr -//===----------------------------------------------------------------------===// - -def ConvertArmNeon2dToIntr : Pass<"arm-neon-2d-to-intr", "FuncOp"> { - let summary = "Convert Arm NEON structured ops to intrinsics"; - let constructor = "mlir::createConvertArmNeon2dToIntrPass()"; - let dependentDialects = ["arm_neon::ArmNeonDialect", "vector::VectorDialect"]; -} - - #endif // MLIR_CONVERSION_PASSES diff --git a/mlir/include/mlir/Conversion/SCFToGPU/SCFToGPUPass.h b/mlir/include/mlir/Conversion/SCFToGPU/SCFToGPUPass.h index 1bbdd18..c1b0eb0 100644 --- a/mlir/include/mlir/Conversion/SCFToGPU/SCFToGPUPass.h +++ b/mlir/include/mlir/Conversion/SCFToGPU/SCFToGPUPass.h @@ -13,9 +13,9 @@ #include namespace mlir { -class FuncOp; +class FunctionOpInterface; template -class OperationPass; +class InterfacePass; class Pass; /// Create a pass that converts loop nests into GPU kernels. It considers @@ -26,9 +26,9 @@ class Pass; /// parallelization is performed, it is under the responsibility of the caller /// to strip-mine the loops and to perform the dependence analysis before /// calling the conversion. -std::unique_ptr> +std::unique_ptr> createAffineForToGPUPass(unsigned numBlockDims, unsigned numThreadDims); -std::unique_ptr> createAffineForToGPUPass(); +std::unique_ptr> createAffineForToGPUPass(); /// Creates a pass that converts scf.parallel operations into a gpu.launch /// operation. 
The mapping of loop dimensions to launch dimensions is derived diff --git a/mlir/include/mlir/Conversion/ShapeToStandard/ShapeToStandard.h b/mlir/include/mlir/Conversion/ShapeToStandard/ShapeToStandard.h index 7dc8101..607f968 100644 --- a/mlir/include/mlir/Conversion/ShapeToStandard/ShapeToStandard.h +++ b/mlir/include/mlir/Conversion/ShapeToStandard/ShapeToStandard.h @@ -13,8 +13,8 @@ namespace mlir { -class FuncOp; class ModuleOp; +class Pass; template class OperationPass; class RewritePatternSet; @@ -26,7 +26,7 @@ std::unique_ptr> createConvertShapeToStandardPass(); void populateConvertShapeConstraintsConversionPatterns( RewritePatternSet &patterns); -std::unique_ptr> createConvertShapeConstraintsPass(); +std::unique_ptr createConvertShapeConstraintsPass(); } // namespace mlir diff --git a/mlir/include/mlir/Conversion/VectorToGPU/VectorToGPU.h b/mlir/include/mlir/Conversion/VectorToGPU/VectorToGPU.h index edba3d5c..266fa0e 100644 --- a/mlir/include/mlir/Conversion/VectorToGPU/VectorToGPU.h +++ b/mlir/include/mlir/Conversion/VectorToGPU/VectorToGPU.h @@ -14,17 +14,16 @@ namespace mlir { class MLIRContext; class Pass; -class FuncOp; class RewritePatternSet; /// Patterns to transform vector ops into a canonical form to convert to MMA /// matrix operations. void populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns); -/// Convert vector ops to MMA matrix operations. This will convert slice of -/// operations that can be legally converted to MMA operations. The rest of the -/// vector operations are left untouched. -void convertVectorToMMAOps(FuncOp funcOp); +/// Convert vector ops to MMA matrix operations nested under `rootOp`. This will +/// convert slice of operations that can be legally converted to MMA operations. +/// The rest of the vector operations are left untouched. +void convertVectorToMMAOps(Operation *rootOp); /// Convert from vector to GPU ops. 
std::unique_ptr createConvertVectorToGPUPass(); diff --git a/mlir/lib/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.cpp b/mlir/lib/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.cpp index 8040f97..e8c74c9 100644 --- a/mlir/lib/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.cpp +++ b/mlir/lib/Conversion/ArmNeon2dToIntr/ArmNeon2dToIntr.cpp @@ -49,27 +49,23 @@ public: class ConvertArmNeon2dToIntr : public ConvertArmNeon2dToIntrBase { void runOnOperation() override { - auto func = getOperation(); auto *context = &getContext(); RewritePatternSet patterns(context); populateConvertArmNeon2dToIntrPatterns(patterns); - if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns)))) + if (failed( + applyPatternsAndFoldGreedily(getOperation(), std::move(patterns)))) return signalPassFailure(); } }; } // namespace -namespace mlir { - -void populateConvertArmNeon2dToIntrPatterns(RewritePatternSet &patterns) { +void mlir::populateConvertArmNeon2dToIntrPatterns(RewritePatternSet &patterns) { patterns.add(patterns.getContext()); } -std::unique_ptr> createConvertArmNeon2dToIntrPass() { +std::unique_ptr mlir::createConvertArmNeon2dToIntrPass() { return std::make_unique(); } - -} // namespace mlir diff --git a/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp b/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp index 877b811..4bfd5e2 100644 --- a/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp +++ b/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp @@ -644,8 +644,6 @@ struct ConvertComplexToStandardPass }; void ConvertComplexToStandardPass::runOnOperation() { - auto function = getOperation(); - // Convert to the Standard dialect using the converter defined above. RewritePatternSet patterns(&getContext()); populateComplexToStandardConversionPatterns(patterns); @@ -653,12 +651,12 @@ void ConvertComplexToStandardPass::runOnOperation() { ConversionTarget target(getContext()); target.addLegalDialect(); target.addLegalOp(); - if (failed(applyPartialConversion(function, target, std::move(patterns)))) + if (failed( + applyPartialConversion(getOperation(), target, std::move(patterns)))) signalPassFailure(); } } // namespace -std::unique_ptr> -mlir::createConvertComplexToStandardPass() { +std::unique_ptr mlir::createConvertComplexToStandardPass() { return std::make_unique(); } diff --git a/mlir/lib/Conversion/PassDetail.h b/mlir/lib/Conversion/PassDetail.h index 6eba811..8cb3166 100644 --- a/mlir/lib/Conversion/PassDetail.h +++ b/mlir/lib/Conversion/PassDetail.h @@ -15,6 +15,7 @@ namespace mlir { class AffineDialect; +class FunctionOpInterface; // Forward declaration from Dialect.h template diff --git a/mlir/lib/Conversion/SCFToGPU/SCFToGPUPass.cpp b/mlir/lib/Conversion/SCFToGPU/SCFToGPUPass.cpp index 2295d37..c0e983b 100644 --- a/mlir/lib/Conversion/SCFToGPU/SCFToGPUPass.cpp +++ b/mlir/lib/Conversion/SCFToGPU/SCFToGPUPass.cpp @@ -34,7 +34,8 @@ struct ForLoopMapper : public ConvertAffineForToGPUBase { } void runOnOperation() override { - for (Operation &op : llvm::make_early_inc_range(getOperation().getOps())) { + for (Operation &op : + llvm::make_early_inc_range(getOperation().getBody().getOps())) { if (auto forOp = dyn_cast(&op)) { if (failed(convertAffineLoopNestToGPULaunch(forOp, numBlockDims, numThreadDims))) @@ -61,11 +62,12 @@ struct ParallelLoopToGpuPass } // namespace -std::unique_ptr> +std::unique_ptr> mlir::createAffineForToGPUPass(unsigned numBlockDims, unsigned numThreadDims) { return std::make_unique(numBlockDims, numThreadDims); } -std::unique_ptr> 
mlir::createAffineForToGPUPass() { +std::unique_ptr> +mlir::createAffineForToGPUPass() { return std::make_unique(); } diff --git a/mlir/lib/Conversion/ShapeToStandard/ConvertShapeConstraints.cpp b/mlir/lib/Conversion/ShapeToStandard/ConvertShapeConstraints.cpp index ee4dc0a..fb71126 100644 --- a/mlir/lib/Conversion/ShapeToStandard/ConvertShapeConstraints.cpp +++ b/mlir/lib/Conversion/ShapeToStandard/ConvertShapeConstraints.cpp @@ -63,7 +63,6 @@ class ConvertShapeConstraints }; } // namespace -std::unique_ptr> -mlir::createConvertShapeConstraintsPass() { +std::unique_ptr mlir::createConvertShapeConstraintsPass() { return std::make_unique(); } diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamedPass.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamedPass.cpp index 815cfb5..2e43e03 100644 --- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamedPass.cpp +++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgNamedPass.cpp @@ -53,7 +53,7 @@ public: target.markUnknownOpDynamicallyLegal([](Operation *) { return true; }); - FuncOp func = getOperation(); + FunctionOpInterface func = getOperation(); mlir::tosa::populateTosaToLinalgNamedConversionPatterns(&patterns); if (failed(applyFullConversion(func, target, std::move(patterns)))) signalPassFailure(); diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgPass.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgPass.cpp index 8f1928d..cc21090 100644 --- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgPass.cpp +++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalgPass.cpp @@ -54,7 +54,7 @@ public: target.markUnknownOpDynamicallyLegal([](Operation *) { return true; }); - FuncOp func = getOperation(); + FunctionOpInterface func = getOperation(); mlir::tosa::populateTosaToLinalgConversionPatterns(&patterns); if (failed(applyFullConversion(func, target, std::move(patterns)))) signalPassFailure(); diff --git a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp index 07cc493..e3dbfc9 100644 --- a/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp +++ b/mlir/lib/Conversion/VectorToGPU/VectorToGPU.cpp @@ -503,15 +503,13 @@ static void convertElementwiseOp(Operation *op, gpu::MMAElementwiseOp opType, valueMapping[op->getResult(0)] = newOp; } -namespace mlir { - -void populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns) { +void mlir::populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns) { patterns.add( patterns.getContext()); } -void convertVectorToMMAOps(FuncOp funcOp) { - SetVector ops = getOpToConvert(funcOp); +void mlir::convertVectorToMMAOps(Operation *rootOp) { + SetVector ops = getOpToConvert(rootOp); llvm::DenseMap valueMapping; for (Operation *op : ops) { if (auto transferRead = dyn_cast(op)) { @@ -534,13 +532,12 @@ void convertVectorToMMAOps(FuncOp funcOp) { } } -} // namespace mlir namespace { struct ConvertVectorToGPUPass : public ConvertVectorToGPUBase { void runOnOperation() override { - RewritePatternSet patterns(getOperation().getContext()); + RewritePatternSet patterns(&getContext()); populatePrepareVectorToMMAPatterns(patterns); (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns)); diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp index 5af5693..0c79403 100644 --- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp +++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp @@ -1254,9 +1254,7 @@ struct TransferOp1dConversion : public VectorToSCFPattern { } // namespace lowering_1_d } // namespace 
-namespace mlir { - -void populateVectorToSCFConversionPatterns( +void mlir::populateVectorToSCFConversionPatterns( RewritePatternSet &patterns, const VectorTransferToSCFOptions &options) { if (options.unroll) { patterns.add, %arg1: vector<4xi1>, %arg2: vector<4xi64>, %arg3: vector<4xi64>) -> vector<4xf32> { diff --git a/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir b/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir index d754235..328b3edd 100644 --- a/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir +++ b/mlir/test/Conversion/ArithmeticToLLVM/convert-nd-vector-to-llvmir.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -convert-arith-to-llvm %s -split-input-file | FileCheck %s +// RUN: mlir-opt -pass-pipeline="builtin.func(convert-arith-to-llvm)" %s -split-input-file | FileCheck %s // CHECK-LABEL: @vec_bin func @vec_bin(%arg0: vector<2x2x2xf32>) -> vector<2x2x2xf32> { diff --git a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir index e43f182..2efd194 100644 --- a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir +++ b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-complex-to-standard | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-complex-to-standard)" | FileCheck %s // CHECK-LABEL: func @complex_abs // CHECK-SAME: %[[ARG:.*]]: complex diff --git a/mlir/test/Conversion/ComplexToStandard/full-conversion.mlir b/mlir/test/Conversion/ComplexToStandard/full-conversion.mlir index 77e0f2b..135af60 100644 --- a/mlir/test/Conversion/ComplexToStandard/full-conversion.mlir +++ b/mlir/test/Conversion/ComplexToStandard/full-conversion.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-complex-to-standard -convert-complex-to-llvm -convert-math-to-llvm -convert-arith-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-complex-to-standard),convert-complex-to-llvm,builtin.func(convert-math-to-llvm,convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" | FileCheck %s // CHECK-LABEL: llvm.func @complex_abs // CHECK-SAME: %[[ARG:.*]]: ![[C_TY:.*]]) diff --git a/mlir/test/Conversion/FuncToLLVM/func-memref.mlir b/mlir/test/Conversion/FuncToLLVM/func-memref.mlir index 4b6b5d4..70af9af 100644 --- a/mlir/test/Conversion/FuncToLLVM/func-memref.mlir +++ b/mlir/test/Conversion/FuncToLLVM/func-memref.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt -convert-arith-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts -split-input-file %s | FileCheck %s -// RUN: mlir-opt -convert-arith-to-llvm -convert-func-to-llvm='use-bare-ptr-memref-call-conv=1' -reconcile-unrealized-casts -split-input-file %s | FileCheck %s --check-prefix=BAREPTR +// RUN: mlir-opt -pass-pipeline="builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" -split-input-file %s | FileCheck %s +// RUN: mlir-opt -pass-pipeline="builtin.func(convert-arith-to-llvm),convert-func-to-llvm{use-bare-ptr-memref-call-conv=1},reconcile-unrealized-casts" -split-input-file %s | FileCheck %s --check-prefix=BAREPTR // BAREPTR-LABEL: func @check_noalias // BAREPTR-SAME: %{{.*}}: !llvm.ptr {llvm.noalias}, %{{.*}}: !llvm.ptr {llvm.noalias} diff --git a/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir b/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir index 5dfdf4d..2d32857 100644 --- 
a/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir +++ b/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt -convert-math-to-llvm -convert-arith-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts %s -split-input-file | FileCheck %s -// RUN: mlir-opt -convert-math-to-llvm -convert-arith-to-llvm='index-bitwidth=32' -convert-func-to-llvm='index-bitwidth=32' -reconcile-unrealized-casts %s -split-input-file | FileCheck --check-prefix=CHECK32 %s +// RUN: mlir-opt -pass-pipeline="builtin.func(convert-math-to-llvm,convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" %s -split-input-file | FileCheck %s +// RUN: mlir-opt -pass-pipeline="builtin.func(convert-math-to-llvm,convert-arith-to-llvm{index-bitwidth=32}),convert-func-to-llvm{index-bitwidth=32},reconcile-unrealized-casts" %s -split-input-file | FileCheck --check-prefix=CHECK32 %s // CHECK-LABEL: func @empty() { // CHECK-NEXT: llvm.return diff --git a/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir b/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir index 002c3cd..ab687fc 100644 --- a/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir +++ b/mlir/test/Conversion/MathToLLVM/math-to-llvm.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -split-input-file -convert-math-to-llvm | FileCheck %s +// RUN: mlir-opt %s -split-input-file -pass-pipeline="builtin.func(convert-math-to-llvm)" | FileCheck %s // CHECK-LABEL: @ops func @ops(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32, %arg4: f64) { diff --git a/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir b/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir index 52baf97..93beb56 100644 --- a/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir +++ b/mlir/test/Conversion/SCFToGPU/no_blocks_no_threads.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt -convert-affine-for-to-gpu="gpu-block-dims=0 gpu-thread-dims=1" %s | FileCheck --check-prefix=CHECK-THREADS %s -// RUN: mlir-opt -convert-affine-for-to-gpu="gpu-block-dims=1 gpu-thread-dims=0" %s | FileCheck --check-prefix=CHECK-BLOCKS %s +// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=0 gpu-thread-dims=1})" %s | FileCheck --check-prefix=CHECK-THREADS %s +// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=0})" %s | FileCheck --check-prefix=CHECK-BLOCKS %s // CHECK-THREADS-LABEL: @one_d_loop // CHECK-BLOCKS-LABEL: @one_d_loop diff --git a/mlir/test/Conversion/SCFToGPU/step_one.mlir b/mlir/test/Conversion/SCFToGPU/step_one.mlir index 9c0ecb0..4acd669 100644 --- a/mlir/test/Conversion/SCFToGPU/step_one.mlir +++ b/mlir/test/Conversion/SCFToGPU/step_one.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt -convert-affine-for-to-gpu="gpu-block-dims=1 gpu-thread-dims=1" %s | FileCheck --check-prefix=CHECK-11 %s -// RUN: mlir-opt -convert-affine-for-to-gpu="gpu-block-dims=2 gpu-thread-dims=2" %s | FileCheck --check-prefix=CHECK-22 %s +// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=1})" %s | FileCheck --check-prefix=CHECK-11 %s +// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=2 gpu-thread-dims=2})" %s | FileCheck --check-prefix=CHECK-22 %s // CHECK-11-LABEL: @step_1 // CHECK-22-LABEL: @step_1 diff --git a/mlir/test/Conversion/SCFToGPU/step_positive.mlir b/mlir/test/Conversion/SCFToGPU/step_positive.mlir index 415cb00..29987eb 100644 --- a/mlir/test/Conversion/SCFToGPU/step_positive.mlir +++ b/mlir/test/Conversion/SCFToGPU/step_positive.mlir 
@@ -1,4 +1,4 @@ -// RUN: mlir-opt -convert-affine-for-to-gpu="gpu-block-dims=1 gpu-thread-dims=1" %s | FileCheck %s +// RUN: mlir-opt -pass-pipeline="builtin.func(convert-affine-for-to-gpu{gpu-block-dims=1 gpu-thread-dims=1})" %s | FileCheck %s // CHECK-LABEL: @step_var func @step_var(%A : memref, %B : memref) { diff --git a/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir b/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir index 4ac169e..eda81bd 100644 --- a/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir +++ b/mlir/test/Conversion/ShapeToStandard/convert-shape-constraints.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt -convert-shape-constraints <%s | FileCheck %s +// RUN: mlir-opt -pass-pipeline="builtin.func(convert-shape-constraints)" <%s | FileCheck %s // There's not very much useful to check here other than pasting the output. // CHECK-LABEL: func @cstr_broadcastable( diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir index 776dd54..ce7d155 100644 --- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir +++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg-named.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt --split-input-file --tosa-to-linalg-named %s -verify-diagnostics -o -| FileCheck %s +// RUN: mlir-opt --split-input-file -pass-pipeline="builtin.func(tosa-to-linalg-named)" %s -verify-diagnostics -o -| FileCheck %s // CHECK-LABEL: @matmul func @matmul(%arg0: tensor<1x5x3xf32>, %arg1: tensor<1x3x6xf32>) -> (tensor<1x5x6xf32>) { diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir index 9828fe8..f375a93 100644 --- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir +++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt --split-input-file --tosa-to-linalg %s -verify-diagnostics -o -| FileCheck %s +// RUN: mlir-opt --split-input-file -pass-pipeline="builtin.func(tosa-to-linalg)" %s -verify-diagnostics -o -| FileCheck %s // CHECK: #[[$MAP0:.*]] = affine_map<() -> ()> diff --git a/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops.mlir b/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops.mlir index 0e1f195..3c83716 100644 --- a/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops.mlir +++ b/mlir/test/Conversion/VectorToGPU/vector-to-mma-ops.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-vector-to-gpu -canonicalize | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-gpu)" -canonicalize | FileCheck %s #map0 = affine_map<(d0, d1) -> (d1, d0)> #map1 = affine_map<(d0, d1, d2) -> (d0, d2)> diff --git a/mlir/test/Conversion/VectorToSCF/tensor-transfer-ops.mlir b/mlir/test/Conversion/VectorToSCF/tensor-transfer-ops.mlir index ba6f500..ee373b2 100644 --- a/mlir/test/Conversion/VectorToSCF/tensor-transfer-ops.mlir +++ b/mlir/test/Conversion/VectorToSCF/tensor-transfer-ops.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-vector-to-scf='lower-tensors=true' -split-input-file -allow-unregistered-dialect | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-tensors=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s // CHECK-LABEL: func @transfer_read_2d( // CHECK: %[[ALLOC:.*]] = memref.alloca() : memref> diff --git a/mlir/test/Conversion/VectorToSCF/unrolled-tensor-transfer-ops.mlir b/mlir/test/Conversion/VectorToSCF/unrolled-tensor-transfer-ops.mlir index 
662cbd6..7a3ad16 100644 --- a/mlir/test/Conversion/VectorToSCF/unrolled-tensor-transfer-ops.mlir +++ b/mlir/test/Conversion/VectorToSCF/unrolled-tensor-transfer-ops.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true lower-tensors=true' -split-input-file -allow-unregistered-dialect | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-tensors=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s // CHECK-LABEL: func @transfer_read_2d( // CHECK: %[[V_INIT:.*]] = arith.constant dense<-4.200000e+01> : vector<4x9xf32> diff --git a/mlir/test/Conversion/VectorToSCF/unrolled-vector-to-loops.mlir b/mlir/test/Conversion/VectorToSCF/unrolled-vector-to-loops.mlir index 18997ee..0b9db04 100644 --- a/mlir/test/Conversion/VectorToSCF/unrolled-vector-to-loops.mlir +++ b/mlir/test/Conversion/VectorToSCF/unrolled-vector-to-loops.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-vector-to-scf=full-unroll=true -split-input-file -allow-unregistered-dialect | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s // CHECK-LABEL: func @transfer_read_inbounds func @transfer_read_inbounds(%A : memref) -> (vector<2x3x4xf32>) { diff --git a/mlir/test/Conversion/VectorToSCF/vector-to-scf-mask-and-permutation-map.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-scf-mask-and-permutation-map.mlir index cf65bd6..781f0bd 100644 --- a/mlir/test/Conversion/VectorToSCF/vector-to-scf-mask-and-permutation-map.mlir +++ b/mlir/test/Conversion/VectorToSCF/vector-to-scf-mask-and-permutation-map.mlir @@ -1,4 +1,4 @@ -// RUN: mlir-opt %s -convert-vector-to-scf='lower-permutation-maps=true' -split-input-file | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true})" -split-input-file | FileCheck %s // Ensure that the permutation map is lowered (by inserting a transpose op) // before lowering the vector.transfer_read. 
diff --git a/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir index ab6e7a0..8cbac26 100644 --- a/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir +++ b/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir @@ -1,5 +1,5 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -split-input-file -allow-unregistered-dialect | FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf=full-unroll=true -split-input-file -allow-unregistered-dialect | FileCheck %s --check-prefix=FULL-UNROLL +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf)" -split-input-file -allow-unregistered-dialect | FileCheck %s +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true})" -split-input-file -allow-unregistered-dialect | FileCheck %s --check-prefix=FULL-UNROLL // CHECK-LABEL: func @vector_transfer_ops_0d( func @vector_transfer_ops_0d(%M: memref) { diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/benchmark_matmul.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/benchmark_matmul.mlir index e88ccd5..07d4e49 100644 --- a/mlir/test/Integration/Dialect/Linalg/CPU/benchmark_matmul.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/benchmark_matmul.mlir @@ -4,8 +4,8 @@ // RUN: mlir-opt -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=linalg.fill register-tile-sizes=4,32 vectorize" | \ // RUN: mlir-opt -test-linalg-codegen-strategy="anchor-func=matmul anchor-op=memref.copy register-tile-sizes=4,32 vectorize" | \ -// RUN: mlir-opt -canonicalize -convert-vector-to-scf -lower-affine -convert-linalg-to-loops | \ -// RUN: mlir-opt -canonicalize -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt -pass-pipeline="builtin.func(canonicalize,convert-vector-to-scf,lower-affine,convert-linalg-to-loops)" | \ +// RUN: mlir-opt -pass-pipeline="builtin.func(canonicalize,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -O3 -e main -entry-point-result=void \ // Activate to dump assembly // R_UN: -dump-object-file -object-filename=/tmp/a.o \ diff --git a/mlir/test/Integration/Dialect/Linalg/CPU/test-comprehensive-bufferize.mlir b/mlir/test/Integration/Dialect/Linalg/CPU/test-comprehensive-bufferize.mlir index ae34d73..0508a2f 100644 --- a/mlir/test/Integration/Dialect/Linalg/CPU/test-comprehensive-bufferize.mlir +++ b/mlir/test/Integration/Dialect/Linalg/CPU/test-comprehensive-bufferize.mlir @@ -1,6 +1,6 @@ -// RUN: mlir-opt %s -canonicalize -cse -linalg-comprehensive-module-bufferize |\ -// RUN: mlir-opt -buffer-deallocation -convert-vector-to-scf -lower-affine -convert-linalg-to-loops |\ -// RUN: mlir-opt -canonicalize -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(canonicalize,cse),linalg-comprehensive-module-bufferize" |\ +// RUN: mlir-opt -pass-pipeline="builtin.func(buffer-deallocation,convert-vector-to-scf,lower-affine,convert-linalg-to-loops)" |\ +// RUN: mlir-opt -pass-pipeline="builtin.func(canonicalize,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -O3 -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext 
|\ diff --git a/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir b/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir index 84d04f1..88b963c 100644 --- a/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir +++ b/mlir/test/Integration/Dialect/Standard/CPU/test-ceil-floor-pos-neg.mlir @@ -1,7 +1,4 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf \ -// RUN: -memref-expand -arith-expand -convert-vector-to-llvm \ -// RUN: -convert-memref-to-llvm -convert-func-to-llvm \ -// RUN: -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf,memref-expand,arith-expand),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir index 262ab4c..e485f4b 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-1d.mlir @@ -1,19 +1,19 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" 
| \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-2d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-2d.mlir index ada605d..f4469ea 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-2d.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-2d.mlir @@ -1,19 +1,19 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-3d.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-3d.mlir index 3bc59e2..dac05da 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-3d.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read-3d.mlir @@ -1,19 +1,19 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s 
-pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf='full-unroll=true lower-permutation-maps=true' -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true lower-permutation-maps=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read.mlir index 6ad315f..8747380 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-read.mlir @@ -1,9 +1,9 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf=full-unroll=true -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e entry -entry-point-result=void \ // RUN: 
-shared-libs=%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir index 87dfe27..ea7a614 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-transfer-to-loops.mlir @@ -1,9 +1,9 @@ -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf=full-unroll=true -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf{full-unroll=true},lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext,%mlir_integration_test_dir/libmlir_c_runner_utils%shlibext | \ // RUN: FileCheck %s diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir index d638147..6001e58 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-vector-distribute.mlir @@ -1,16 +1,14 @@ -// RUN: mlir-opt %s -test-vector-to-forloop -convert-vector-to-scf \ -// RUN: -lower-affine -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-vector-to-forloop,convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \ // RUN: mlir-cpu-runner -e main -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -convert-vector-to-scf -lower-affine \ -// RUN: -convert-scf-to-cf -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e main \ +// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-vector-to-scf,lower-affine,convert-scf-to-cf),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e main \ // RUN: -entry-point-result=void \ // RUN: -shared-libs=%mlir_integration_test_dir/libmlir_runner_utils%shlibext | \ // RUN: FileCheck %s -// RUN: mlir-opt %s -test-vector-to-forloop | FileCheck %s -check-prefix=TRANSFORM +// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-vector-to-forloop)" | FileCheck %s -check-prefix=TRANSFORM func private @print_memref_f32(memref<*xf32>) diff --git a/mlir/test/Target/LLVMIR/arm-neon-2d.mlir 
b/mlir/test/Target/LLVMIR/arm-neon-2d.mlir
index b75afdc..e6299f9 100644
--- a/mlir/test/Target/LLVMIR/arm-neon-2d.mlir
+++ b/mlir/test/Target/LLVMIR/arm-neon-2d.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -arm-neon-2d-to-intr %s | FileCheck %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(arm-neon-2d-to-intr)" %s | FileCheck %s
 
 // CHECK-LABEL: arm_neon_sdot2d_4x4_i8i8
 func @arm_neon_sdot2d_4x4_i8i8(%a: vector<4xi32>, %b: vector<4x4xi8>, %c: vector<4x4xi8>) -> vector<4xi32> {
diff --git a/mlir/test/Target/LLVMIR/vector-to-llvm-ir.mlir b/mlir/test/Target/LLVMIR/vector-to-llvm-ir.mlir
index 04ff73c..395e4ca 100644
--- a/mlir/test/Target/LLVMIR/vector-to-llvm-ir.mlir
+++ b/mlir/test/Target/LLVMIR/vector-to-llvm-ir.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-vector-to-llvm -convert-arith-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | mlir-translate -mlir-to-llvmir | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="convert-vector-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" | mlir-translate -mlir-to-llvmir | FileCheck %s
 
 func @genbool_1d() -> vector<8xi1> {
   %0 = vector.constant_mask [4] : vector<8xi1>
diff --git a/mlir/test/mlir-cpu-runner/async-error.mlir b/mlir/test/mlir-cpu-runner/async-error.mlir
index 7b961c0..dfc7673 100644
--- a/mlir/test/mlir-cpu-runner/async-error.mlir
+++ b/mlir/test/mlir-cpu-runner/async-error.mlir
@@ -1,14 +1,4 @@
-// RUN: mlir-opt %s -async-to-async-runtime \
-// RUN: -async-runtime-ref-counting \
-// RUN: -async-runtime-ref-counting-opt \
-// RUN: -convert-async-to-llvm \
-// RUN: -convert-linalg-to-loops \
-// RUN: -convert-scf-to-cf \
-// RUN: -convert-linalg-to-llvm \
-// RUN: -convert-vector-to-llvm \
-// RUN: -convert-arith-to-llvm \
-// RUN: -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-linalg-to-loops,convert-scf-to-cf),convert-linalg-to-llvm,convert-vector-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
 // RUN: | mlir-cpu-runner \
 // RUN: -e main -entry-point-result=void -O0 \
 // RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \
diff --git a/mlir/test/mlir-cpu-runner/async-group.mlir b/mlir/test/mlir-cpu-runner/async-group.mlir
index b55241c..aab262d 100644
--- a/mlir/test/mlir-cpu-runner/async-group.mlir
+++ b/mlir/test/mlir-cpu-runner/async-group.mlir
@@ -1,10 +1,4 @@
-// RUN: mlir-opt %s -async-to-async-runtime \
-// RUN: -async-runtime-ref-counting \
-// RUN: -async-runtime-ref-counting-opt \
-// RUN: -convert-async-to-llvm \
-// RUN: -convert-arith-to-llvm \
-// RUN: -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
 // RUN: | mlir-cpu-runner \
 // RUN: -e main -entry-point-result=void -O0 \
 // RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \
diff --git a/mlir/test/mlir-cpu-runner/async-value.mlir b/mlir/test/mlir-cpu-runner/async-value.mlir
index 02f2094..5d8074f 100644
--- a/mlir/test/mlir-cpu-runner/async-value.mlir
+++ b/mlir/test/mlir-cpu-runner/async-value.mlir
@@ -1,12 +1,4 @@
-// RUN: mlir-opt %s -async-to-async-runtime \
-// RUN: -async-runtime-ref-counting \
-// RUN: -async-runtime-ref-counting-opt \
-// RUN: -convert-async-to-llvm \
-// RUN: -convert-arith-to-llvm \
-// RUN: -convert-vector-to-llvm \
-// RUN: -convert-memref-to-llvm \
-// RUN: -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-arith-to-llvm),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" \
 // RUN: | mlir-cpu-runner \
 // RUN: -e main -entry-point-result=void -O0 \
 // RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \
diff --git a/mlir/test/mlir-cpu-runner/async.mlir b/mlir/test/mlir-cpu-runner/async.mlir
index 675a8de..2426302 100644
--- a/mlir/test/mlir-cpu-runner/async.mlir
+++ b/mlir/test/mlir-cpu-runner/async.mlir
@@ -1,14 +1,4 @@
-// RUN: mlir-opt %s -async-to-async-runtime \
-// RUN: -async-runtime-ref-counting \
-// RUN: -async-runtime-ref-counting-opt \
-// RUN: -convert-async-to-llvm \
-// RUN: -convert-linalg-to-loops \
-// RUN: -convert-scf-to-cf \
-// RUN: -convert-linalg-to-llvm \
-// RUN: -convert-memref-to-llvm \
-// RUN: -convert-arith-to-llvm \
-// RUN: -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-linalg-to-loops,convert-scf-to-cf),convert-linalg-to-llvm,convert-memref-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
 // RUN: | mlir-cpu-runner \
 // RUN: -e main -entry-point-result=void -O0 \
 // RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \
diff --git a/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir b/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir
index 008123b..944d04d 100644
--- a/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir
+++ b/mlir/test/mlir-cpu-runner/bare-ptr-call-conv.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-scf-to-cf -convert-arith-to-llvm -convert-memref-to-llvm -convert-func-to-llvm='use-bare-ptr-memref-call-conv=1' -reconcile-unrealized-casts | mlir-cpu-runner -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext -entry-point-result=void | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-scf-to-cf,convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm{use-bare-ptr-memref-call-conv=1}" -reconcile-unrealized-casts | mlir-cpu-runner -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext -entry-point-result=void | FileCheck %s
 
 // Verify bare pointer memref calling convention. `simple_add1_add2_test`
 // gets two 2xf32 memrefs, adds 1.0f to the first one and 2.0f to the second
diff --git a/mlir/test/mlir-cpu-runner/copy.mlir b/mlir/test/mlir-cpu-runner/copy.mlir
index e6f43ee..df42845 100644
--- a/mlir/test/mlir-cpu-runner/copy.mlir
+++ b/mlir/test/mlir-cpu-runner/copy.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-scf-to-cf -convert-arith-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-scf-to-cf,convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" \
 // RUN: | mlir-cpu-runner -e main -entry-point-result=void \
 // RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \
 // RUN: | FileCheck %s
diff --git a/mlir/test/mlir-cpu-runner/global-memref.mlir b/mlir/test/mlir-cpu-runner/global-memref.mlir
index 6fcc3f3..37c6e6f4 100644
--- a/mlir/test/mlir-cpu-runner/global-memref.mlir
+++ b/mlir/test/mlir-cpu-runner/global-memref.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-arith-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s
 
 func private @print_memref_f32(memref<*xf32>) attributes { llvm.emit_c_interface }
 func private @print_memref_i32(memref<*xi32>) attributes { llvm.emit_c_interface }
diff --git a/mlir/test/mlir-cpu-runner/math-polynomial-approx.mlir b/mlir/test/mlir-cpu-runner/math-polynomial-approx.mlir
index 92b417bb..c4674c1 100644
--- a/mlir/test/mlir-cpu-runner/math-polynomial-approx.mlir
+++ b/mlir/test/mlir-cpu-runner/math-polynomial-approx.mlir
@@ -1,9 +1,4 @@
-// RUN: mlir-opt %s -test-math-polynomial-approximation \
-// RUN: -convert-arith-to-llvm \
-// RUN: -convert-vector-to-llvm \
-// RUN: -convert-math-to-llvm \
-// RUN: -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(test-math-polynomial-approximation,convert-arith-to-llvm),convert-vector-to-llvm,builtin.func(convert-math-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
 // RUN: | mlir-cpu-runner \
 // RUN: -e main -entry-point-result=void -O0 \
 // RUN: -shared-libs=%linalg_test_lib_dir/libmlir_c_runner_utils%shlibext \
diff --git a/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir b/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir
index 1350443..e26730c 100644
--- a/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir
+++ b/mlir/test/mlir-cpu-runner/memref-reinterpret-cast.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-scf-to-cf -convert-memref-to-llvm -convert-arith-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-scf-to-cf),convert-memref-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
 // RUN: | mlir-cpu-runner -e main -entry-point-result=void \
 // RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \
 // RUN: | FileCheck %s
diff --git a/mlir/test/mlir-cpu-runner/memref-reshape.mlir b/mlir/test/mlir-cpu-runner/memref-reshape.mlir
index 692a96b1..4fa312b 100644
--- a/mlir/test/mlir-cpu-runner/memref-reshape.mlir
+++ b/mlir/test/mlir-cpu-runner/memref-reshape.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-scf-to-cf -memref-expand -convert-arith-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-scf-to-cf,memref-expand,convert-arith-to-llvm),convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" \
 // RUN: | mlir-cpu-runner -e main -entry-point-result=void \
 // RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext \
 // RUN: | FileCheck %s
diff --git a/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir b/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir
index 455fb99..81c6e20 100644
--- a/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir
+++ b/mlir/test/mlir-cpu-runner/sgemm-naive-codegen.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -convert-linalg-to-loops -lower-affine -convert-scf-to-cf -convert-arith-to-llvm -convert-vector-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts %s | mlir-cpu-runner -O3 -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s
+// RUN: mlir-opt -pass-pipeline="builtin.func(convert-linalg-to-loops,lower-affine,convert-scf-to-cf,convert-arith-to-llvm),convert-vector-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" %s | mlir-cpu-runner -O3 -e main -entry-point-result=void -shared-libs=%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s
 
 func @main() {
   %A = memref.alloc() : memref<16x16xf32>
diff --git a/mlir/test/mlir-cpu-runner/unranked-memref.mlir b/mlir/test/mlir-cpu-runner/unranked-memref.mlir
index 1ceffd2..2e2062c 100644
--- a/mlir/test/mlir-cpu-runner/unranked-memref.mlir
+++ b/mlir/test/mlir-cpu-runner/unranked-memref.mlir
@@ -1,10 +1,4 @@
-// RUN: mlir-opt %s -convert-linalg-to-loops \
-// RUN: -convert-scf-to-cf \
-// RUN: -convert-arith-to-llvm \
-// RUN: -convert-linalg-to-llvm \
-// RUN: -convert-memref-to-llvm \
-// RUN: -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts | \
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | \
 // RUN: mlir-cpu-runner -e main -entry-point-result=void \
 // RUN: -shared-libs=%mlir_runner_utils_dir/libmlir_runner_utils%shlibext,%mlir_runner_utils_dir/libmlir_c_runner_utils%shlibext | FileCheck %s
 
diff --git a/mlir/test/mlir-cpu-runner/utils.mlir b/mlir/test/mlir-cpu-runner/utils.mlir
index 5d39db9..461d057 100644
--- a/mlir/test/mlir-cpu-runner/utils.mlir
+++ b/mlir/test/mlir-cpu-runner/utils.mlir
@@ -1,7 +1,7 @@
-// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e print_0d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-0D
-// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e print_1d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-1D
-// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e print_3d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-3D
-// RUN: mlir-opt %s -convert-linalg-to-loops -convert-scf-to-cf -convert-arith-to-llvm -convert-linalg-to-llvm -convert-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts | mlir-cpu-runner -e vector_splat_2d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-VECTOR-SPLAT-2D
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e print_0d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-0D
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e print_1d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-1D
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e print_3d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-3D
+// RUN: mlir-opt %s -pass-pipeline="builtin.func(convert-linalg-to-loops,convert-scf-to-cf,convert-arith-to-llvm),convert-linalg-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,reconcile-unrealized-casts" | mlir-cpu-runner -e vector_splat_2d -entry-point-result=void -shared-libs=%linalg_test_lib_dir/libmlir_runner_utils%shlibext | FileCheck %s --check-prefix=PRINT-VECTOR-SPLAT-2D
 
 func @print_0d() {
   %f = arith.constant 2.00000e+00 : f32
diff --git a/mlir/test/mlir-opt/async.mlir b/mlir/test/mlir-opt/async.mlir
index e77cb83..5b0c11c 100644
--- a/mlir/test/mlir-opt/async.mlir
+++ b/mlir/test/mlir-opt/async.mlir
@@ -1,16 +1,6 @@
 // Check if mlir marks the corresponding function with required coroutine attribute.
 //
-// RUN: mlir-opt %s -async-to-async-runtime \
-// RUN: -async-runtime-ref-counting \
-// RUN: -async-runtime-ref-counting-opt \
-// RUN: -convert-async-to-llvm \
-// RUN: -convert-linalg-to-loops \
-// RUN: -convert-scf-to-cf \
-// RUN: -convert-linalg-to-llvm \
-// RUN: -convert-memref-to-llvm \
-// RUN: -convert-arith-to-llvm \
-// RUN: -convert-func-to-llvm \
-// RUN: -reconcile-unrealized-casts \
+// RUN: mlir-opt %s -pass-pipeline="async-to-async-runtime,builtin.func(async-runtime-ref-counting,async-runtime-ref-counting-opt),convert-async-to-llvm,builtin.func(convert-linalg-to-loops,convert-scf-to-cf),convert-linalg-to-llvm,convert-memref-to-llvm,builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
 // RUN: | FileCheck %s
 
 // CHECK: llvm.func @async_execute_fn{{.*}}attributes{{.*}}"coroutine.presplit", "0"
-- 
2.7.4
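Editor's note (illustrative, not part of the upstream patch): the updated RUN lines above all use the explicit textual pipeline form, in which passes listed inside builtin.func(...) run on each function while the remaining passes run on the enclosing module. As a minimal sketch, assuming a hypothetical test file example.mlir that only needs function-level arithmetic lowering followed by module-level function lowering, an invocation in this form would look like:

// RUN: mlir-opt example.mlir \
// RUN:   -pass-pipeline="builtin.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts" \
// RUN: | FileCheck example.mlir

The file name and pass selection here are placeholders chosen to mirror the shape of the RUN lines in the diff; each real test above picks its own pass list and runner.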