From 0ea6154b2a077c4e16549e399b9f3f297aad1a64 Mon Sep 17 00:00:00 2001 From: Jacques Pienaar Date: Mon, 29 Apr 2019 09:24:09 -0700 Subject: [PATCH] Add Dialect in op definition to capture prefix and documentation. Enables specifying the documentation for dialect along with defining the ops of the dialect. The doc generator will be expanded in follow up to emit the documentation in the autogenerated files. This is precursor to allowing common base for all ops in a dialect. All the dialect documentation is super sparse and just added as placeholder. I was tempted (and started) to move ConstantOp to be generated too, but this will be easier post adding extra_methods, so deferring until then. -- PiperOrigin-RevId: 245759984 --- mlir/include/mlir/FxpMathOps/FxpMathOps.td | 6 +++- mlir/include/mlir/IR/OpBase.td | 20 ++++++++++- mlir/include/mlir/LLVMIR/LLVMOpBase.td | 10 ++++-- mlir/include/mlir/LLVMIR/LLVMOps.td | 2 +- mlir/include/mlir/LLVMIR/NVVMOps.td | 8 +++-- mlir/include/mlir/Linalg/LinalgOps.td | 32 ++++++++--------- mlir/include/mlir/Quantization/QuantOps.td | 6 +++- mlir/include/mlir/StandardOps/Ops.td | 42 ++++++++++++---------- mlir/include/mlir/TableGen/Operator.h | 2 +- mlir/lib/TableGen/Operator.cpp | 9 +++-- mlir/lib/TableGen/Pattern.cpp | 1 + mlir/test/mlir-tblgen/attr-enum.td | 12 +++++-- .../mlir-tblgen/directive-verifyUnusedValue.td | 10 ++++-- mlir/test/mlir-tblgen/op-attribute.td | 14 +++++--- mlir/test/mlir-tblgen/op-decl.td | 18 ++++++---- mlir/test/mlir-tblgen/op-operand.td | 16 ++++++--- mlir/test/mlir-tblgen/op-result.td | 28 +++++++++------ mlir/test/mlir-tblgen/pattern-NativeCodeCall.td | 10 ++++-- mlir/test/mlir-tblgen/pattern-allof-attr.td | 8 ++++- mlir/test/mlir-tblgen/pattern-attr.td | 8 ++++- mlir/test/mlir-tblgen/pattern-benefit.td | 14 +++++--- mlir/test/mlir-tblgen/pattern-bound-symbol.td | 14 +++++--- mlir/test/mlir-tblgen/pattern-multi-result-op.td | 14 +++++--- mlir/test/mlir-tblgen/pattern.td | 10 ++++-- mlir/test/mlir-tblgen/predicate.td | 24 ++++++++----- mlir/test/mlir-tblgen/reference-impl.td | 8 ++++- 26 files changed, 240 insertions(+), 106 deletions(-) diff --git a/mlir/include/mlir/FxpMathOps/FxpMathOps.td b/mlir/include/mlir/FxpMathOps/FxpMathOps.td index c611111..708b17c 100644 --- a/mlir/include/mlir/FxpMathOps/FxpMathOps.td +++ b/mlir/include/mlir/FxpMathOps/FxpMathOps.td @@ -30,6 +30,10 @@ include "mlir/IR/OpBase.td" include "mlir/Quantization/QuantPredicates.td" +def fxpmath_Dialect : Dialect { + let name = "fxpmath"; +} + //===----------------------------------------------------------------------===// // Attributes //===----------------------------------------------------------------------===// @@ -86,7 +90,7 @@ def fxpmath_CompareFnAttr : EnumAttr<"ComparisonFn", //===----------------------------------------------------------------------===// class fxpmath_Op traits> : - Op; + Op; //===----------------------------------------------------------------------===// // Fixed-point (fxp) arithmetic ops used by kernels. diff --git a/mlir/include/mlir/IR/OpBase.td b/mlir/include/mlir/IR/OpBase.td index 300b86e..5f6dcd0 100644 --- a/mlir/include/mlir/IR/OpBase.td +++ b/mlir/include/mlir/IR/OpBase.td @@ -193,6 +193,21 @@ def IsStaticShapeTensorTypePred : CPred<"$_self.cast().hasStaticShape()">; //===----------------------------------------------------------------------===// +// Dialect definitions +//===----------------------------------------------------------------------===// + +class Dialect { + // The name of the dialect. 
+ string name = ?; + + // Short summary of the dialect. + string summary = ?; + + // The description of the dialect. + string description = ?; +} + +//===----------------------------------------------------------------------===// // Type definitions //===----------------------------------------------------------------------===// @@ -808,7 +823,10 @@ class OpBuilder { } // Base class for all ops. -class Op props = []> { +class Op props = []> { + // The dialect of the op. + Dialect opDialect = dialect; + // The mnemonic of the op. string opName = mnemonic; diff --git a/mlir/include/mlir/LLVMIR/LLVMOpBase.td b/mlir/include/mlir/LLVMIR/LLVMOpBase.td index 2a59527..f609a30 100644 --- a/mlir/include/mlir/LLVMIR/LLVMOpBase.td +++ b/mlir/include/mlir/LLVMIR/LLVMOpBase.td @@ -29,14 +29,18 @@ include "mlir/IR/OpBase.td" #endif // OP_BASE +def LLVM_Dialect : Dialect { + let name = "llvm"; +} + // LLVM IR type wrapped in MLIR. def LLVM_Type : Type()">, "LLVM dialect type">; // Base class for LLVM operations. Defines the interface to the llvm::IRBuilder // used to translate to LLVM IR proper. -class LLVM_OpBase traits = []> : - Op { +class LLVM_OpBase traits = []> : + Op { // A pattern for constructing the LLVM IR Instruction (or other Value) that // corresponds to this op. This pattern can use `builder` to refer to an // `llvm::IRBuilder<>` instance, $-names of arguments and results and the @@ -51,4 +55,4 @@ class LLVM_OpBase traits = []> : string llvmBuilder = ""; } -#endif // LLVMIR_OP_BASE \ No newline at end of file +#endif // LLVMIR_OP_BASE diff --git a/mlir/include/mlir/LLVMIR/LLVMOps.td b/mlir/include/mlir/LLVMIR/LLVMOps.td index 56e6771..0b3968f 100644 --- a/mlir/include/mlir/LLVMIR/LLVMOps.td +++ b/mlir/include/mlir/LLVMIR/LLVMOps.td @@ -30,7 +30,7 @@ include "mlir/LLVMIR/LLVMOpBase.td" // this class is specialized below for both cases and should not be used // directly. class LLVM_Op traits = []> : - LLVM_OpBase { + LLVM_OpBase { } class LLVM_Builder { diff --git a/mlir/include/mlir/LLVMIR/NVVMOps.td b/mlir/include/mlir/LLVMIR/NVVMOps.td index e020e42..8b6e2b8 100644 --- a/mlir/include/mlir/LLVMIR/NVVMOps.td +++ b/mlir/include/mlir/LLVMIR/NVVMOps.td @@ -25,8 +25,12 @@ include "mlir/LLVMIR/LLVMOpBase.td" +def NVVM_Dialect : Dialect { + let name = "nvvm"; +} + class NVVM_Op traits = []> : - LLVM_OpBase { + LLVM_OpBase { } class NVVM_SpecialRegisterOp; def NVVM_BlockDimYOp : NVVM_SpecialRegisterOp<"read.ptx.sreg.nctaid.y">; def NVVM_BlockDimZOp : NVVM_SpecialRegisterOp<"read.ptx.sreg.nctaid.z">; -#endif // NVVMIR_OPS \ No newline at end of file +#endif // NVVMIR_OPS diff --git a/mlir/include/mlir/Linalg/LinalgOps.td b/mlir/include/mlir/Linalg/LinalgOps.td index 4198f91..d6673f2 100644 --- a/mlir/include/mlir/Linalg/LinalgOps.td +++ b/mlir/include/mlir/Linalg/LinalgOps.td @@ -27,13 +27,16 @@ include "mlir/IR/OpBase.td" #endif // OP_BASE +def Linalg_Dialect : Dialect { + let name = "linalg"; +} + // Whether a type is a ViewType. def LinalgIsViewTypePred : CPred<"$_self.isa()">; def View : Type; class ParametricNativeOpTrait : - NativeOpTrait -{} + NativeOpTrait; class ParametricIntNativeOpTrait parameters> : ParametricNativeOpTrait< @@ -45,35 +48,32 @@ class ParametricIntNativeOpTrait parameters> : sum, param, sum # "," # !cast(param)), - ">::Impl")> -{} + ">::Impl")>; // The Linalg `NInputsAndOutputs` trait provides the API for ops that are known // to have a specified number of inputs and outputs, all passed as operands. // See Linalg/LinalgTraits.h for implementation details an usage. 
class NInputsAndOutputs : - ParametricIntNativeOpTrait<"NInputsAndOutputs", [n_ins, n_outs]> -{} + ParametricIntNativeOpTrait<"NInputsAndOutputs", [n_ins, n_outs]>; // The linalg `NLoopTypes` trait provides the API for ops that are known to have // a specified number of parallel (n_par), reduction (n_red) and window (n_win) // loops. // See Linalg/LinalgTraits.h for implementation details an usage. class NLoopTypes : -ParametricIntNativeOpTrait<"NLoopTypes", [n_par, n_red, n_win]> -{} + ParametricIntNativeOpTrait<"NLoopTypes", [n_par, n_red, n_win]>; // The linalg `ViewRanks` trait the API for ops that are known to have a // specified list of view ranks. // See Linalg/LinalgTraits.h for implementation details an usage. class ViewRanks ranks> : -ParametricIntNativeOpTrait<"ViewRanks", ranks> -{} + ParametricIntNativeOpTrait<"ViewRanks", ranks>; // Base Tablegen class for Linalg ops. class LinalgOp props> : -Op { - let arguments = (ins Variadic); // default variadic builder + Op { + // The default variadic builder. + let arguments = (ins Variadic); let parser = [{ return impl::parseLinalgLibraryOp(parser, result); }]; @@ -85,12 +85,12 @@ Op { //////////////////////////////////////////////////////////////////////////////// def DotOp : LinalgOp<"dot", [NInputsAndOutputs<2, 1>, NLoopTypes<0, 1, 0>, - ViewRanks<[1, 1, 0]>]> {} + ViewRanks<[1, 1, 0]>]>; def MatvecOp : LinalgOp<"matvec", [NInputsAndOutputs<2, 1>, NLoopTypes<1, 1, 0>, - ViewRanks<[2, 1, 1]>]> {} + ViewRanks<[2, 1, 1]>]>; def MatmulOp : LinalgOp<"matmul", [NInputsAndOutputs<2, 1>, NLoopTypes<2, 1, 0>, - ViewRanks<[2, 2, 2]>]> {} + ViewRanks<[2, 2, 2]>]>; -#endif // LINALG_OPS \ No newline at end of file +#endif // LINALG_OPS diff --git a/mlir/include/mlir/Quantization/QuantOps.td b/mlir/include/mlir/Quantization/QuantOps.td index ec394757..225f45b 100644 --- a/mlir/include/mlir/Quantization/QuantOps.td +++ b/mlir/include/mlir/Quantization/QuantOps.td @@ -28,12 +28,16 @@ include "mlir/IR/OpBase.td" include "mlir/Quantization/QuantPredicates.td" #endif // OP_BASE +def quant_Dialect : Dialect { + let name = "quant"; +} + //===----------------------------------------------------------------------===// // Base classes //===----------------------------------------------------------------------===// class quant_Op traits> : - Op; + Op; //===----------------------------------------------------------------------===// // Quantization casts diff --git a/mlir/include/mlir/StandardOps/Ops.td b/mlir/include/mlir/StandardOps/Ops.td index 0b78e14..c83790c 100644 --- a/mlir/include/mlir/StandardOps/Ops.td +++ b/mlir/include/mlir/StandardOps/Ops.td @@ -28,14 +28,18 @@ include "mlir/IR/OpBase.td" #endif // OP_BASE +def Standard_Dialect : Dialect { + let name = "std"; +} + // Base class for standard arithmetic operations. Requires operands and // results to be of the same type, but does not constrain them to specific // types. Individual classes will have `lhs` and `rhs` accessor to operands. 
class ArithmeticOp traits = []> : - Op, - Results<(outs AnyType)> { + Op { - let opName = mnemonic; + let results = (outs AnyType); let parser = [{ return impl::parseBinaryOp(parser, result); @@ -69,84 +73,84 @@ class FloatArithmeticOp traits = []> : ArithmeticOp, Arguments<(ins FloatLike:$lhs, FloatLike:$rhs)>; -def AddFOp : FloatArithmeticOp<"std.addf"> { +def AddFOp : FloatArithmeticOp<"addf"> { let summary = "floating point addition operation"; let hasConstantFolder = 0b1; } -def AddIOp : IntArithmeticOp<"std.addi", [Commutative]> { +def AddIOp : IntArithmeticOp<"addi", [Commutative]> { let summary = "integer addition operation"; let hasFolder = 1; let hasConstantFolder = 0b1; } -def AndOp : IntArithmeticOp<"std.and", [Commutative]> { +def AndOp : IntArithmeticOp<"and", [Commutative]> { let summary = "integer binary and"; let hasConstantFolder = 0b1; let hasFolder = 1; } -def DivFOp : FloatArithmeticOp<"std.divf"> { +def DivFOp : FloatArithmeticOp<"divf"> { let summary = "floating point division operation"; } -def DivISOp : IntArithmeticOp<"std.divis"> { +def DivISOp : IntArithmeticOp<"divis"> { let summary = "signed integer division operation"; let hasConstantFolder = 0b1; } -def DivIUOp : IntArithmeticOp<"std.diviu"> { +def DivIUOp : IntArithmeticOp<"diviu"> { let summary = "unsigned integer division operation"; let hasConstantFolder = 0b1; } -def MulFOp : FloatArithmeticOp<"std.mulf"> { +def MulFOp : FloatArithmeticOp<"mulf"> { let summary = "foating point multiplication operation"; let hasConstantFolder = 0b1; } -def MulIOp : IntArithmeticOp<"std.muli", [Commutative]> { +def MulIOp : IntArithmeticOp<"muli", [Commutative]> { let summary = "integer multiplication operation"; let hasConstantFolder = 0b1; let hasFolder = 1; } -def OrOp : IntArithmeticOp<"std.or", [Commutative]> { +def OrOp : IntArithmeticOp<"or", [Commutative]> { let summary = "integer binary or"; let hasConstantFolder = 0b1; let hasFolder = 1; } -def RemFOp : FloatArithmeticOp<"std.remf"> { +def RemFOp : FloatArithmeticOp<"remf"> { let summary = "floating point division remainder operation"; } -def RemISOp : IntArithmeticOp<"std.remis"> { +def RemISOp : IntArithmeticOp<"remis"> { let summary = "signed integer division remainder operation"; let hasConstantFolder = 0b1; } -def RemIUOp : IntArithmeticOp<"std.remiu"> { +def RemIUOp : IntArithmeticOp<"remiu"> { let summary = "unsigned integer division remainder operation"; let hasConstantFolder = 0b1; } -def ShlISOp : IntArithmeticOp<"std.shlis"> { +def ShlISOp : IntArithmeticOp<"shlis"> { let summary = "signed integer shift left"; } -def SubFOp : FloatArithmeticOp<"std.subf"> { +def SubFOp : FloatArithmeticOp<"subf"> { let summary = "floating point subtraction operation"; let hasConstantFolder = 0b1; } -def SubIOp : IntArithmeticOp<"std.subi"> { +def SubIOp : IntArithmeticOp<"subi"> { let summary = "integer subtraction operation"; let hasConstantFolder = 0b1; let hasCanonicalizer = 0b1; } -def XOrOp : IntArithmeticOp<"std.xor", [Commutative]> { +def XOrOp : IntArithmeticOp<"xor", [Commutative]> { let summary = "integer binary xor"; let hasConstantFolder = 0b1; let hasCanonicalizer = 0b1; diff --git a/mlir/include/mlir/TableGen/Operator.h b/mlir/include/mlir/TableGen/Operator.h index e96b29c..f350a17 100644 --- a/mlir/include/mlir/TableGen/Operator.h +++ b/mlir/include/mlir/TableGen/Operator.h @@ -51,7 +51,7 @@ public: explicit Operator(const llvm::Record *def) : Operator(*def) {} // Returns the operation name. 
- StringRef getOperationName() const; + std::string getOperationName() const; // Returns this op's dialect name. StringRef getDialectName() const; diff --git a/mlir/lib/TableGen/Operator.cpp b/mlir/lib/TableGen/Operator.cpp index 3854728..f624718 100644 --- a/mlir/lib/TableGen/Operator.cpp +++ b/mlir/lib/TableGen/Operator.cpp @@ -46,8 +46,13 @@ tblgen::Operator::Operator(const llvm::Record &def) : def(def) { populateOpStructure(); } -StringRef tblgen::Operator::getOperationName() const { - return def.getValueAsString("opName"); +std::string tblgen::Operator::getOperationName() const { + auto *dialect = def.getValueAsDef("opDialect"); + assert(dialect && "op defined without dialect"); + auto prefix = dialect->getValueAsString("name"); + if (prefix.empty()) + return def.getValueAsString("opName"); + return llvm::formatv("{0}.{1}", prefix, def.getValueAsString("opName")); } StringRef tblgen::Operator::getDialectName() const { return dialectName; } diff --git a/mlir/lib/TableGen/Pattern.cpp b/mlir/lib/TableGen/Pattern.cpp index 420f9d2..ff0150d 100644 --- a/mlir/lib/TableGen/Pattern.cpp +++ b/mlir/lib/TableGen/Pattern.cpp @@ -28,6 +28,7 @@ using namespace mlir; +using llvm::formatv; using mlir::tblgen::Operator; bool tblgen::DagLeaf::isUnspecified() const { diff --git a/mlir/test/mlir-tblgen/attr-enum.td b/mlir/test/mlir-tblgen/attr-enum.td index 8cea34a..3347165 100644 --- a/mlir/test/mlir-tblgen/attr-enum.td +++ b/mlir/test/mlir-tblgen/attr-enum.td @@ -9,7 +9,13 @@ def NS_SomeEnum_B : EnumAttrCase<"B">; def NS_SomeEnum : EnumAttr< "SomeEnum", "some enum", [NS_SomeEnum_A, NS_SomeEnum_B]>; -def NS_OpA : Op<"op_a_with_enum_attr", []> { +def Test_Dialect : Dialect { + let name = "test"; +} +class NS_Op traits> : + Op; + +def NS_OpA : NS_Op<"op_a_with_enum_attr", []> { let arguments = (ins NS_SomeEnum:$attr); let results = (outs I32:$result); } @@ -26,7 +32,7 @@ def NS_OpA : Op<"op_a_with_enum_attr", []> { // DEF: if (!(((tblgen_attr.isa())) && (((tblgen_attr.cast().getValue() == "A")) || ((tblgen_attr.cast().getValue() == "B"))))) // DEF-SAME: return emitOpError("attribute 'attr' failed to satisfy constraint: some enum"); -def NS_OpB : Op<"op_b_with_enum_attr", []> { +def NS_OpB : NS_Op<"op_b_with_enum_attr", []> { let arguments = (ins NS_SomeEnum:$attr); let results = (outs I32:$result); } @@ -50,7 +56,7 @@ def : Pat<(NS_OpA NS_SomeEnum_A:$attr), (NS_OpB NS_SomeEnum_B)>; def NS_SomeEnum_Array : TypedArrayAttrBase; -def NS_OpC : Op<"op_b_with_enum_array_attr", []> { +def NS_OpC : NS_Op<"op_b_with_enum_array_attr", []> { let arguments = (ins NS_SomeEnum_Array:$attr); let results = (outs I32:$result); } diff --git a/mlir/test/mlir-tblgen/directive-verifyUnusedValue.td b/mlir/test/mlir-tblgen/directive-verifyUnusedValue.td index 94143df..1ed1e29 100644 --- a/mlir/test/mlir-tblgen/directive-verifyUnusedValue.td +++ b/mlir/test/mlir-tblgen/directive-verifyUnusedValue.td @@ -2,12 +2,18 @@ include "mlir/IR/OpBase.td" -def ThreeResultOp : Op<"three_result_op", []> { +def Test_Dialect : Dialect { + let name = "test"; +} +class NS_Op traits> : + Op; + +def ThreeResultOp : NS_Op<"three_result_op", []> { let arguments = (ins I32:$input); let results = (outs I32:$r1, I32:$r2, I32:$r3); } -def OneResultOp : Op<"one_result_op", []> { +def OneResultOp : NS_Op<"one_result_op", []> { let arguments = (ins I32:$input); let results = (outs I32:$r1); } diff --git a/mlir/test/mlir-tblgen/op-attribute.td b/mlir/test/mlir-tblgen/op-attribute.td index 32b9b6c..e1b4060 100644 --- a/mlir/test/mlir-tblgen/op-attribute.td 
+++ b/mlir/test/mlir-tblgen/op-attribute.td @@ -2,6 +2,12 @@ include "mlir/IR/OpBase.td" +def Test_Dialect : Dialect { + let name = "test"; +} +class NS_Op traits> : + Op; + def SomeAttr : Attr, "some attribute kind"> { let storageType = "some-attr-kind"; let returnType = "some-return-type"; @@ -12,7 +18,7 @@ def SomeAttr : Attr, "some attribute kind"> { // Test required, optional, default-valued attributes // --- -def AOp : Op<"a_op", []> { +def AOp : NS_Op<"a_op", []> { let arguments = (ins SomeAttr:$aAttr, DefaultValuedAttr:$bAttr, @@ -72,7 +78,7 @@ def SomeTypeAttr : TypeAttrBase<"SomeType", "some type attribute">; // Test common attribute kinds' constraints // --- -def BOp : Op<"b_op", []> { +def BOp : NS_Op<"b_op", []> { let arguments = (ins AnyAttr:$any_attr, BoolAttr:$bool_attr, @@ -106,7 +112,7 @@ def BOp : Op<"b_op", []> { // Test building constant values for array attribute kinds // --- -def COp : Op<"c_op", []> { +def COp : NS_Op<"c_op", []> { let arguments = (ins DefaultValuedAttr:$i32_array_attr, DefaultValuedAttr:$i64_array_attr, @@ -126,7 +132,7 @@ def COp : Op<"c_op", []> { // Test mixing operands and attributes in arbitrary order // --- -def MixOperandsAndAttrs : Op<"mix_operands_and_attrs", []> { +def MixOperandsAndAttrs : NS_Op<"mix_operands_and_attrs", []> { let arguments = (ins F32Attr:$attr, F32:$operand, F32Attr:$otherAttr, F32:$otherArg); } diff --git a/mlir/test/mlir-tblgen/op-decl.td b/mlir/test/mlir-tblgen/op-decl.td index ec4d843..5d98d6c 100644 --- a/mlir/test/mlir-tblgen/op-decl.td +++ b/mlir/test/mlir-tblgen/op-decl.td @@ -2,7 +2,13 @@ include "mlir/IR/OpBase.td" -def NS_AOp : Op<"a_op", [NoSideEffect]> { +def Test_Dialect : Dialect { + let name = "test"; +} +class NS_Op traits> : + Op; + +def NS_AOp : NS_Op<"a_op", [NoSideEffect]> { let arguments = (ins I32:$a, Variadic:$b, @@ -51,21 +57,21 @@ def NS_AOp : Op<"a_op", [NoSideEffect]> { // Check op trait for different number of operands // --- -def NS_BOp : Op<"op_with_no_operand", []> { +def NS_BOp : NS_Op<"op_with_no_operand", []> { let arguments = (ins); } // CHECK-LABEL: NS::BOp declarations // CHECK: OpTrait::NOperands<0>::Impl -def NS_COp : Op<"op_with_one_operand", []> { +def NS_COp : NS_Op<"op_with_one_operand", []> { let arguments = (ins I32:$operand); } // CHECK-LABEL: NS::COp declarations // CHECK: OpTrait::NOperands<1>::Impl -def NS_DOp : Op<"op_with_two_operands", []> { +def NS_DOp : NS_Op<"op_with_two_operands", []> { let arguments = (ins I32:$input1, I32:$input2); } @@ -75,12 +81,12 @@ def NS_DOp : Op<"op_with_two_operands", []> { // Check leading underscore in op name // --- -def NS__AOp : Op<"_op_with_leading_underscore", []>; +def NS__AOp : NS_Op<"_op_with_leading_underscore", []>; // CHECK-LABEL: NS::_AOp declarations // CHECK: class _AOp : public Op<_AOp -def _BOp : Op<"_op_with_leading_underscore_and_no_namespace", []>; +def _BOp : NS_Op<"_op_with_leading_underscore_and_no_namespace", []>; // CHECK-LABEL: _BOp declarations // CHECK: class _BOp : public Op<_BOp diff --git a/mlir/test/mlir-tblgen/op-operand.td b/mlir/test/mlir-tblgen/op-operand.td index 936fb7c..6416905 100644 --- a/mlir/test/mlir-tblgen/op-operand.td +++ b/mlir/test/mlir-tblgen/op-operand.td @@ -2,7 +2,13 @@ include "mlir/IR/OpBase.td" -def OpA : Op<"one_normal_operand_op", []> { +def Test_Dialect : Dialect { + let name = "test"; +} +class NS_Op traits> : + Op; + +def OpA : NS_Op<"one_normal_operand_op", []> { let arguments = (ins I32:$input); } @@ -21,7 +27,7 @@ def OpA : Op<"one_normal_operand_op", []> { // CHECK: if 
(!((this->getOperation()->getOperand(0)->getType().isInteger(32)))) // CHECK-NEXT: return emitOpError("operand #0 must be 32-bit integer"); -def OpB : Op<"one_variadic_operand_op", []> { +def OpB : NS_Op<"one_variadic_operand_op", []> { let arguments = (ins Variadic:$input); } @@ -30,7 +36,7 @@ def OpB : Op<"one_variadic_operand_op", []> { // CHECK-NOT: assert // CHECK: tblgen_state->addOperands(input); -def OpC : Op<"all_variadic_inputs_op", [SameVariadicOperandSize]> { +def OpC : NS_Op<"all_variadic_inputs_op", [SameVariadicOperandSize]> { let arguments = (ins Variadic:$input1, Variadic:$input2); } @@ -48,7 +54,7 @@ def OpC : Op<"all_variadic_inputs_op", [SameVariadicOperandSize]> { // CHECK-NEXT: tblgen_state->addOperands(input1); // CHECK-NEXT: tblgen_state->addOperands(input2); -def OpD : Op<"mix_variadic_and_normal_inputs_op", [SameVariadicOperandSize]> { +def OpD : NS_Op<"mix_variadic_and_normal_inputs_op", [SameVariadicOperandSize]> { let arguments = (ins Variadic:$input1, Tensor:$input2, Variadic:$input3); } @@ -72,7 +78,7 @@ def OpD : Op<"mix_variadic_and_normal_inputs_op", [SameVariadicOperandSize]> { // CHECK-NEXT: tblgen_state->operands.push_back(input2); // CHECK-NEXT: tblgen_state->addOperands(input3); -def OpE : Op<"one_variadic_among_multi_normal_inputs_op", []> { +def OpE : NS_Op<"one_variadic_among_multi_normal_inputs_op", []> { let arguments = (ins Tensor:$input1, Tensor:$input2, Variadic:$input3, Tensor:$input4, Tensor:$input5); } diff --git a/mlir/test/mlir-tblgen/op-result.td b/mlir/test/mlir-tblgen/op-result.td index 714c62c..1adfef8 100644 --- a/mlir/test/mlir-tblgen/op-result.td +++ b/mlir/test/mlir-tblgen/op-result.td @@ -2,7 +2,13 @@ include "mlir/IR/OpBase.td" -def OpA : Op<"one_normal_result_op", []> { +def Test_Dialect : Dialect { + let name = "test"; +} +class NS_Op traits> : + Op; + +def OpA : NS_Op<"one_normal_result_op", []> { let results = (outs I32:$result); } @@ -18,7 +24,7 @@ def OpA : Op<"one_normal_result_op", []> { // CHECK: if (!((this->getOperation()->getResult(0)->getType().isInteger(32)))) // CHECK-NEXT: return emitOpError("result #0 must be 32-bit integer"); -def OpB : Op<"same_input_output_type_op", [SameValueType]> { +def OpB : NS_Op<"same_input_output_type_op", [SameValueType]> { let arguments = (ins I32:$x); let results = (outs I32:$y); } @@ -29,7 +35,7 @@ def OpB : Op<"same_input_output_type_op", [SameValueType]> { // CHECK: void OpB::build(Builder *, OperationState *tblgen_state, Value *x) // CHECK: tblgen_state->addTypes({x->getType()}); -def OpC : Op<"three_normal_result_op", []> { +def OpC : NS_Op<"three_normal_result_op", []> { let results = (outs I32:$x, /*unnamed*/I32, I32:$z); } @@ -40,7 +46,7 @@ def OpC : Op<"three_normal_result_op", []> { // CHECK-NEXT: tblgen_state->types.push_back(z) def IntegerTypeAttr : TypeAttrBase<"IntegerType", "Integer type attribute">; -def OpD : Op<"type_attr_as_result_type", [FirstAttrDerivedResultType]> { +def OpD : NS_Op<"type_attr_as_result_type", [FirstAttrDerivedResultType]> { let arguments = (ins I32:$x, IntegerTypeAttr:$attr, F32Attr:$f32); let results = (outs Tensor:$y); } @@ -49,7 +55,7 @@ def OpD : Op<"type_attr_as_result_type", [FirstAttrDerivedResultType]> { // CHECK: void OpD::build(Builder *, OperationState *tblgen_state, Value *x, TypeAttr attr, FloatAttr f32) // CHECK: tblgen_state->addTypes({attr.getValue()}); -def OpE : Op<"value_attr_as_result_type", [FirstAttrDerivedResultType]> { +def OpE : NS_Op<"value_attr_as_result_type", [FirstAttrDerivedResultType]> { let arguments = (ins 
I32:$x, F32Attr:$attr); let results = (outs Tensor:$y); } @@ -58,7 +64,7 @@ def OpE : Op<"value_attr_as_result_type", [FirstAttrDerivedResultType]> { // CHECK: void OpE::build(Builder *, OperationState *tblgen_state, Value *x, FloatAttr attr) // CHECK: tblgen_state->addTypes({attr.getType()}); -def OpF : Op<"one_variadic_result_op", []> { +def OpF : NS_Op<"one_variadic_result_op", []> { let results = (outs Variadic:$x); } @@ -70,7 +76,7 @@ def OpF : Op<"one_variadic_result_op", []> { // CHECK-NOT: assert // CHECK: tblgen_state->addTypes(x); -def OpG : Op<"one_normal_and_one_variadic_result_op", []> { +def OpG : NS_Op<"one_normal_and_one_variadic_result_op", []> { let results = (outs I32:$x, Variadic:$y); } @@ -87,7 +93,7 @@ def OpG : Op<"one_normal_and_one_variadic_result_op", []> { // CHECK-NEXT: tblgen_state->addTypes(resultTypes); -def OpH : Op<"all_variadic_results_op", [SameVariadicResultSize]> { +def OpH : NS_Op<"all_variadic_results_op", [SameVariadicResultSize]> { let results = (outs Variadic:$output1, Variadic:$output2); } @@ -106,7 +112,7 @@ def OpH : Op<"all_variadic_results_op", [SameVariadicResultSize]> { // CHECK-NEXT: tblgen_state->addTypes(output1); // CHECK-NEXT: tblgen_state->addTypes(output2); -def OpI : Op<"mix_variadic_and_normal_results_op", [SameVariadicResultSize]> { +def OpI : NS_Op<"mix_variadic_and_normal_results_op", [SameVariadicResultSize]> { let results = (outs Variadic:$output1, Tensor:$output2, Variadic:$output3); } @@ -130,7 +136,7 @@ def OpI : Op<"mix_variadic_and_normal_results_op", [SameVariadicResultSize]> { // CHECK-NEXT: tblgen_state->types.push_back(output2); // CHECK-NEXT: tblgen_state->addTypes(output3); -def OpJ : Op<"one_variadic_among_multi_normal_results_op", []> { +def OpJ : NS_Op<"one_variadic_among_multi_normal_results_op", []> { let results = (outs Tensor:$output1, Tensor:$output2, Variadic:$output3, Tensor:$output4, Tensor:$output5); } @@ -152,7 +158,7 @@ def OpJ : Op<"one_variadic_among_multi_normal_results_op", []> { // Test that if the only operand is variadic, we acess the first value in the // pack to set result type // --- -def OpK : Op<"only_input_is_variadic_with_same_value_type_op", [SameValueType]> { +def OpK : NS_Op<"only_input_is_variadic_with_same_value_type_op", [SameValueType]> { let arguments = (ins Variadic:$input); let results = (outs Tensor:$result); } diff --git a/mlir/test/mlir-tblgen/pattern-NativeCodeCall.td b/mlir/test/mlir-tblgen/pattern-NativeCodeCall.td index 317284d..f034f22 100644 --- a/mlir/test/mlir-tblgen/pattern-NativeCodeCall.td +++ b/mlir/test/mlir-tblgen/pattern-NativeCodeCall.td @@ -2,16 +2,22 @@ include "mlir/IR/OpBase.td" +def Test_Dialect : Dialect { + let name = "test"; +} +class NS_Op traits> : + Op; + def CreateOperand : NativeCodeCall<"buildOperand($0, $1)">; def CreateArrayAttr : NativeCodeCall<"$_builder.getArrayAttr({$0, $1})">; def CreateOpResult : NativeCodeCall<"buildOp($0, $1)">; -def NS_AOp : Op<"a_op", []> { +def NS_AOp : NS_Op<"a_op", []> { let arguments = (ins I32:$input1, I32:$input2, I32Attr:$attr); let results = (outs I32:$output); } -def NS_BOp : Op<"b_op", []> { +def NS_BOp : NS_Op<"b_op", []> { let arguments = (ins I32:$input, I32Attr:$attr); let results = (outs I32:$output); } diff --git a/mlir/test/mlir-tblgen/pattern-allof-attr.td b/mlir/test/mlir-tblgen/pattern-allof-attr.td index e6f3799..38f73ff 100644 --- a/mlir/test/mlir-tblgen/pattern-allof-attr.td +++ b/mlir/test/mlir-tblgen/pattern-allof-attr.td @@ -2,6 +2,12 @@ include "mlir/IR/OpBase.td" +def Test_Dialect : Dialect 
{ + let name = "test"; +} +class NS_Op traits> : + Op; + def FirstConstraint : AttrConstraint, "first constraint">; def SecondConstraint : AttrConstraint, @@ -9,7 +15,7 @@ def SecondConstraint : AttrConstraint, def ThirdConstraint : AttrConstraint, "third constraint">; -def OpA : Op<"op_a", []> { +def OpA : NS_Op<"op_a", []> { let arguments = (ins I32Attr:$attr ); diff --git a/mlir/test/mlir-tblgen/pattern-attr.td b/mlir/test/mlir-tblgen/pattern-attr.td index 1790850..b767ef8 100644 --- a/mlir/test/mlir-tblgen/pattern-attr.td +++ b/mlir/test/mlir-tblgen/pattern-attr.td @@ -2,9 +2,15 @@ include "mlir/IR/OpBase.td" +def Test_Dialect : Dialect { + let name = "test"; +} +class NS_Op traits> : + Op; + def MoreConstraint : AttrConstraint, "more constraint">; -def OpA : Op<"op_a", []> { +def OpA : NS_Op<"op_a", []> { let arguments = (ins I32Attr:$required_attr, OptionalAttr:$optional_attr, diff --git a/mlir/test/mlir-tblgen/pattern-benefit.td b/mlir/test/mlir-tblgen/pattern-benefit.td index e72684b..dfe9e67 100644 --- a/mlir/test/mlir-tblgen/pattern-benefit.td +++ b/mlir/test/mlir-tblgen/pattern-benefit.td @@ -4,15 +4,21 @@ include "mlir/IR/OpBase.td" def IfEqual : Constraint">>; +def Test_Dialect : Dialect { + let name = "x"; +} +class NS_Op traits = []> : + Op; + // Define ops to rewrite. def U: Type, "U">; -def X_AddOp : Op<"x.add"> { +def X_AddOp : NS_Op<"add"> { let arguments = (ins U, U); } -def Y_AddOp : Op<"y.add"> { +def Y_AddOp : NS_Op<"add"> { let arguments = (ins U, U, U); } -def Z_AddOp : Op<"z.add"> { +def Z_AddOp : NS_Op<"add"> { let arguments = (ins U); } @@ -25,4 +31,4 @@ def : Pat<(X_AddOp (X_AddOp $lhs, $rhs), $rhs), (Y_AddOp $lhs, $rhs, $rhs)>; def : Pat<(X_AddOp $lhs, $rhs), (Z_AddOp $lhs), [(IfEqual $lhs, $rhs)], (addBenefit 100)>; // CHECK-LABEL: struct GeneratedConvert1 -// CHECK: GeneratedConvert1(MLIRContext *context) : RewritePattern("x.add", 101, context) {} \ No newline at end of file +// CHECK: GeneratedConvert1(MLIRContext *context) : RewritePattern("x.add", 101, context) {} diff --git a/mlir/test/mlir-tblgen/pattern-bound-symbol.td b/mlir/test/mlir-tblgen/pattern-bound-symbol.td index 805a032..bde8b93 100644 --- a/mlir/test/mlir-tblgen/pattern-bound-symbol.td +++ b/mlir/test/mlir-tblgen/pattern-bound-symbol.td @@ -2,22 +2,28 @@ include "mlir/IR/OpBase.td" -def OpA : Op<"op_a", []> { +def Test_Dialect : Dialect { + let name = "test"; +} +class NS_Op traits> : + Op; + +def OpA : NS_Op<"op_a", []> { let arguments = (ins I32:$operand, I32Attr:$attr); let results = (outs I32:$result); } -def OpB : Op<"op_b", []> { +def OpB : NS_Op<"op_b", []> { let arguments = (ins I32:$operand); let results = (outs I32:$result); } -def OpC : Op<"op_c", []> { +def OpC : NS_Op<"op_c", []> { let arguments = (ins I32:$operand); let results = (outs I32:$result); } -def OpD : Op<"op_d", []> { +def OpD : NS_Op<"op_d", []> { let arguments = (ins I32:$input1, I32:$input2, I32:$input3, I32Attr:$attr); let results = (outs I32:$result); } diff --git a/mlir/test/mlir-tblgen/pattern-multi-result-op.td b/mlir/test/mlir-tblgen/pattern-multi-result-op.td index c57cd9a..8acfeb8 100644 --- a/mlir/test/mlir-tblgen/pattern-multi-result-op.td +++ b/mlir/test/mlir-tblgen/pattern-multi-result-op.td @@ -2,17 +2,23 @@ include "mlir/IR/OpBase.td" -def ThreeResultOp : Op<"three_result_op", []> { +def Test_Dialect : Dialect { + let name = "test"; +} +class NS_Op traits> : + Op; + +def ThreeResultOp : NS_Op<"three_result_op", []> { let arguments = (ins I32:$input); let results = (outs I32:$r1, I32:$r2, I32:$r3); } 
-def TwoResultOp : Op<"two_result_op", []> { +def TwoResultOp : NS_Op<"two_result_op", []> { let arguments = (ins I32:$input); let results = (outs I32:$r1, I32:$r2); } -def OneResultOp : Op<"one_result_op", []> { +def OneResultOp : NS_Op<"one_result_op", []> { let arguments = (ins I32:$input); let results = (outs I32:$r1); } @@ -54,7 +60,7 @@ def : Pattern<(ThreeResultOp $input), [ // Test more result patterns than needed for replacement // --- -def AdditionalOp : Op<"additional_one_result_op", []> { +def AdditionalOp : NS_Op<"additional_one_result_op", []> { let arguments = (ins I32:$input); let results = (outs I32:$r1); } diff --git a/mlir/test/mlir-tblgen/pattern.td b/mlir/test/mlir-tblgen/pattern.td index 1ca9643..bb5055a 100644 --- a/mlir/test/mlir-tblgen/pattern.td +++ b/mlir/test/mlir-tblgen/pattern.td @@ -2,12 +2,18 @@ include "mlir/IR/OpBase.td" -def OpA : Op<"op_a", []> { +def Test_Dialect : Dialect { + let name = ""; +} +class NS_Op traits> : + Op; + +def OpA : NS_Op<"op_a", []> { let arguments = (ins I32:$operand, I32Attr:$attr); let results = (outs I32:$result); } -def OpB : Op<"op_b", []> { +def OpB : NS_Op<"op_b", []> { let arguments = (ins I32:$operand, I32Attr:$attr); let results = (outs I32:$result); } diff --git a/mlir/test/mlir-tblgen/predicate.td b/mlir/test/mlir-tblgen/predicate.td index 3c05604..0c57c85 100644 --- a/mlir/test/mlir-tblgen/predicate.td +++ b/mlir/test/mlir-tblgen/predicate.td @@ -2,17 +2,23 @@ include "mlir/IR/OpBase.td" +def Test_Dialect : Dialect { + let name = "test"; +} +class NS_Op traits> : + Op; + def I32OrF32 : Type, "32-bit integer or floating-point type">; -def OpA : Op<"op_for_CPred_containing_multiple_same_placeholder", []> { +def OpA : NS_Op<"op_for_CPred_containing_multiple_same_placeholder", []> { let arguments = (ins I32OrF32:$x); } // CHECK-LABEL: OpA::verify // CHECK: if (!((this->getOperation()->getOperand(0)->getType().isInteger(32) || this->getOperation()->getOperand(0)->getType().isF32()))) -def OpB : Op<"op_for_AllOf_PredOpTrait", [ +def OpB : NS_Op<"op_for_AllOf_PredOpTrait", [ PredOpTrait<"both first and second holds", AllOf<[CPred<"first">, CPred<"second">]>>]> { } @@ -20,7 +26,7 @@ def OpB : Op<"op_for_AllOf_PredOpTrait", [ // CHECK-LABEL: OpB::verify // CHECK: if (!(((first)) && ((second)))) -def OpC : Op<"op_for_TCopVTEtIs", [ +def OpC : NS_Op<"op_for_TCopVTEtIs", [ PredOpTrait<"first operand has i32 element type", TCopVTEtIs<0, I32>>]> { let arguments = (ins Tensor:$x); @@ -30,7 +36,7 @@ def OpC : Op<"op_for_TCopVTEtIs", [ // CHECK: if (!((((*this->getOperation()).getNumOperands() > 0)) && (((*this->getOperation()).getOperand(0)->getType().isa())) && (((*this->getOperation()).getOperand(0)->getType().cast().getElementType().isInteger(32))))) -def OpD : Op<"op_for_TCOpVTEtIsSameAs", [ +def OpD : NS_Op<"op_for_TCOpVTEtIsSameAs", [ PredOpTrait<"first operand is a vector or tensor with the same " "elemental type as itself", TCopVTEtIsSameAs<0, 0>>]> { @@ -42,7 +48,7 @@ def OpD : Op<"op_for_TCOpVTEtIsSameAs", [ // CHECK-NEXT: return emitOpError("failed to verify that first operand is a vector or tensor with the same elemental type as itself"); -def OpE : Op<"op_for_TCresVTEtIsSameAsOp", [ +def OpE : NS_Op<"op_for_TCresVTEtIsSameAsOp", [ PredOpTrait<"first operand is a vector or tensor with the same " "elemental type as first result", TCresVTEtIsSameAsOp<0, 0>>]> { @@ -55,7 +61,7 @@ def OpE : Op<"op_for_TCresVTEtIsSameAsOp", [ // CHECK-NEXT: return emitOpError("failed to verify that first operand is a vector or tensor with the 
same elemental type as first result"); -def OpF : Op<"op_for_int_min_val", []> { +def OpF : NS_Op<"op_for_int_min_val", []> { let arguments = (ins Confined]>:$attr); } @@ -63,7 +69,7 @@ def OpF : Op<"op_for_int_min_val", []> { // CHECK: (tblgen_attr.cast().getInt() >= 10) // CHECK-SAME: return emitOpError("attribute 'attr' failed to satisfy constraint: 32-bit integer attribute whose minimal value is 10"); -def OpG : Op<"op_for_arr_min_count", []> { +def OpG : NS_Op<"op_for_arr_min_count", []> { let arguments = (ins Confined]>:$attr); } @@ -71,7 +77,7 @@ def OpG : Op<"op_for_arr_min_count", []> { // CHECK: (tblgen_attr.cast().size() >= 8) // CHECK-SAME: return emitOpError("attribute 'attr' failed to satisfy constraint: array attribute with at least 8 elements"); -def OpH : Op<"op_for_arr_value_at_index", []> { +def OpH : NS_Op<"op_for_arr_value_at_index", []> { let arguments = (ins Confined]>:$attr); } @@ -79,7 +85,7 @@ def OpH : Op<"op_for_arr_value_at_index", []> { // CHECK: (((tblgen_attr.cast().size() > 0)) && ((tblgen_attr.cast().getValue()[0].cast().getInt() == 8))))) // CHECK-SAME: return emitOpError("attribute 'attr' failed to satisfy constraint: array attribute whose 0-th element must be 8"); -def OpI: Op<"op_for_arr_min_value_at_index", []> { +def OpI: NS_Op<"op_for_arr_min_value_at_index", []> { let arguments = (ins Confined]>:$attr); } diff --git a/mlir/test/mlir-tblgen/reference-impl.td b/mlir/test/mlir-tblgen/reference-impl.td index be77c63..8bcab6e 100644 --- a/mlir/test/mlir-tblgen/reference-impl.td +++ b/mlir/test/mlir-tblgen/reference-impl.td @@ -5,7 +5,13 @@ include "mlir/IR/OpBase.td" #endif // OP_BASE -def X_AddOp : Op<"x.add">, +def X_Dialect : Dialect { + let name = "x"; +} +class X_Op traits = []> : + Op; + +def X_AddOp : X_Op<"add">, Arguments<(ins Tensor:$A, Tensor:$B)>, Results<(outs Tensor: $C)> { // TODO: extract referenceImplementation to Op. -- 2.7.4
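
Usage sketch (illustrative only, not part of the diff above): the TableGen below shows how a dialect would be expected to adopt the new `Dialect` record added to OpBase.td in this patch. The `Example_Dialect`, `Example_Op`, and `example.foo` names are hypothetical. With this change the op mnemonic is written without the dialect prefix, and Operator::getOperationName() assembles the full name as "<dialect name>.<mnemonic>" (for example, the "std" dialect plus the "addf" mnemonic yields "std.addf").

include "mlir/IR/OpBase.td"

// Hypothetical dialect definition; `summary` and `description` are the
// placeholder documentation fields that the doc generator is expected to
// emit in a follow-up.
def Example_Dialect : Dialect {
  let name = "example";
  let summary = "example dialect used for illustration";
  let description = "Longer documentation for the dialect goes here.";
}

// Base class that threads the dialect into every op definition of the dialect.
class Example_Op<string mnemonic, list<OpTrait> traits = []> :
    Op<Example_Dialect, mnemonic, traits>;

// Registered as "example.foo", since the dialect name is prepended to the
// mnemonic by Operator::getOperationName().
def Example_FooOp : Example_Op<"foo", [NoSideEffect]> {
  let summary = "illustrative op";
  let arguments = (ins I32:$input);
  let results = (outs I32:$output);
}

This mirrors the in-tree changes above: Standard_Dialect, LLVM_Dialect, NVVM_Dialect, Linalg_Dialect, quant_Dialect, and fxpmath_Dialect currently set only `name`, and their ops drop the hard-coded "std."/"llvm."/etc. prefixes from their mnemonics.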