From e088f93f0da3d6a2904f0f4fb22759c40914dce3 Mon Sep 17 00:00:00 2001
From: River Riddle
Date: Wed, 8 May 2019 14:46:39 -0700
Subject: [PATCH] Simplify the parser/printer of ConstantOp now that all
 attributes have types.

This has the added benefit of removing type redundancy from the pretty form.
As a consequence, IntegerAttr/FloatAttr will now always print the type even
if it is i64/f64.

--
PiperOrigin-RevId: 247295828
---
 mlir/lib/IR/AsmPrinter.cpp                         |  8 ++--
 mlir/lib/StandardOps/Ops.cpp                       | 19 ++--------
 mlir/test/FxpMathOps/lower-uniform-casts.mlir      |  6 +--
 .../FxpMathOps/lower-uniform-real-math-addew.mlir  |  2 +-
 .../FxpMathOps/lower-uniform-real-math-mulew.mlir  |  6 +--
 mlir/test/IR/core-ops.mlir                         | 18 ++++-----
 mlir/test/IR/invalid-ops.mlir                      | 20 +++++-----
 mlir/test/IR/parser.mlir                           | 21 +++++------
 mlir/test/LLVMIR/convert-argattrs.mlir             |  4 +-
 mlir/test/LLVMIR/convert-to-llvmir.mlir            |  2 +-
 mlir/test/Quantization/canonicalize.mlir           |  4 +-
 mlir/test/Quantization/convert-const.mlir          | 40 ++++++++++----------
 .../Vectorize/lower_vector_transfers.mlir          |  4 +-
 mlir/test/Transforms/Vectorize/materialize.mlir    |  2 +-
 .../Vectorize/materialize_vectors_1d_to_1d.mlir    | 16 ++++----
 .../Vectorize/materialize_vectors_2d_to_1d.mlir    | 12 +++---
 .../Vectorize/materialize_vectors_2d_to_2d.mlir    |  8 ++--
 mlir/test/Transforms/Vectorize/vector_utils.mlir   | 12 +++---
 mlir/test/Transforms/Vectorize/vectorize_1d.mlir   |  8 ++--
 mlir/test/Transforms/Vectorize/vectorize_2d.mlir   | 10 ++---
 mlir/test/Transforms/canonicalize.mlir             | 28 +++++++-------
 mlir/test/Transforms/constant-fold.mlir            | 44 +++++++++++-----------
 22 files changed, 139 insertions(+), 155 deletions(-)

diff --git a/mlir/lib/IR/AsmPrinter.cpp b/mlir/lib/IR/AsmPrinter.cpp
index aeec1ab..33dc0b7 100644
--- a/mlir/lib/IR/AsmPrinter.cpp
+++ b/mlir/lib/IR/AsmPrinter.cpp
@@ -526,8 +526,8 @@ void ModulePrinter::printAttributeOptionalType(Attribute attr,
     bool isSigned = intAttr.getType().isIndex() ||
                     intAttr.getType().getIntOrFloatBitWidth() != 1;
     intAttr.getValue().print(os, isSigned);
-    // Print type unless i64 (parser defaults i64 in absence of type).
-    if (includeType && !intAttr.getType().isInteger(64)) {
+    // Print the type.
+    if (includeType) {
       os << " : ";
       printType(intAttr.getType());
     }
@@ -536,8 +536,8 @@ void ModulePrinter::printAttributeOptionalType(Attribute attr,
   case Attribute::Kind::Float: {
     auto floatAttr = attr.cast<FloatAttr>();
     printFloatValue(floatAttr.getValue(), os);
-    // Print type unless f64 (parser defaults to f64 in absence of type).
-    if (includeType && !floatAttr.getType().isF64()) {
+    // Print the type.
+    if (includeType) {
       os << " : ";
       printType(floatAttr.getType());
     }
diff --git a/mlir/lib/StandardOps/Ops.cpp b/mlir/lib/StandardOps/Ops.cpp
index c89d66d..64e7b57c 100644
--- a/mlir/lib/StandardOps/Ops.cpp
+++ b/mlir/lib/StandardOps/Ops.cpp
@@ -1127,9 +1127,7 @@ static void printConstantOp(OpAsmPrinter *p, ConstantOp &op) {
   if (op.getAttrs().size() > 1)
     *p << ' ';
-  *p << op.getValue();
-  if (!op.getValue().isa<FunctionAttr>())
-    *p << " : " << op.getType();
+  p->printAttributeAndType(op.getValue());
 }
 
 static ParseResult parseConstantOp(OpAsmParser *parser,
@@ -1141,19 +1139,8 @@ static ParseResult parseConstantOp(OpAsmParser *parser,
      parser->parseAttribute(valueAttr, "value", result->attributes))
    return failure();
 
-  // 'constant' taking a function reference doesn't get a redundant type
-  // specifier.  The attribute itself carries it.
-  if (auto fnAttr = valueAttr.dyn_cast<FunctionAttr>())
-    return parser->addTypeToList(fnAttr.getValue()->getType(), result->types);
-
-  if (auto intAttr = valueAttr.dyn_cast<IntegerAttr>()) {
-    type = intAttr.getType();
-  } else if (auto fpAttr = valueAttr.dyn_cast<FloatAttr>()) {
-    type = fpAttr.getType();
-  } else if (parser->parseColonType(type)) {
-    return failure();
-  }
-  return parser->addTypeToList(type, result->types);
+  // Add the attribute type to the list.
+  return parser->addTypeToList(valueAttr.getType(), result->types);
 }
 
 /// The constant op requires an attribute, and furthermore requires that it
diff --git a/mlir/test/FxpMathOps/lower-uniform-casts.mlir b/mlir/test/FxpMathOps/lower-uniform-casts.mlir
index 3bd94a4..14d98a0 100644
--- a/mlir/test/FxpMathOps/lower-uniform-casts.mlir
+++ b/mlir/test/FxpMathOps/lower-uniform-casts.mlir
@@ -5,7 +5,7 @@
 !type_input = type tensor<4x!quant.uniform>
 !type_result = type tensor<4xf32>
 func @dequantize_per_layer_fixedpoint(%arg0 : !type_input) -> !type_result {
-  // CHECK: %cst = constant splat<tensor<4xf32>, 6.250000e-02> : tensor<4xf32>
+  // CHECK: %cst = constant splat<tensor<4xf32>, 6.250000e-02>
   // CHECK-NEXT: %0 = "quant.scast"(%arg0) : (tensor<4x!quant.uniform>) -> tensor<4xi8>
   // CHECK-NEXT: %1 = "fxpmath.convertis"(%0) : (tensor<4xi8>) -> tensor<4xi32>
   // CHECK-NEXT: %2 = "fxpmath.convertistof"(%1) : (tensor<4xi32>) -> tensor<4xf32>
@@ -20,8 +20,8 @@
 !type_input = type tensor<4x!quant.uniform>
 !type_result = type tensor<4xf32>
 func @dequantize_per_layer_affine(%arg0 : !type_input) -> !type_result {
-  // CHECK: %cst = constant splat<tensor<4xi32>, 36> : tensor<4xi32>
-  // CHECK-NEXT: %cst_0 = constant splat<tensor<4xf32>, 6.250000e-02> : tensor<4xf32>
+  // CHECK: %cst = constant splat<tensor<4xi32>, 36>
+  // CHECK-NEXT: %cst_0 = constant splat<tensor<4xf32>, 6.250000e-02>
   // CHECK-NEXT: %0 = "quant.scast"(%arg0) : (tensor<4x!quant.uniform>) -> tensor<4xi8>
   // CHECK-NEXT: %1 = "fxpmath.convertis"(%0) : (tensor<4xi8>) -> tensor<4xi32>
   // CHECK-NEXT: %2 = addi %1, %cst : tensor<4xi32>
diff --git a/mlir/test/FxpMathOps/lower-uniform-real-math-addew.mlir b/mlir/test/FxpMathOps/lower-uniform-real-math-addew.mlir
index a1c2e2d..5f73979 100644
--- a/mlir/test/FxpMathOps/lower-uniform-real-math-addew.mlir
+++ b/mlir/test/FxpMathOps/lower-uniform-real-math-addew.mlir
@@ -28,7 +28,7 @@ func @real_addew_fixedpoint_isomorphic(%arg0 : !type_lhs, %arg1: !type_rhs) -> !
 !type_rhs = type tensor<4x!quant.uniform>
 !type_result = type tensor<4x!quant.uniform>
 func @real_addew_affine_isomorphic(%arg0 : !type_lhs, %arg1: !type_rhs) -> !type_result {
-  // CHECK-NEXT: %cst = constant splat<tensor<4xi16>, 5> : tensor<4xi16>
+  // CHECK-NEXT: %cst = constant splat<tensor<4xi16>, 5>
   // CHECK-NEXT: %0 = "quant.scast"(%arg0) : (tensor<4x!quant.uniform>) -> tensor<4xi8>
   // CHECK-NEXT: %1 = "quant.scast"(%arg1) : (tensor<4x!quant.uniform>) -> tensor<4xi8>
   // CHECK-NEXT: %2 = "fxpmath.convertis"(%0) : (tensor<4xi8>) -> tensor<4xi16>
diff --git a/mlir/test/FxpMathOps/lower-uniform-real-math-mulew.mlir b/mlir/test/FxpMathOps/lower-uniform-real-math-mulew.mlir
index d62cfc2..edfdc8b6 100644
--- a/mlir/test/FxpMathOps/lower-uniform-real-math-mulew.mlir
+++ b/mlir/test/FxpMathOps/lower-uniform-real-math-mulew.mlir
@@ -31,9 +31,9 @@ func @real_mulew_fixedpoint(%arg0 : !type_lhs, %arg1: !type_rhs) -> !type_result
 !type_result = type tensor<4x!quant.uniform>
 func @real_mulew_affine_clamp(%arg0 : !type_lhs, %arg1: !type_rhs) -> !type_result {
   // Just verify that the affine adds/constants and clamps are present.
-  // CHECK: %cst = constant splat<tensor<4xi32>, 3> : tensor<4xi32>
-  // CHECK: %cst_0 = constant splat<tensor<4xi32>, 5> : tensor<4xi32>
-  // CHECK: %cst_1 = constant splat<tensor<4xi32>, -9> : tensor<4xi32>
+  // CHECK: %cst = constant splat<tensor<4xi32>, 3>
+  // CHECK: %cst_0 = constant splat<tensor<4xi32>, 5>
+  // CHECK: %cst_1 = constant splat<tensor<4xi32>, -9>
   // CHECK: addi %2, %cst : tensor<4xi32>
   // CHECK: addi %3, %cst_0 : tensor<4xi32>
   // CHECK: muli %4, %5 : tensor<4xi32>
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
index fd9dc04..4cb80c8 100644
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -93,14 +93,14 @@ func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index) {
   // CHECK: %f_2 = constant @affine_apply : () -> ()
   %12 = constant @affine_apply : () -> ()
 
-  // CHECK: %cst_3 = constant splat<vector<4xi32>, 0> : vector<4xi32>
-  %13 = constant splat<vector<4 x i32>, 0> : vector<4 x i32>
+  // CHECK: %cst_3 = constant splat<vector<4xi32>, 0>
+  %13 = constant splat<vector<4 x i32>, 0>
 
-  // CHECK: %cst_4 = constant splat<tensor<42xi32>, 0> : tensor<42xi32>
-  %tci32 = constant splat<tensor<42 x i32>, 0> : tensor<42 x i32>
+  // CHECK: %cst_4 = constant splat<tensor<42xi32>, 0>
+  %tci32 = constant splat<tensor<42 x i32>, 0>
 
-  // CHECK: %cst_5 = constant splat<vector<42xi32>, 0> : vector<42xi32>
-  %vci32 = constant splat<vector<42 x i32>, 0> : vector<42 x i32>
+  // CHECK: %cst_5 = constant splat<vector<42xi32>, 0>
+  %vci32 = constant splat<vector<42 x i32>, 0>
 
   // CHECK: %{{[0-9]+}} = cmpi "eq", %{{[0-9]+}}, %{{[0-9]+}} : i32
   %14 = cmpi "eq", %i3, %i4 : i32
@@ -253,9 +253,9 @@ func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index) {
   // CHECK: %{{[0-9]+}} = xor %cst_4, %cst_4 : tensor<42xi32>
   %63 = xor %tci32, %tci32 : tensor<42 x i32>
 
-  %64 = constant splat<vector<4 x f32>, 0.> : vector<4 x f32>
-  %tcf32 = constant splat<tensor<42 x f32>, 0.> : tensor<42 x f32>
-  %vcf32 = constant splat<vector<4 x f32>, 0.> : vector<4 x f32>
+  %64 = constant splat<vector<4 x f32>, 0.>
+  %tcf32 = constant splat<tensor<42 x f32>, 0.>
+  %vcf32 = constant splat<vector<4 x f32>, 0.>
 
   // CHECK: %{{[0-9]+}} = cmpf "ogt", %{{[0-9]+}}, %{{[0-9]+}} : f32
   %65 = cmpf "ogt", %f3, %f4 : f32
diff --git a/mlir/test/IR/invalid-ops.mlir b/mlir/test/IR/invalid-ops.mlir
index 419c6ad..645df7f 100644
--- a/mlir/test/IR/invalid-ops.mlir
+++ b/mlir/test/IR/invalid-ops.mlir
@@ -241,7 +241,7 @@ func @func_with_ops(i32, i32) {
 func @func_with_ops() {
 ^bb0:
-  %c = constant splat<vector<42 x i32>, 0> : vector<42 x i32>
+  %c = constant splat<vector<42 x i32>, 0>
   // expected-error@+1 {{op requires the same shape for all operands and results}}
   %r = "std.cmpi"(%c, %c) {predicate: 0} : (vector<42 x i32>, vector<42 x i32>) -> vector<41 x i1>
 }
@@ -390,7 +390,7 @@ func @test_vector.transfer_read(memref) {
 func @test_vector.transfer_write(memref) {
 ^bb0(%arg0: memref):
   %c3 = constant 3 : index
-  %cst = constant splat<vector<128 x f32>, 3.0> : vector<128 x f32>
+  %cst = constant splat<vector<128 x f32>, 3.0>
   // expected-error@+1 {{expected 5 operand types but had 4}}
   %0 = "vector.transfer_write"(%cst, %arg0, %c3, %c3, %c3) : (vector<128xf32>, memref, index, index) -> ()
 }
@@ -400,7 +400,7 @@ func @test_vector.transfer_write(memref) {
 func @test_vector.transfer_write(memref) {
 ^bb0(%arg0: memref):
   %c3 = constant 3 : index
-  %cst = constant splat<vector<128 x f32>, 3.0> : vector<128 x f32>
+  %cst = constant splat<vector<128 x f32>, 3.0>
   // expected-error@+1 {{expects 4 operands (of which 2 indices)}}
   vector.transfer_write %cst, %arg0[%c3, %c3, %c3] : vector<128xf32>, memref
 }
@@ -410,7 +410,7 @@ func @test_vector.transfer_write(memref) {
 func @test_vector.transfer_write(memref) {
 ^bb0(%arg0: memref):
   %c3 = constant 3 : index
-  %cst = constant splat<vector<128 x f32>, 3.0> : vector<128 x f32>
+  %cst = constant splat<vector<128 x f32>, 3.0>
   // expected-error@+1 {{requires an AffineMapAttr named 'permutation_map'}}
   vector.transfer_write %cst, %arg0[%c3, %c3] : vector<128xf32>, memref
 }
@@ -420,7 +420,7 @@ func @test_vector.transfer_write(memref) {
 func @test_vector.transfer_write(memref) {
 ^bb0(%arg0: memref):
   %c3 = constant 3 : index
-  %cst = constant splat<vector<128 x f32>, 3.0> : vector<128 x f32>
+  %cst = constant splat<vector<128 x f32>, 3.0>
   // expected-error@+1 {{requires an AffineMapAttr named 'permutation_map'}}
   vector.transfer_write %cst, %arg0[%c3, %c3] {perm: (d0)->(d0)} : vector<128xf32>, memref
 }
@@ -430,7 +430,7 @@ func @test_vector.transfer_write(memref) {
 func @test_vector.transfer_write(memref) {
 ^bb0(%arg0: memref):
   %c3 = constant 3 : index
-  %cst = constant splat<vector<128 x f32>, 3.0> : vector<128 x f32>
+  %cst = constant splat<vector<128 x f32>, 3.0>
   // expected-error@+1 {{requires a permutation_map with input dims of the same rank as the memref type}}
   vector.transfer_write %cst, %arg0[%c3, %c3] {permutation_map: (d0)->(d0)} : vector<128xf32>, memref
 }
@@ -440,7 +440,7 @@ func @test_vector.transfer_write(memref) {
 func @test_vector.transfer_write(memref) {
 ^bb0(%arg0: memref):
   %c3 = constant 3 : index
-  %cst = constant splat<vector<128 x f32>, 3.0> : vector<128 x f32>
+  %cst = constant splat<vector<128 x f32>, 3.0>
   // expected-error@+1 {{requires a permutation_map with result dims of the same rank as the vector type}}
   vector.transfer_write %cst, %arg0[%c3, %c3] {permutation_map: (d0, d1)->(d0, d1)} : vector<128xf32>, memref
 }
@@ -450,7 +450,7 @@ func @test_vector.transfer_write(memref) {
 func @test_vector.transfer_write(memref) {
 ^bb0(%arg0: memref):
   %c3 = constant 3 : index
-  %cst = constant splat<vector<128 x f32>, 3.0> : vector<128 x f32>
+  %cst = constant splat<vector<128 x f32>, 3.0>
   // expected-error@+1 {{requires a projected permutation_map (at most one dim or the zero constant can appear in each result)}}
   vector.transfer_write %cst, %arg0[%c3, %c3] {permutation_map: (d0, d1)->(d0 + d1)} : vector<128xf32>, memref
 }
@@ -460,7 +460,7 @@ func @test_vector.transfer_write(memref) {
 func @test_vector.transfer_write(memref) {
 ^bb0(%arg0: memref):
   %c3 = constant 3 : index
-  %cst = constant splat<vector<128 x f32>, 3.0> : vector<128 x f32>
+  %cst = constant splat<vector<128 x f32>, 3.0>
   // expected-error@+1 {{requires a projected permutation_map (at most one dim or the zero constant can appear in each result)}}
   vector.transfer_write %cst, %arg0[%c3, %c3] {permutation_map: (d0, d1)->(d0 + 1)} : vector<128xf32>, memref
 }
@@ -469,7 +469,7 @@ func @test_vector.transfer_write(memref) {
 ^bb0(%arg0: memref):
   %c3 = constant 3 : index
-  %cst = constant splat<vector<3 x 7 x f32>, 3.0> : vector<3 x 7 x f32>
+  %cst = constant splat<vector<3 x 7 x f32>, 3.0>
   // expected-error@+1 {{requires a permutation_map that is a permutation (found one dim used more than once)}}
   vector.transfer_write %cst, %arg0[%c3, %c3, %c3] {permutation_map: (d0, d1, d2)->(d0, d0)} : vector<3x7xf32>, memref
 }
diff --git a/mlir/test/IR/parser.mlir b/mlir/test/IR/parser.mlir
index 2b28f80..ebce723 100644
--- a/mlir/test/IR/parser.mlir
+++ b/mlir/test/IR/parser.mlir
@@ -348,7 +348,7 @@ func @attributes() {
   // CHECK: "foo"()
   "foo"(){} : ()->()
 
-  // CHECK: "foo"() {a: 1, b: -423, c: [true, false], d: 1.600000e+01} : () -> ()
+  // CHECK: "foo"() {a: 1 : i64, b: -423 : i64, c: [true, false], d: 1.600000e+01 : f64} : () -> ()
   "foo"() {a: 1, b: -423, c: [true, false], d: 16.0 } : () -> ()
 
   // CHECK: "foo"() {map1: #map{{[0-9]+}}}
@@ -369,15 +369,12 @@ func @attributes() {
   // CHECK: "foo"() {set12: [#set{{[0-9]+}}, #set{{[0-9]+}}]}
   "foo"() {set12: [#set1, #set2]} : () -> ()
 
-  // CHECK: "foo"() {d: 1.000000e-09, func: [], i123: 7, if: "foo"} : () -> ()
+  // CHECK: "foo"() {d: 1.000000e-09 : f64, func: [], i123: 7 : i64, if: "foo"} : () -> ()
   "foo"() {if: "foo", func: [], i123: 7, d: 1.e-9} : () -> ()
 
   // CHECK: "foo"() {fn: @attributes : () -> (), if: @ifinst : (index) -> ()} : () -> ()
   "foo"() {fn: @attributes : () -> (), if: @ifinst : (index) -> ()} : () -> ()
 
-  // CHECK: "foo"() {int: 0} : () -> ()
-  "foo"() {int: 0 : i64} : () -> ()
-
   // CHECK: "foo"() {int: 0 : i42} : () -> ()
   "foo"() {int: 0 : i42} : () -> ()
   return
@@ -524,14 +521,14 @@ func @unitAttrs() -> () {
 // CHECK-LABEL: func @floatAttrs
 func @floatAttrs() -> () {
 ^bb0:
-  // CHECK: "foo"() {a: 4.000000e+00, b: 2.000000e+00, c: 7.100000e+00, d: -0.000000e+00} : () -> ()
+  // CHECK: "foo"() {a: 4.000000e+00 : f64, b: 2.000000e+00 : f64, c: 7.100000e+00 : f64, d: -0.000000e+00 : f64} : () -> ()
   "foo"(){a: 4.0, b: 2.0, c: 7.1, d: -0.0} : () -> ()
   return
 }
 
 // CHECK-LABEL: func @externalfuncattr
 func @externalfuncattr() -> ()
-  // CHECK: attributes {dialect.a: "a\22quoted\22string", dialect.b: 4.000000e+00, dialect.c: tensor<*xf32>}
+  // CHECK: attributes {dialect.a: "a\22quoted\22string", dialect.b: 4.000000e+00 : f64, dialect.c: tensor<*xf32>}
   attributes {dialect.a: "a\"quoted\"string", dialect.b: 4.0, dialect.c: tensor<*xf32>}
 
 // CHECK-LABEL: func @funcattrempty
@@ -540,7 +537,7 @@ func @funcattrempty() -> ()
 // CHECK-LABEL: func @funcattr
 func @funcattr() -> ()
-  // CHECK: attributes {dialect.a: "a\22quoted\22string", dialect.b: 4.000000e+00, dialect.c: tensor<*xf32>}
+  // CHECK: attributes {dialect.a: "a\22quoted\22string", dialect.b: 4.000000e+00 : f64, dialect.c: tensor<*xf32>}
   attributes {dialect.a: "a\"quoted\"string", dialect.b: 4.0, dialect.c: tensor<*xf32>} {
 ^bb0:
   return
@@ -843,11 +840,11 @@ func @dialect_attrs()
 // CHECK-LABEL: func @_valid.function$name
 func @_valid.function$name()
 
-// CHECK-LABEL: func @external_func_arg_attrs(i32, i1 {dialect.attr: 10}, i32)
-func @external_func_arg_attrs(i32, i1 {dialect.attr: 10}, i32)
+// CHECK-LABEL: func @external_func_arg_attrs(i32, i1 {dialect.attr: 10 : i64}, i32)
+func @external_func_arg_attrs(i32, i1 {dialect.attr: 10 : i64}, i32)
 
-// CHECK-LABEL: func @func_arg_attrs(%arg0: i1 {dialect.attr: 10})
-func @func_arg_attrs(%arg0: i1 {dialect.attr: 10}) {
+// CHECK-LABEL: func @func_arg_attrs(%arg0: i1 {dialect.attr: 10 : i64})
+func @func_arg_attrs(%arg0: i1 {dialect.attr: 10 : i64}) {
   return
 }
diff --git a/mlir/test/LLVMIR/convert-argattrs.mlir b/mlir/test/LLVMIR/convert-argattrs.mlir
index 983967e..2af616f 100644
--- a/mlir/test/LLVMIR/convert-argattrs.mlir
+++ b/mlir/test/LLVMIR/convert-argattrs.mlir
@@ -1,8 +1,8 @@
 // RUN: mlir-opt -lower-to-llvm %s | FileCheck %s
 
-// CHECK-LABEL: func @check_attributes(%arg0: !llvm<"float*"> {dialect.a: true, dialect.b: 4}) {
-func @check_attributes(%static: memref<10x20xf32> {dialect.a: true, dialect.b: 4 }) {
+// CHECK-LABEL: func @check_attributes(%arg0: !llvm<"float*"> {dialect.a: true, dialect.b: 4 : i64}) {
+func @check_attributes(%static: memref<10x20xf32> {dialect.a: true, dialect.b: 4 : i64 }) {
   return
 }
diff --git a/mlir/test/LLVMIR/convert-to-llvmir.mlir b/mlir/test/LLVMIR/convert-to-llvmir.mlir
index 89250db..905a473 100644
--- a/mlir/test/LLVMIR/convert-to-llvmir.mlir
+++ b/mlir/test/LLVMIR/convert-to-llvmir.mlir
@@ -360,7 +360,7 @@ func @multireturn_caller() {
 func @vector_ops(vector<4xf32>, vector<4xi1>, vector<4xi64>) -> vector<4xf32> {
 ^bb0(%arg0: vector<4xf32>, %arg1: vector<4xi1>, %arg2: vector<4xi64>):
   // CHECK-NEXT: %0 = llvm.constant(splat<vector<4xf32>, 4.200000e+01>) : !llvm<"<4 x float>">
-  %0 = constant splat<vector<4xf32>, 42.> : vector<4xf32>
+  %0 = constant splat<vector<4xf32>, 42.>
   // CHECK-NEXT: %1 = llvm.fadd %arg0, %0 : !llvm<"<4 x float>">
   %1 = addf %arg0, %0 : vector<4xf32>
   // CHECK-NEXT: %2 = llvm.sdiv %arg2, %arg2 : !llvm<"<4 x i64>">
diff --git a/mlir/test/Quantization/canonicalize.mlir b/mlir/test/Quantization/canonicalize.mlir
index abc851c..5b3d20c 100644
--- a/mlir/test/Quantization/canonicalize.mlir
+++ b/mlir/test/Quantization/canonicalize.mlir
@@ -5,7 +5,7 @@
 func @redundant_scast() -> tensor<4xi8> {
   // CHECK-NEXT: constant splat<tensor<4xi8>, 10>
   // CHECK-NEXT: return
-  %cst = constant splat<tensor<4xi8>, 5> : tensor<4xi8>
+  %cst = constant splat<tensor<4xi8>, 5>
   %1 = "quant.scast"(%cst) : (tensor<4xi8>) -> tensor<4x!quant.uniform>
   %2 = "quant.scast"(%1) : (tensor<4x!quant.uniform>) -> tensor<4xi8>
   %3 = addi %2, %2 : tensor<4xi8>
@@ -18,7 +18,7 @@ func @non_redundant_scast() -> tensor<4x!quant.uniform
   // CHECK-NEXT: constant splat<tensor<4xi8>, 5>
   // CHECK-NEXT: scast
   // CHECK-NEXT: return
-  %cst = constant splat<tensor<4xi8>, 5> : tensor<4xi8>
+  %cst = constant splat<tensor<4xi8>, 5>
   %1 = "quant.scast"(%cst) : (tensor<4xi8>) -> tensor<4x!quant.uniform>
   return %1 : tensor<4x!quant.uniform>
 }
diff --git a/mlir/test/Quantization/convert-const.mlir b/mlir/test/Quantization/convert-const.mlir
index 1d28fa4..742a06e 100644
--- a/mlir/test/Quantization/convert-const.mlir
+++ b/mlir/test/Quantization/convert-const.mlir
@@ -11,9 +11,9 @@
 // (-64 signed == 192 unsigned).
 // CHECK-LABEL: constant_splat_tensor_u8_affine
 func @constant_splat_tensor_u8_affine() -> tensor<4xf32> {
-  // CHECK: %cst = constant splat<tensor<4xi8>, -64> : tensor<4xi8>
+  // CHECK: %cst = constant splat<tensor<4xi8>, -64>
   // CHECK-NEXT: %0 = "quant.scast"(%cst) : (tensor<4xi8>) -> tensor<4x!quant.uniform>
-  %cst = constant splat<tensor<4xf32>, 0.5> : tensor<4xf32>
+  %cst = constant splat<tensor<4xf32>, 0.5>
   %1 = "quant.qcast"(%cst) : (tensor<4xf32>) -> tensor<4x!quant.uniform>
   %2 = "quant.dcast"(%1) : (tensor<4x!quant.uniform>) -> (tensor<4xf32>)
   return %2 : tensor<4xf32>
@@ -23,9 +23,9 @@
 // Verifies i8 affine quantization on a splat tensor.
 // CHECK-LABEL: constant_splat_tensor_i8_affine
 func @constant_splat_tensor_i8_affine() -> tensor<4xf32> {
-  // CHECK: %cst = constant splat<tensor<4xi8>, 63> : tensor<4xi8>
+  // CHECK: %cst = constant splat<tensor<4xi8>, 63>
   // CHECK-NEXT: %0 = "quant.scast"(%cst) : (tensor<4xi8>) -> tensor<4x!quant.uniform>
-  %cst = constant splat<tensor<4xf32>, 0.5> : tensor<4xf32>
+  %cst = constant splat<tensor<4xf32>, 0.5>
   %1 = "quant.qcast"(%cst) : (tensor<4xf32>) -> tensor<4x!quant.uniform>
   %2 = "quant.dcast"(%1) : (tensor<4x!quant.uniform>) -> (tensor<4xf32>)
   return %2 : tensor<4xf32>
@@ -35,9 +35,9 @@
 // Verifies i8 fixedpoint quantization on a splat tensor.
 // CHECK-LABEL: const_splat_tensor_i8_fixedpoint
 func @const_splat_tensor_i8_fixedpoint() -> tensor<4xf32> {
-  // CHECK: %cst = constant splat<tensor<4xi8>, 64> : tensor<4xi8>
+  // CHECK: %cst = constant splat<tensor<4xi8>, 64>
   // CHECK-NEXT: %0 = "quant.scast"(%cst) : (tensor<4xi8>) -> tensor<4x!quant.uniform>
-  %cst = constant splat<tensor<4xf32>, 0.5> : tensor<4xf32>
+  %cst = constant splat<tensor<4xf32>, 0.5>
   %1 = "quant.qcast"(%cst) : (tensor<4xf32>) -> tensor<4x!quant.uniform>
   %2 = "quant.dcast"(%1) : (tensor<4x!quant.uniform>) -> (tensor<4xf32>)
   return %2 : tensor<4xf32>
@@ -47,8 +47,8 @@
 // Verifies i8 fixedpoint quantization on a splat tensor resulting in a negative storage value.
 // CHECK-LABEL: const_splat_tensor_i8_fixedpoint_neg
 func @const_splat_tensor_i8_fixedpoint_neg() -> tensor<4xf32> {
-  // CHECK: %cst = constant splat<tensor<4xi8>, -64> : tensor<4xi8>
-  %cst = constant splat<tensor<4xf32>, -0.5> : tensor<4xf32>
+  // CHECK: %cst = constant splat<tensor<4xi8>, -64>
+  %cst = constant splat<tensor<4xf32>, -0.5>
   %1 = "quant.qcast"(%cst) : (tensor<4xf32>) -> tensor<4x!quant.uniform>
   %2 = "quant.dcast"(%1) : (tensor<4x!quant.uniform>) -> (tensor<4xf32>)
   return %2 : tensor<4xf32>
@@ -58,8 +58,8 @@
 // Verifies i8 fixedpoint quantization on a dense tensor, sweeping values.
 // CHECK-LABEL: const_dense_tensor_i8_fixedpoint
 func @const_dense_tensor_i8_fixedpoint() -> tensor<7xf32> {
-  // CHECK: %cst = constant dense<tensor<7xi8>, [-128, -128, -64, 0, 64, 127, 127]> : tensor<7xi8>
-  %cst = constant dense<tensor<7xf32>, [-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]> : tensor<7xf32>
+  // CHECK: %cst = constant dense<tensor<7xi8>, [-128, -128, -64, 0, 64, 127, 127]>
+  %cst = constant dense<tensor<7xf32>, [-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]>
   %1 = "quant.qcast"(%cst) : (tensor<7xf32>) -> tensor<7x!quant.uniform>
   %2 = "quant.dcast"(%1) : (tensor<7x!quant.uniform>) -> (tensor<7xf32>)
   return %2 : tensor<7xf32>
@@ -70,10 +70,10 @@
 // CHECK-LABEL: const_sparse_tensor_i8_fixedpoint
 func @const_sparse_tensor_i8_fixedpoint() -> tensor<7x2xf32> {
   // NOTE: Ugly regex match pattern for opening "[[" of indices tensor.
-  // CHECK: %cst = constant sparse<tensor<7x2xi8>, {{\[}}[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6]], [-128, -128, -64, 0, 64, 127, 127]> : tensor<7x2xi8>
+  // CHECK: %cst = constant sparse<tensor<7x2xi8>, {{\[}}[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6]], [-128, -128, -64, 0, 64, 127, 127]>
   %cst = constant sparse<tensor<7x2xf32>,
       [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [0, 5], [0, 6]],
-      [-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]> : tensor<7x2xf32>
+      [-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]>
   %1 = "quant.qcast"(%cst) : (tensor<7x2xf32>) -> tensor<7x2x!quant.uniform>
   %2 = "quant.dcast"(%1) : (tensor<7x2x!quant.uniform>) -> (tensor<7x2xf32>)
   return %2 : tensor<7x2xf32>
@@ -96,8 +96,8 @@ func @const_primitive_float_i8_fixedpoint() -> f32 {
 // CHECK-LABEL: const_dense_tensor_u4_affine
 func @const_dense_tensor_u4_affine() -> tensor<7xf32> {
   // NOTE: Unsigned quantities printed by MLIR as signed.
-  // CHECK: %cst = constant dense<tensor<7xi4>, [0, 0, 4, -8, -4, -1, -1]> : tensor<7xi4>
-  %cst = constant dense<tensor<7xf32>, [-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]> : tensor<7xf32>
+  // CHECK: %cst = constant dense<tensor<7xi4>, [0, 0, 4, -8, -4, -1, -1]>
+  %cst = constant dense<tensor<7xf32>, [-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]>
   %1 = "quant.qcast"(%cst) : (tensor<7xf32>) -> tensor<7x!quant.uniform>
   %2 = "quant.dcast"(%1) : (tensor<7x!quant.uniform>) -> (tensor<7xf32>)
   return %2 : tensor<7xf32>
@@ -108,8 +108,8 @@ func @const_dense_tensor_u4_affine() -> tensor<7xf32> {
 // Verifies i4 affine quantization on a dense tensor, sweeping values.
 // CHECK-LABEL: const_dense_tensor_i4_affine
 func @const_dense_tensor_i4_affine() -> tensor<7xf32> {
   // NOTE: Unsigned quantities printed by MLIR as signed.
-  // CHECK: %cst = constant dense<tensor<7xi4>, [-8, -8, -5, -1, 3, 7, 7]> : tensor<7xi4>
-  %cst = constant dense<tensor<7xf32>, [-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]> : tensor<7xf32>
+  // CHECK: %cst = constant dense<tensor<7xi4>, [-8, -8, -5, -1, 3, 7, 7]>
+  %cst = constant dense<tensor<7xf32>, [-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]>
   %1 = "quant.qcast"(%cst) : (tensor<7xf32>) -> tensor<7x!quant.uniform>
   %2 = "quant.dcast"(%1) : (tensor<7x!quant.uniform>) -> (tensor<7xf32>)
   return %2 : tensor<7xf32>
@@ -119,8 +119,8 @@
 // Verifies i4 fixed point quantization on a dense tensor, sweeping values.
 // CHECK-LABEL: const_dense_tensor_i4_fixedpoint
 func @const_dense_tensor_i4_fixedpoint() -> tensor<7xf32> {
-  // CHECK: %cst = constant dense<tensor<7xi4>, [-8, -8, -4, 0, 4, 7, 7]> : tensor<7xi4>
-  %cst = constant dense<tensor<7xf32>, [-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]> : tensor<7xf32>
+  // CHECK: %cst = constant dense<tensor<7xi4>, [-8, -8, -4, 0, 4, 7, 7]>
+  %cst = constant dense<tensor<7xf32>, [-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]>
   %1 = "quant.qcast"(%cst) : (tensor<7xf32>) -> tensor<7x!quant.uniform>
   %2 = "quant.dcast"(%1) : (tensor<7x!quant.uniform>) -> (tensor<7xf32>)
   return %2 : tensor<7xf32>
@@ -132,8 +132,8 @@
 // be clamped to 100).
 // CHECK-LABEL: const_custom_storage_range_i8_fixedpoint
 func @const_custom_storage_range_i8_fixedpoint() -> tensor<7xf32> {
-  // CHECK: %cst = constant dense<tensor<7xi8>, [-100, -100, -64, 0, 64, 100, 100]> : tensor<7xi8>
-  %cst = constant dense<tensor<7xf32>, [-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]> : tensor<7xf32>
+  // CHECK: %cst = constant dense<tensor<7xi8>, [-100, -100, -64, 0, 64, 100, 100]>
+  %cst = constant dense<tensor<7xf32>, [-2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0]>
   %1 = "quant.qcast"(%cst) : (tensor<7xf32>) -> tensor<7x!quant.uniform:f32, 7.812500e-03>>
   %2 = "quant.dcast"(%1) : (tensor<7x!quant.uniform:f32, 7.812500e-03>>) -> (tensor<7xf32>)
   return %2 : tensor<7xf32>
diff --git a/mlir/test/Transforms/Vectorize/lower_vector_transfers.mlir b/mlir/test/Transforms/Vectorize/lower_vector_transfers.mlir
index 6331c38..a5717f5 100644
--- a/mlir/test/Transforms/Vectorize/lower_vector_transfers.mlir
+++ b/mlir/test/Transforms/Vectorize/lower_vector_transfers.mlir
@@ -127,7 +127,7 @@ func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
 // CHECK-LABEL:func @materialize_write(%arg0: index, %arg1: index, %arg2: index, %arg3: index) {
 func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
   // CHECK-NEXT: %[[C0:.*]] = constant 0 : index
-  // CHECK-NEXT: %cst = constant splat<vector<5x4x3xf32>, 1.000000e+00> : vector<5x4x3xf32>
+  // CHECK-NEXT: %cst = constant splat<vector<5x4x3xf32>, 1.000000e+00>
   // CHECK-NEXT: %0 = alloc(%arg0, %arg1, %arg2, %arg3) : memref
   // CHECK-NEXT: affine.for %[[I0:.*]] = 0 to %arg0 step 3 {
   // CHECK-NEXT: affine.for %[[I1:.*]] = 0 to %arg1 step 4 {
@@ -188,7 +188,7 @@ func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
 // Check that I3 + I6 (of size 5) read from first index load(I6, ...) and write into last index store(..., S3)
 // Other dimension is just accessed with I2.
 %A = alloc (%M, %N, %O, %P) : memref
-  %f1 = constant splat<vector<5x4x3xf32>, 1.000000e+00> : vector<5x4x3xf32>
+  %f1 = constant splat<vector<5x4x3xf32>, 1.000000e+00>
   affine.for %i0 = 0 to %M step 3 {
     affine.for %i1 = 0 to %N step 4 {
       affine.for %i2 = 0 to %O {
diff --git a/mlir/test/Transforms/Vectorize/materialize.mlir b/mlir/test/Transforms/Vectorize/materialize.mlir
index 40460e1..ee1ec34 100644
--- a/mlir/test/Transforms/Vectorize/materialize.mlir
+++ b/mlir/test/Transforms/Vectorize/materialize.mlir
@@ -9,7 +9,7 @@
 // CHECK-LABEL: func @materialize
 func @materialize(%M : index, %N : index, %O : index, %P : index) {
   %A = alloc (%M, %N, %O, %P) : memref
-  %f1 = constant splat<vector<4x4x4xf32>, 1.000000e+00> : vector<4x4x4xf32>
+  %f1 = constant splat<vector<4x4x4xf32>, 1.000000e+00>
   // CHECK: affine.for %i0 = 0 to %arg0 step 4 {
   // CHECK-NEXT: affine.for %i1 = 0 to %arg1 step 4 {
   // CHECK-NEXT: affine.for %i2 = 0 to %arg2 {
diff --git a/mlir/test/Transforms/Vectorize/materialize_vectors_1d_to_1d.mlir b/mlir/test/Transforms/Vectorize/materialize_vectors_1d_to_1d.mlir
index 318373a..df2dc08 100644
--- a/mlir/test/Transforms/Vectorize/materialize_vectors_1d_to_1d.mlir
+++ b/mlir/test/Transforms/Vectorize/materialize_vectors_1d_to_1d.mlir
@@ -17,10 +17,10 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
   // 4x unroll (jammed by construction).
   // CHECK: affine.for %i0 = 0 to %arg0 {
   // CHECK-NEXT: affine.for %i1 = 0 to %arg1 step 32 {
-  // CHECK-NEXT: %[[CST0:.*]] = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
-  // CHECK-NEXT: %[[CST1:.*]] = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
-  // CHECK-NEXT: %[[CST2:.*]] = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
-  // CHECK-NEXT: %[[CST3:.*]] = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
+  // CHECK-NEXT: %[[CST0:.*]] = constant splat<vector<8xf32>, 1.000000e+00>
+  // CHECK-NEXT: %[[CST1:.*]] = constant splat<vector<8xf32>, 1.000000e+00>
+  // CHECK-NEXT: %[[CST2:.*]] = constant splat<vector<8xf32>, 1.000000e+00>
+  // CHECK-NEXT: %[[CST3:.*]] = constant splat<vector<8xf32>, 1.000000e+00>
   // CHECK-NEXT: %[[VAL00:.*]] = affine.apply [[ID1]]{{.*}}
   // CHECK-NEXT: %[[VAL01:.*]] = affine.apply [[ID1]]{{.*}}
   // CHECK-NEXT: vector.transfer_write %[[CST0]], {{.*}}[%[[VAL00]], %[[VAL01]]] {permutation_map: [[D0D1TOD1]]} : vector<8xf32>, memref
@@ -43,10 +43,10 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
   // 4x unroll (jammed by construction).
   // CHECK: affine.for %i2 = 0 to %arg0 {
   // CHECK-NEXT: affine.for %i3 = 0 to %arg1 step 32 {
-  // CHECK-NEXT: %[[CST0:.*]] = constant splat<vector<8xf32>, 2.000000e+00> : vector<8xf32>
-  // CHECK-NEXT: %[[CST1:.*]] = constant splat<vector<8xf32>, 2.000000e+00> : vector<8xf32>
-  // CHECK-NEXT: %[[CST2:.*]] = constant splat<vector<8xf32>, 2.000000e+00> : vector<8xf32>
-  // CHECK-NEXT: %[[CST3:.*]] = constant splat<vector<8xf32>, 2.000000e+00> : vector<8xf32>
+  // CHECK-NEXT: %[[CST0:.*]] = constant splat<vector<8xf32>, 2.000000e+00>
+  // CHECK-NEXT: %[[CST1:.*]] = constant splat<vector<8xf32>, 2.000000e+00>
+  // CHECK-NEXT: %[[CST2:.*]] = constant splat<vector<8xf32>, 2.000000e+00>
+  // CHECK-NEXT: %[[CST3:.*]] = constant splat<vector<8xf32>, 2.000000e+00>
   // CHECK-NEXT: %[[VAL00:.*]] = affine.apply [[ID1]]{{.*}}
   // CHECK-NEXT: %[[VAL01:.*]] = affine.apply [[ID1]]{{.*}}
   // CHECK-NEXT: vector.transfer_write %[[CST0]], {{.*}}[%[[VAL00]], %[[VAL01]]] {permutation_map: [[D0D1TOD1]]} : vector<8xf32>, memref
diff --git a/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_1d.mlir b/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_1d.mlir
index e5034e4..67ec9ce 100644
--- a/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_1d.mlir
+++ b/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_1d.mlir
@@ -17,12 +17,12 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
   // (3x2)x unroll (jammed by construction).
   // CHECK: affine.for %i0 = 0 to %arg0 step 3 {
   // CHECK-NEXT: affine.for %i1 = 0 to %arg1 step 16 {
-  // CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
-  // CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
-  // CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
-  // CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
-  // CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
-  // CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00> : vector<8xf32>
+  // CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00>
+  // CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00>
+  // CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00>
+  // CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00>
+  // CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00>
+  // CHECK-NEXT: {{.*}} = constant splat<vector<8xf32>, 1.000000e+00>
   // CHECK-NEXT: %[[VAL00:.*]] = affine.apply [[ID1]](%i0)
   // CHECK-NEXT: %[[VAL01:.*]] = affine.apply [[ID1]](%i1)
   // CHECK-NEXT: vector.transfer_write {{.*}}, {{.*}}[%[[VAL00]], %[[VAL01]]] {permutation_map: [[D0D1TOD1]]} : vector<8xf32>, memref
diff --git a/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_2d.mlir b/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_2d.mlir
index ea1353d..7a40e59 100644
--- a/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/materialize_vectors_2d_to_2d.mlir
@@ -15,8 +15,8 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
   // 2x unroll (jammed by construction).
   // CHECK: affine.for %i0 = 0 to %arg0 step 3 {
   // CHECK-NEXT: affine.for %i1 = 0 to %arg1 step 32 {
-  // CHECK-NEXT: {{.*}} = constant splat<vector<3x16xf32>, 1.000000e+00> : vector<3x16xf32>
-  // CHECK-NEXT: {{.*}} = constant splat<vector<3x16xf32>, 1.000000e+00> : vector<3x16xf32>
+  // CHECK-NEXT: {{.*}} = constant splat<vector<3x16xf32>, 1.000000e+00>
+  // CHECK-NEXT: {{.*}} = constant splat<vector<3x16xf32>, 1.000000e+00>
   // CHECK-NEXT: %[[VAL00:.*]] = affine.apply [[ID1]](%i0)
   // CHECK-NEXT: %[[VAL01:.*]] = affine.apply [[ID1]](%i1)
   // CHECK-NEXT: vector.transfer_write {{.*}}, {{.*}}[%[[VAL00]], %[[VAL01]]] {permutation_map: [[ID2]]} : vector<3x16xf32>, memref
@@ -33,8 +33,8 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
   // 2x unroll (jammed by construction).
   // CHECK: affine.for %i2 = 0 to %arg0 step 3 {
   // CHECK-NEXT: affine.for %i3 = 0 to %arg1 step 32 {
-  // CHECK-NEXT: {{.*}} = constant splat<vector<3x16xf32>, 2.000000e+00> : vector<3x16xf32>
-  // CHECK-NEXT: {{.*}} = constant splat<vector<3x16xf32>, 2.000000e+00> : vector<3x16xf32>
+  // CHECK-NEXT: {{.*}} = constant splat<vector<3x16xf32>, 2.000000e+00>
+  // CHECK-NEXT: {{.*}} = constant splat<vector<3x16xf32>, 2.000000e+00>
   // CHECK-NEXT: %[[VAL00:.*]] = affine.apply [[ID1]](%i2)
   // CHECK-NEXT: %[[VAL01:.*]] = affine.apply [[ID1]](%i3)
   // CHECK-NEXT: vector.transfer_write {{.*}}, {{.*}}[%[[VAL00]], %[[VAL01]]] {permutation_map: [[ID2]]} : vector<3x16xf32>, memref
diff --git a/mlir/test/Transforms/Vectorize/vector_utils.mlir b/mlir/test/Transforms/Vectorize/vector_utils.mlir
index cec3650..df0a33c 100644
--- a/mlir/test/Transforms/Vectorize/vector_utils.mlir
+++ b/mlir/test/Transforms/Vectorize/vector_utils.mlir
@@ -13,17 +13,17 @@ func @vector_add_2d(%arg0: index, %arg1: index) -> f32 {
   %cst = constant 1.000000e+00 : f32
   // CHECK:matched: {{.*}} constant splat{{.*}} with shape ratio: 2, 32
-  %cst_1 = constant splat<vector<8x256xf32>, 1.000000e+00> : vector<8x256xf32>
+  %cst_1 = constant splat<vector<8x256xf32>, 1.000000e+00>
   // CHECK:matched: {{.*}} constant splat{{.*}} with shape ratio: 1, 3, 7, 2, 1
-  %cst_a = constant splat<vector<1x3x7x8x8xf32>, 1.000000e+00> : vector<1x3x7x8x8xf32>
+  %cst_a = constant splat<vector<1x3x7x8x8xf32>, 1.000000e+00>
   // CHECK-NOT:matched: {{.*}} constant splat{{.*}} with shape ratio: 1, 3, 7, 1{{.*}}
-  %cst_b = constant splat<vector<1x3x7x4x4xf32>, 1.000000e+00> : vector<1x3x7x4x4xf32>
+  %cst_b = constant splat<vector<1x3x7x4x4xf32>, 1.000000e+00>
   // TEST-3x4x5x8:matched: {{.*}} constant splat{{.*}} with shape ratio: 3, 2, 1, 4
-  %cst_c = constant splat<vector<3x4x5x8xf32>, 1.000000e+00> : vector<3x4x5x8xf32>
+  %cst_c = constant splat<vector<3x4x5x8xf32>, 1.000000e+00>
   // TEST-3x4x4x8-NOT:matched: {{.*}} constant splat{{.*}} with shape ratio{{.*}}
-  %cst_d = constant splat<vector<3x4x4x8xf32>, 1.000000e+00> : vector<3x4x4x8xf32>
+  %cst_d = constant splat<vector<3x4x4x8xf32>, 1.000000e+00>
   // TEST-3x4x4x8:matched: {{.*}} constant splat{{.*}} with shape ratio: 1, 1, 2, 16
-  %cst_e = constant splat<vector<1x2x10x32xf32>, 1.000000e+00> : vector<1x2x10x32xf32>
+  %cst_e = constant splat<vector<1x2x10x32xf32>, 1.000000e+00>
 
   // Nothing should be matched in this last block.
   // CHECK-NOT:matched: {{.*}} = constant 7{{.*}}
diff --git a/mlir/test/Transforms/Vectorize/vectorize_1d.mlir b/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
index 5a0fab1..e035478 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_1d.mlir
@@ -86,7 +86,7 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
   %f2 = constant 2.0 : f32
   affine.for %i0 = 0 to %M {
     affine.for %i1 = 0 to %N {
-      // CHECK: [[C1:%.*]] = constant splat<vector<128xf32>, 1.000000e+00> : vector<128xf32>
+      // CHECK: [[C1:%.*]] = constant splat<vector<128xf32>, 1.000000e+00>
      // CHECK: vector.transfer_write [[C1]], {{.*}} {permutation_map: #[[map_proj_d0d1_d1]]} : vector<128xf32>, memref
      // non-scoped %f1
      store %f1, %A[%i0, %i1] : memref
 }
   affine.for %i2 = 0 to %M {
     affine.for %i3 = 0 to %N {
-      // CHECK: [[C3:%.*]] = constant splat<vector<128xf32>, 2.000000e+00> : vector<128xf32>
+      // CHECK: [[C3:%.*]] = constant splat<vector<128xf32>, 2.000000e+00>
      // CHECK: vector.transfer_write [[C3]], {{.*}} {permutation_map: #[[map_proj_d0d1_d1]]} : vector<128xf32>, memref
      // non-scoped %f2
      store %f2, %B[%i2, %i3] : memref
@@ -105,9 +105,9 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
   // CHECK: [[A5:%.*]] = vector.transfer_read %0[{{.*}}] {permutation_map: #[[map_proj_d0d1_d1]]} : memref, vector<128xf32>
   // CHECK: [[B5:%.*]] = vector.transfer_read %1[{{.*}}] {permutation_map: #[[map_proj_d0d1_d1]]} : memref, vector<128xf32>
   // CHECK: [[S5:%.*]] = addf [[A5]], [[B5]] : vector<128xf32>
-  // CHECK: [[SPLAT1:%.*]] = constant splat<vector<128xf32>, 1.000000e+00> : vector<128xf32>
+  // CHECK: [[SPLAT1:%.*]] = constant splat<vector<128xf32>, 1.000000e+00>
   // CHECK: [[S6:%.*]] = addf [[S5]], [[SPLAT1]] : vector<128xf32>
-  // CHECK: [[SPLAT2:%.*]] = constant splat<vector<128xf32>, 2.000000e+00> : vector<128xf32>
+  // CHECK: [[SPLAT2:%.*]] = constant splat<vector<128xf32>, 2.000000e+00>
   // CHECK: [[S7:%.*]] = addf [[S5]], [[SPLAT2]] : vector<128xf32>
   // CHECK: [[S8:%.*]] = addf [[S7]], [[S6]] : vector<128xf32>
   // CHECK: vector.transfer_write [[S8]], {{.*}} {permutation_map: #[[map_proj_d0d1_d1]]} : vector<128xf32>, memref
diff --git a/mlir/test/Transforms/Vectorize/vectorize_2d.mlir b/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
index 217c7a6..8ece190 100644
--- a/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
+++ b/mlir/test/Transforms/Vectorize/vectorize_2d.mlir
@@ -53,7 +53,7 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
   %f2 = constant 2.0 : f32
   affine.for %i0 = 0 to %M {
     affine.for %i1 = 0 to %N {
-      // CHECK: [[C1:%.*]] = constant splat<vector<32x256xf32>, 1.000000e+00> : vector<32x256xf32>
+      // CHECK: [[C1:%.*]] = constant splat<vector<32x256xf32>, 1.000000e+00>
      // CHECK: vector.transfer_write [[C1]], {{.*}} {permutation_map: #[[map_id2]]} : vector<32x256xf32>, memref
      // non-scoped %f1
      store %f1, %A[%i0, %i1] : memref
@@ -61,7 +61,7 @@ }
   affine.for %i2 = 0 to %M {
     affine.for %i3 = 0 to %N {
-      // CHECK: [[C3:%.*]] = constant splat<vector<32x256xf32>, 2.000000e+00> : vector<32x256xf32>
+      // CHECK: [[C3:%.*]] = constant splat<vector<32x256xf32>, 2.000000e+00>
      // CHECK: vector.transfer_write [[C3]], {{.*}} {permutation_map: #[[map_id2]]} : vector<32x256xf32>, memref
      // non-scoped %f2
      store %f2, %B[%i2, %i3] : memref
@@ -72,9 +72,9 @@ func @vector_add_2d(%M : index, %N : index) -> f32 {
   // CHECK: [[A5:%.*]] = vector.transfer_read %0[{{.*}}] {permutation_map: #[[map_id2]]} : memref, vector<32x256xf32>
   // CHECK: [[B5:%.*]] = vector.transfer_read %1[{{.*}}] {permutation_map: #[[map_id2]]} : memref, vector<32x256xf32>
   // CHECK: [[S5:%.*]] = addf [[A5]], [[B5]] : vector<32x256xf32>
-  // CHECK: [[SPLAT1:%.*]] = constant splat<vector<32x256xf32>, 1.000000e+00> : vector<32x256xf32>
+  // CHECK: [[SPLAT1:%.*]] = constant splat<vector<32x256xf32>, 1.000000e+00>
   // CHECK: [[S6:%.*]] = addf [[S5]], [[SPLAT1]] : vector<32x256xf32>
-  // CHECK: [[SPLAT2:%.*]] = constant splat<vector<32x256xf32>, 2.000000e+00> : vector<32x256xf32>
+  // CHECK: [[SPLAT2:%.*]] = constant splat<vector<32x256xf32>, 2.000000e+00>
   // CHECK: [[S7:%.*]] = addf [[S5]], [[SPLAT2]] : vector<32x256xf32>
   // CHECK: [[S8:%.*]] = addf [[S7]], [[S6]] : vector<32x256xf32>
   // CHECK: vector.transfer_write [[S8]], {{.*}} {permutation_map: #[[map_id2]]} : vector<32x256xf32>, memref
@@ -109,7 +109,7 @@ func @vectorize_matmul(%arg0: memref, %arg1: memref, %arg2: me
   // VECT-NEXT: %[[N:.*]] = dim %arg2, 1 : memref
   // VECT: {{.*}} #[[map_id1]](%[[M]]) step 4 {
   // VECT-NEXT: {{.*}} #[[map_id1]](%[[N]]) step 8 {
-  // VECT: %[[VC0:.*]] = constant splat<vector<4x8xf32>, 0.000000e+00> : vector<4x8xf32>
+  // VECT: %[[VC0:.*]] = constant splat<vector<4x8xf32>, 0.000000e+00>
   // VECT-NEXT: vector.transfer_write %[[VC0]], %arg2[%{{.*}}, %{{.*}}] {permutation_map: #[[map_id2]]} : vector<4x8xf32>, memref
   affine.for %i0 = (d0) -> (d0)(%c0) to (d0) -> (d0)(%M) {
     affine.for %i1 = (d0) -> (d0)(%c0) to (d0) -> (d0)(%N) {
diff --git a/mlir/test/Transforms/canonicalize.mlir b/mlir/test/Transforms/canonicalize.mlir
index 7e8a5d5..31b4016 100644
--- a/mlir/test/Transforms/canonicalize.mlir
+++ b/mlir/test/Transforms/canonicalize.mlir
@@ -67,7 +67,7 @@ func @addi_zero(%arg0: i32) -> i32 {
 // CHECK-LABEL: func @addi_zero_vector
 func @addi_zero_vector(%arg0: vector<4 x i32>) -> vector<4 x i32> {
   // CHECK-NEXT: return %arg0
-  %c0_v4i32 = constant splat<vector<4 x i32>, 0> : vector<4 x i32>
+  %c0_v4i32 = constant splat<vector<4 x i32>, 0>
   %y = addi %c0_v4i32, %arg0 : vector<4 x i32>
   return %y: vector<4 x i32>
 }
@@ -75,7 +75,7 @@ func @addi_zero_vector(%arg0: vector<4 x i32>) -> vector<4 x i32> {
 // CHECK-LABEL: func @addi_zero_tensor
 func @addi_zero_tensor(%arg0: tensor<4 x 5 x i32>) -> tensor<4 x 5 x i32> {
   // CHECK-NEXT: return %arg0
-  %c0_t45i32 = constant splat<tensor<4 x 5 x i32>, 0> : tensor<4 x 5 x i32>
+  %c0_t45i32 = constant splat<tensor<4 x 5 x i32>, 0>
   %y = addi %arg0, %c0_t45i32 : tensor<4 x 5 x i32>
   return %y: tensor<4 x 5 x i32>
 }
@@ -93,8 +93,8 @@ func @muli_zero(%arg0: i32) -> i32 {
 // CHECK-LABEL: func @muli_zero_vector
 func @muli_zero_vector(%arg0: vector<4 x i32>) -> vector<4 x i32> {
-  // CHECK-NEXT: %cst = constant splat<vector<4xi32>, 0> : vector<4xi32>
-  %cst = constant splat<vector<4 x i32>, 0> : vector<4 x i32>
+  // CHECK-NEXT: %cst = constant splat<vector<4xi32>, 0>
+  %cst = constant splat<vector<4 x i32>, 0>
 
   %y = muli %cst, %arg0 : vector<4 x i32>
 
@@ -104,8 +104,8 @@ func @muli_zero_vector(%arg0: vector<4 x i32>) -> vector<4 x i32> {
 // CHECK-LABEL: func @muli_zero_tensor
 func @muli_zero_tensor(%arg0: tensor<4 x 5 x i32>) -> tensor<4 x 5 x i32> {
-  // CHECK-NEXT: %cst = constant splat<tensor<4x5xi32>, 0> : tensor<4x5xi32>
-  %cst = constant splat<tensor<4 x 5 x i32>, 0> : tensor<4 x 5 x i32>
+  // CHECK-NEXT: %cst = constant splat<tensor<4x5xi32>, 0>
+  %cst = constant splat<tensor<4 x 5 x i32>, 0>
 
   %y = muli %arg0, %cst : tensor<4 x 5 x i32>
 
@@ -124,7 +124,7 @@ func @muli_one(%arg0: i32) -> i32 {
 // CHECK-LABEL: func @muli_one_vector
 func @muli_one_vector(%arg0: vector<4 x i32>) -> vector<4 x i32> {
   // CHECK-NEXT: return %arg0
-  %c1_v4i32 = constant splat<vector<4 x i32>, 1> : vector<4 x i32>
+  %c1_v4i32 = constant splat<vector<4 x i32>, 1>
   %y = muli %c1_v4i32, %arg0 : vector<4 x i32>
   return %y: vector<4 x i32>
 }
@@ -132,7 +132,7 @@ func @muli_one_vector(%arg0: vector<4 x i32>) -> vector<4 x i32> {
 // CHECK-LABEL: func @muli_one_tensor
 func @muli_one_tensor(%arg0: tensor<4 x 5 x i32>) -> tensor<4 x 5 x i32> {
   // CHECK-NEXT: return %arg0
-  %c1_t45i32 = constant splat<tensor<4 x 5 x i32>, 1> : tensor<4 x 5 x i32>
+  %c1_t45i32 = constant splat<tensor<4 x 5 x i32>, 1>
   %y = muli %arg0, %c1_t45i32 : tensor<4 x 5 x i32>
   return %y: tensor<4 x 5 x i32>
 }
@@ -169,8 +169,8 @@ func @and_zero(%arg0: i32) -> i32 {
 //CHECK-LABEL: func @and_zero_vector
 func @and_zero_vector(%arg0: vector<4xi32>) -> vector<4xi32> {
-  // CHECK-NEXT: %cst = constant splat<vector<4xi32>, 0> : vector<4xi32>
-  %cst = constant splat<vector<4xi32>, 0> : vector<4xi32>
+  // CHECK-NEXT: %cst = constant splat<vector<4xi32>, 0>
+  %cst = constant splat<vector<4xi32>, 0>
   // CHECK-NEXT: return %cst
   %1 = and %arg0, %cst : vector<4xi32>
   return %1 : vector<4xi32>
@@ -178,8 +178,8 @@ func @and_zero_vector(%arg0: vector<4xi32>) -> vector<4xi32> {
 //CHECK-LABEL: func @and_zero_tensor
 func @and_zero_tensor(%arg0: tensor<4x5xi32>) -> tensor<4x5xi32> {
-  // CHECK-NEXT: %cst = constant splat<tensor<4x5xi32>, 0> : tensor<4x5xi32>
-  %cst = constant splat<tensor<4x5xi32>, 0> : tensor<4x5xi32>
+  // CHECK-NEXT: %cst = constant splat<tensor<4x5xi32>, 0>
+  %cst = constant splat<tensor<4x5xi32>, 0>
   // CHECK-NEXT: return %cst
   %1 = and %arg0, %cst : tensor<4x5xi32>
   return %1 : tensor<4x5xi32>
@@ -217,7 +217,7 @@ func @or_zero(%arg0: i32) -> i32 {
 //CHECK-LABEL: func @or_zero_vector
 func @or_zero_vector(%arg0: vector<4xi32>) -> vector<4xi32> {
   // CHECK-NEXT: return %arg0
-  %cst = constant splat<vector<4xi32>, 0> : vector<4xi32>
+  %cst = constant splat<vector<4xi32>, 0>
   %1 = or %arg0, %cst : vector<4xi32>
   return %1 : vector<4xi32>
 }
@@ -225,7 +225,7 @@ func @or_zero_vector(%arg0: vector<4xi32>) -> vector<4xi32> {
 //CHECK-LABEL: func @or_zero_tensor
 func @or_zero_tensor(%arg0: tensor<4x5xi32>) -> tensor<4x5xi32> {
   // CHECK-NEXT: return %arg0
-  %cst = constant splat<tensor<4x5xi32>, 0> : tensor<4x5xi32>
+  %cst = constant splat<tensor<4x5xi32>, 0>
   %1 = or %arg0, %cst : tensor<4x5xi32>
   return %1 : tensor<4x5xi32>
 }
diff --git a/mlir/test/Transforms/constant-fold.mlir b/mlir/test/Transforms/constant-fold.mlir
index f249e99..927d2d4 100644
--- a/mlir/test/Transforms/constant-fold.mlir
+++ b/mlir/test/Transforms/constant-fold.mlir
@@ -37,10 +37,10 @@ func @simple_addf() -> f32 {
 
 // CHECK-LABEL: func @addf_splat_tensor
 func @addf_splat_tensor() -> tensor<4xf32> {
-  %0 = constant splat<tensor<4xf32>, 4.5> : tensor<4xf32>
-  %1 = constant splat<tensor<4xf32>, 1.5> : tensor<4xf32>
+  %0 = constant splat<tensor<4xf32>, 4.5>
+  %1 = constant splat<tensor<4xf32>, 1.5>
 
-  // CHECK-NEXT: %cst = constant splat<tensor<4xf32>, 6.000000e+00> : tensor<4xf32>
+  // CHECK-NEXT: %cst = constant splat<tensor<4xf32>, 6.000000e+00>
   %2 = addf %0, %1 : tensor<4xf32>
 
   // CHECK-NEXT: return %cst
@@ -65,10 +65,10 @@ func @simple_addi() -> i32 {
 
 // CHECK-LABEL: func @addi_splat_vector
 func @addi_splat_vector() -> vector<8xi32> {
-  %0 = constant splat<vector<8xi32>, 1> : vector<8xi32>
-  %1 = constant splat<vector<8xi32>, 5> : vector<8xi32>
+  %0 = constant splat<vector<8xi32>, 1>
+  %1 = constant splat<vector<8xi32>, 5>
 
-  // CHECK-NEXT: %cst = constant splat<vector<8xi32>, 6> : vector<8xi32>
+  // CHECK-NEXT: %cst = constant splat<vector<8xi32>, 6>
   %2 = addi %0, %1 : vector<8xi32>
 
   // CHECK-NEXT: return %cst
@@ -93,10 +93,10 @@ func @simple_subf() -> f32 {
 
 // CHECK-LABEL: func @subf_splat_vector
 func @subf_splat_vector() -> vector<4xf32> {
-  %0 = constant splat<vector<4xf32>, 4.5> : vector<4xf32>
-  %1 = constant splat<vector<4xf32>, 1.5> : vector<4xf32>
+  %0 = constant splat<vector<4xf32>, 4.5>
+  %1 = constant splat<vector<4xf32>, 1.5>
 
-  // CHECK-NEXT: %cst = constant splat<vector<4xf32>, 3.000000e+00> : vector<4xf32>
+  // CHECK-NEXT: %cst = constant splat<vector<4xf32>, 3.000000e+00>
   %2 = subf %0, %1 : vector<4xf32>
 
   // CHECK-NEXT: return %cst
@@ -121,10 +121,10 @@ func @simple_subi() -> i32 {
 
 // CHECK-LABEL: func @subi_splat_tensor
 func @subi_splat_tensor() -> tensor<4xi32> {
-  %0 = constant splat<tensor<4xi32>, 4> : tensor<4xi32>
-  %1 = constant splat<tensor<4xi32>, 1> : tensor<4xi32>
+  %0 = constant splat<tensor<4xi32>, 4>
+  %1 = constant splat<tensor<4xi32>, 1>
 
-  // CHECK-NEXT: %cst = constant splat<tensor<4xi32>, 3> : tensor<4xi32>
+  // CHECK-NEXT: %cst = constant splat<tensor<4xi32>, 3>
   %2 = subi %0, %1 : tensor<4xi32>
 
   // CHECK-NEXT: return %cst
@@ -171,10 +171,10 @@ func @simple_mulf() -> f32 {
 
 // CHECK-LABEL: func @mulf_splat_tensor
 func @mulf_splat_tensor() -> tensor<4xf32> {
-  %0 = constant splat<tensor<4xf32>, 4.5> : tensor<4xf32>
-  %1 = constant splat<tensor<4xf32>, 1.5> : tensor<4xf32>
+  %0 = constant splat<tensor<4xf32>, 4.5>
+  %1 = constant splat<tensor<4xf32>, 1.5>
 
-  // CHECK-NEXT: %cst = constant splat<tensor<4xf32>, 6.750000e+00> : tensor<4xf32>
+  // CHECK-NEXT: %cst = constant splat<tensor<4xf32>, 6.750000e+00>
   %2 = mulf %0, %1 : tensor<4xf32>
 
   // CHECK-NEXT: return %cst
@@ -277,10 +277,10 @@ func @muli() -> i32 {
 
 // CHECK-LABEL: func @muli_splat_vector
 func @muli_splat_vector() -> vector<4xi32> {
-  %0 = constant splat<vector<4xi32>, 4> : vector<4xi32>
-  %1 = constant splat<vector<4xi32>, 2> : vector<4xi32>
+  %0 = constant splat<vector<4xi32>, 4>
+  %1 = constant splat<vector<4xi32>, 2>
 
-  // CHECK-NEXT: %cst = constant splat<vector<4xi32>, 8> : vector<4xi32>
+  // CHECK-NEXT: %cst = constant splat<vector<4xi32>, 8>
   %2 = muli %0, %1 : vector<4xi32>
 
   // CHECK-NEXT: return %cst
@@ -399,22 +399,22 @@ func @fold_extract_element(%arg0 : index) -> (f32, f16, f16, i32) {
 
   // Fold an extract into a splat.
   // CHECK-NEXT: {{.*}} = constant 4.500000e+00 : f32
-  %0 = constant splat<tensor<4xf32>, 4.5> : tensor<4xf32>
+  %0 = constant splat<tensor<4xf32>, 4.5>
   %ext_1 = extract_element %0[%arg0] : tensor<4xf32>
 
   // Fold an extract into a sparse with a sparse index.
   // CHECK-NEXT: {{.*}} = constant -2.000000e+00 : f16
-  %1 = constant sparse<vector<1x1x1xf16>, [[0, 0, 0], [1, 1, 1]], [-5.0, -2.0]> : vector<1x1x1xf16>
+  %1 = constant sparse<vector<1x1x1xf16>, [[0, 0, 0], [1, 1, 1]], [-5.0, -2.0]>
   %ext_2 = extract_element %1[%const_1, %const_1, %const_1] : vector<1x1x1xf16>
 
   // Fold an extract into a sparse with a non sparse index.
   // CHECK-NEXT: {{.*}} = constant 0.000000e+00 : f16
-  %2 = constant sparse<vector<1x1x1xf16>, [[1, 1, 1]], [-2.0]> : vector<1x1x1xf16>
+  %2 = constant sparse<vector<1x1x1xf16>, [[1, 1, 1]], [-2.0]>
   %ext_3 = extract_element %2[%const_0, %const_0, %const_0] : vector<1x1x1xf16>
 
   // Fold an extract into a dense tensor.
   // CHECK-NEXT: {{.*}} = constant 64 : i32
-  %3 = constant dense<tensor<2x1x4xi32>, [[[1, -2, 1, 36]], [[0, 2, -1, 64]]]> : tensor<2x1x4xi32>
+  %3 = constant dense<tensor<2x1x4xi32>, [[[1, -2, 1, 36]], [[0, 2, -1, 64]]]>
   %ext_4 = extract_element %3[%const_1, %const_0, %const_3] : tensor<2x1x4xi32>
 
   // CHECK-NEXT: return
-- 
2.7.4
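
A minimal sketch of the printed-form change, for reference only (values are
illustrative and taken from the tests above; this text is not part of the
applied diff):

    // Old pretty form: the constant's type was printed again after the
    // attribute, and i64/f64 attribute types were elided as parser defaults.
    %cst = constant splat<tensor<4xf32>, 4.5> : tensor<4xf32>
    "foo"() {a: 1} : () -> ()

    // New pretty form: the attribute already carries its type, so the
    // trailing type is dropped, and i64/f64 are now printed explicitly.
    %cst = constant splat<tensor<4xf32>, 4.5>
    "foo"() {a: 1 : i64} : () -> ()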