// -----
-// CHECK-LABL: func @view_empty_memref(
+// CHECK-LABEL: func @view_empty_memref(
// CHECK: %[[ARG0:.*]]: index,
// CHECK: %[[ARG1:.*]]: memref<0xi8>)
func.func @view_empty_memref(%offset: index, %mem: memref<0xi8>) {
%cst_b = arith.constant dense<1.000000e+00> : vector<1x3x7x4x4xf32>
// TEST-3x4x5x8:matched: {{.*}} arith.constant dense{{.*}} with shape ratio: 3, 2, 1, 4
%cst_c = arith.constant dense<1.000000e+00> : vector<3x4x5x8xf32>
- // TEST-3x4x4x8-NOT:matched: {{.*}} arith.constant dense{{.*}} with shape ratio{{.*}}
+ // TEST-3x4x5x8-NOT:matched: {{.*}} arith.constant dense{{.*}} with shape ratio{{.*}}
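+ // 3x4x4x8 does not divide evenly by the unroll target shape, so no "matched" line is expected for the next constant.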
%cst_d = arith.constant dense<1.000000e+00> : vector<3x4x4x8xf32>
- // TEST-3x4x4x8:matched: {{.*}} arith.constant dense{{.*}} with shape ratio: 1, 1, 2, 16
+ // TEST-3x4x5x8:matched: {{.*}} arith.constant dense{{.*}} with shape ratio: 1, 1, 2, 16
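+ // The ratio is the per-dimension quotient of the constant's shape by the target shape; 1, 1, 2, 16 for 1x2x10x32 implies a 1x2x5x2 target here.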
%cst_e = arith.constant dense<1.000000e+00> : vector<1x2x10x32xf32>
// Nothing should be matched in this last block.
// CHECK-NEXT: [[HIGH1:%.+]] = vector.extract [[BCAST0]][1] : vector<2xi32>
// CHECK-NEXT: {{%.+}}, {{%.+}} = arith.addui_extended [[LOW0]], [[LOW1]] : i32, i1
// CHECK: [[RES:%.+]] = llvm.bitcast {{%.+}} : vector<2xi32> to i64
-// CHECK-NEXt: return [[RES]] : i64
+// CHECK-NEXT: return [[RES]] : i64
func.func @emulate_me_please(%x : i64) -> i64 {
%r = arith.addi %x, %x : i64
return %r : i64
// NO-DROP-LABEL: func @return_arg
// NO-DROP-SAME: %[[A:.*]]: memref<?xf32
-// No_DROP: return %[[A]]
+// NO-DROP: return %[[A]]
(index, index, index) -> ()
// CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref<i32>) schedule(static)
- // CHECK-SAMe: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
+ // CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}})
"omp.wsloop" (%lb, %ub, %step, %data_var, %linear_var) ({
^bb0(%iv: index):
omp.yield
}
// -----
-/// CHECK-LABEL: transpose_conv2d
+// CHECK-LABEL: transpose_conv2d
func.func @test_transpose_conv2d(%arg0: tensor<1x32x32x8xf32>, %arg1: tensor<16x1x1x8xf32>, %arg2: tensor<16xf32>) -> tensor<1x32x32x16xf32> {
%0 = "tosa.transpose_conv2d"(%arg0, %arg1, %arg2) {out_pad = [0, 0, 0, 0], out_shape = [1, 32, 32, 16], stride = [1, 1]} : (tensor<1x32x32x8xf32>, tensor<16x1x1x8xf32>, tensor<16xf32>) -> tensor<1x32x32x16xf32>
return %0 : tensor<1x32x32x16xf32>
// -----
-// CHECK-LABLE: @test_table_static
+// CHECK-LABEL: @test_table_static
func.func @test_table_static(%arg0 : tensor<4x5xi16>, %arg1 : tensor<513xi16>) -> () {
// CHECK:"tosa.table"(%arg0, %arg1) : (tensor<4x5xi16>, tensor<513xi16>) -> tensor<4x5xi16>
%0 = "tosa.table"(%arg0, %arg1) : (tensor<4x5xi16>, tensor<513xi16>) -> tensor<?x?xi16>
// -----
-// CHECK-LABLE: @test_table_dynamic
+// CHECK-LABEL: @test_table_dynamic
func.func @test_table_dynamic(%arg0 : tensor<4x?xi16>, %arg1 : tensor<513xi16>) -> () {
// CHECK:"tosa.table"(%arg0, %arg1) : (tensor<4x?xi16>, tensor<513xi16>) -> tensor<4x?xi16>
%0 = "tosa.table"(%arg0, %arg1) : (tensor<4x?xi16>, tensor<513xi16>) -> tensor<?x?xi16>
}
// CHECK-LABEL: func @cancel_shape_cast
-// FIXME: PR49590
-// HECK-SAME: %[[A:.*]]: vector<16xf32>
-// HECK: return %[[A]] : vector<16xf32>
+// CHECK-SAME: %[[A:.*]]: vector<16xf32>
+// CHECK: return %[[A]] : vector<16xf32>
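+// The shape casts cancel, so the function should return the argument unchanged.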
func.func @cancel_shape_cast(%arg0: vector<16xf32>) -> vector<16xf32> {
%0 = vector.shape_cast %arg0 : vector<16xf32> to vector<4x4xf32>
%13 = spirv.Ordered %arg0, %arg1 : f32
// CHECK: spirv.Unordered
%14 = spirv.Unordered %arg0, %arg1 : f32
- // CHCK: spirv.IsNan
+ // CHECK: spirv.IsNan
%15 = spirv.IsNan %arg0 : f32
- // CHCK: spirv.IsInf
+ // CHECK: spirv.IsInf
%16 = spirv.IsInf %arg1 : f32
spirv.Return
}
%pi_over_4 = arith.constant 0.78539816339 : f32
call @cos_f32(%pi_over_4) : (f32) -> ()
- //// CHECK: 0
+ // CHECK: 0
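+ // cos(pi/2) = 0.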
%pi_over_2 = arith.constant 1.57079632679 : f32
call @cos_f32(%pi_over_2) : (f32) -> ()
- /// CHECK: -1
+ // CHECK: -1
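+ // cos(pi) = -1.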
%pi = arith.constant 3.14159265359 : f32
call @cos_f32(%pi) : (f32) -> ()