From c1247f0e74bff00ab9a896a8132318916f3e84a7 Mon Sep 17 00:00:00 2001 From: Amara Emerson Date: Wed, 7 Oct 2020 11:19:54 -0700 Subject: [PATCH] [mlir] Fix build after 322d0afd875df66b36e4810a2b95c20a8f22ab9b due to change in intrinsic overloads. I'd forgotten to run the mlir tests after removing the scalar input overload on the fadd/fmul reductions. This is a quick fix for the mlir bot. --- mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td | 4 +--- mlir/test/Target/llvmir-intrinsics.mlir | 8 ++++---- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td index 9e9237e..aa7cd46 100644 --- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td +++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td @@ -308,9 +308,7 @@ class LLVM_VectorReductionAcc llvm::Function *fn = llvm::Intrinsic::getDeclaration( module, llvm::Intrinsic::vector_reduce_}] # mnem # [{, - { }] # StrJoin.lst, - ListIntSubst.lst)>.result # [{ + { }] # StrJoin.lst>.result # [{ }); auto operands = lookupValues(opInst.getOperands()); llvm::FastMathFlags origFM = builder.getFastMathFlags(); diff --git a/mlir/test/Target/llvmir-intrinsics.mlir b/mlir/test/Target/llvmir-intrinsics.mlir index 7ab440a..ef1ed5a 100644 --- a/mlir/test/Target/llvmir-intrinsics.mlir +++ b/mlir/test/Target/llvmir-intrinsics.mlir @@ -202,13 +202,13 @@ llvm.func @vector_reductions(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>, %a "llvm.intr.vector.reduce.umax"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32 // CHECK: call i32 @llvm.vector.reduce.umin.v8i32 "llvm.intr.vector.reduce.umin"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32 - // CHECK: call float @llvm.vector.reduce.fadd.f32.v8f32 + // CHECK: call float @llvm.vector.reduce.fadd.v8f32 "llvm.intr.vector.reduce.fadd"(%arg0, %arg1) : (!llvm.float, !llvm.vec<8 x float>) -> !llvm.float - // CHECK: call float @llvm.vector.reduce.fmul.f32.v8f32 + // CHECK: call float @llvm.vector.reduce.fmul.v8f32 
"llvm.intr.vector.reduce.fmul"(%arg0, %arg1) : (!llvm.float, !llvm.vec<8 x float>) -> !llvm.float - // CHECK: call reassoc float @llvm.vector.reduce.fadd.f32.v8f32 + // CHECK: call reassoc float @llvm.vector.reduce.fadd.v8f32 "llvm.intr.vector.reduce.fadd"(%arg0, %arg1) {reassoc = true} : (!llvm.float, !llvm.vec<8 x float>) -> !llvm.float - // CHECK: call reassoc float @llvm.vector.reduce.fmul.f32.v8f32 + // CHECK: call reassoc float @llvm.vector.reduce.fmul.v8f32 "llvm.intr.vector.reduce.fmul"(%arg0, %arg1) {reassoc = true} : (!llvm.float, !llvm.vec<8 x float>) -> !llvm.float // CHECK: call i32 @llvm.vector.reduce.xor.v8i32 "llvm.intr.vector.reduce.xor"(%arg2) : (!llvm.vec<8 x i32>) -> !llvm.i32 -- 2.7.4