From fe963a861851fb5b1463f8d4ab20a1b6138d3efd Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker@arm.com>
Date: Tue, 4 Apr 2023 12:49:44 +0000
Subject: [PATCH] [NFC][InstCombine] Add tests that show bogus combine of SVE
 intrinsics when using strictfp.

---
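The combine under test rewrites a predicated SVE floating-point intrinsic
whose governing predicate is an all-true ptrue into the equivalent
unpredicated IR binop. As an illustrative sketch (the fadd case, taken from
the tests below):

  ; before the combine
  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %1, <vscale x 2 x double> %a, <vscale x 2 x double> %b)

  ; after the combine
  %1 = fadd <vscale x 2 x double> %a, %b

Within a strictfp function the plain fadd is bogus: it assumes the default
floating-point environment, dropping the rounding-mode and FP-exception
semantics the caller requires. Once the combined function is inlined into
another strictfp function, IRBuilder re-emits the operation as
@llvm.experimental.constrained.fadd with default !"round.tonearest" and
!"fpexcept.ignore" metadata, which likewise fails to preserve the strict
semantics.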
 .../InstCombine/AArch64/sve-intrinsic-strictfp.ll  | 94 ++++++++++++++++++++++
 1 file changed, 94 insertions(+)
 create mode 100644 llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-strictfp.ll

diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-strictfp.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-strictfp.ll
new file mode 100644
index 0000000..0272ffb
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-strictfp.ll
@@ -0,0 +1,94 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals
+; RUN: opt -S -passes=inline,instcombine < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+; TODO: We can only lower to constrained intrinsics when the necessary code
+; generation support for scalable vector strict operations exists.
+define <vscale x 2 x double> @replace_fadd_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
+; CHECK: Function Attrs: strictfp
+; CHECK-LABEL: @replace_fadd_intrinsic_double_strictfp(
+; CHECK-NEXT:    [[TMP1:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+;
+  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #1
+  %2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %1, <vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
+  ret <vscale x 2 x double> %2
+}
+
+; NOTE: IRBuilder::CreateBinOp doesn't emit constrained operations directly so
+; rely on function inlining to showcase the problematic transformation.
+define <vscale x 2 x double> @call_replace_fadd_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
+; CHECK: Function Attrs: strictfp
+; CHECK-LABEL: @call_replace_fadd_intrinsic_double_strictfp(
+; CHECK-NEXT:    [[DOTSTRICT:%.*]] = call <vscale x 2 x double> @llvm.experimental.constrained.fadd.nxv2f64(<vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR3:[0-9]+]]
+; CHECK-NEXT:    ret <vscale x 2 x double> [[DOTSTRICT]]
+;
+  %1 = call <vscale x 2 x double> @replace_fadd_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
+  ret <vscale x 2 x double> %1
+}
+
+; TODO: We can only lower to constrained intrinsics when the necessary code
+; generation support for scalable vector strict operations exists.
+define <vscale x 2 x double> @replace_fmul_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
+; CHECK: Function Attrs: strictfp
+; CHECK-LABEL: @replace_fmul_intrinsic_double_strictfp(
+; CHECK-NEXT:    [[TMP1:%.*]] = fmul <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+;
+  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #1
+  %2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %1, <vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
+  ret <vscale x 2 x double> %2
+}
+
+; NOTE: IRBuilder::CreateBinOp doesn't emit constrained operations directly so
+; rely on function inlining to showcase the problematic transformation.
+define <vscale x 2 x double> @call_replace_fmul_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
+; CHECK: Function Attrs: strictfp
+; CHECK-LABEL: @call_replace_fmul_intrinsic_double_strictfp(
+; CHECK-NEXT:    [[DOTSTRICT:%.*]] = call <vscale x 2 x double> @llvm.experimental.constrained.fmul.nxv2f64(<vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR3]]
+; CHECK-NEXT:    ret <vscale x 2 x double> [[DOTSTRICT]]
+;
+  %1 = call <vscale x 2 x double> @replace_fmul_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
+  ret <vscale x 2 x double> %1
+}
+
+; TODO: We can only lower to constrained intrinsics when the necessary code
+; generation support for scalable vector strict operations exists.
+define <vscale x 2 x double> @replace_fsub_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
+; CHECK: Function Attrs: strictfp
+; CHECK-LABEL: @replace_fsub_intrinsic_double_strictfp(
+; CHECK-NEXT:    [[TMP1:%.*]] = fsub <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
+;
+  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #1
+  %2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %1, <vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
+  ret <vscale x 2 x double> %2
+}
+
+; NOTE: IRBuilder::CreateBinOp doesn't emit constrained operations directly so
+; rely on function inlining to showcase the problematic transformation.
+define <vscale x 2 x double> @call_replace_fsub_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
+; CHECK: Function Attrs: strictfp
+; CHECK-LABEL: @call_replace_fsub_intrinsic_double_strictfp(
+; CHECK-NEXT:    [[DOTSTRICT:%.*]] = call <vscale x 2 x double> @llvm.experimental.constrained.fsub.nxv2f64(<vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR3]]
+; CHECK-NEXT:    ret <vscale x 2 x double> [[DOTSTRICT]]
+;
+  %1 = call <vscale x 2 x double> @replace_fsub_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
+  ret <vscale x 2 x double> %1
+}
+
+declare <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
+
+declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32)
+
+attributes #0 = { "target-features"="+sve" strictfp }
+attributes #1 = { strictfp }
+;.
+; CHECK: attributes #[[ATTR0:[0-9]+]] = { strictfp "target-features"="+sve" }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(inaccessiblemem: readwrite) }
+; CHECK: attributes #[[ATTR3]] = { strictfp }
+;.
-- 
2.7.4