From bdf6ba2d30a01f3b01d499558a0753a3e9c4b019 Mon Sep 17 00:00:00 2001 From: "Kevin P. Neal" Date: Tue, 12 Oct 2021 13:42:58 -0400 Subject: [PATCH] [FPEnv][InstSimplify] Precommit tests: Enable more folds for constrained fsub Precommit tests for D107285 as requested. TODO notes left at individual functions also as requested. --- llvm/test/Transforms/InstSimplify/strictfp-fsub.ll | 705 +++++++++++++++++++++ 1 file changed, 705 insertions(+) create mode 100644 llvm/test/Transforms/InstSimplify/strictfp-fsub.ll diff --git a/llvm/test/Transforms/InstSimplify/strictfp-fsub.ll b/llvm/test/Transforms/InstSimplify/strictfp-fsub.ll new file mode 100644 index 0000000..5db5310 --- /dev/null +++ b/llvm/test/Transforms/InstSimplify/strictfp-fsub.ll @@ -0,0 +1,705 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -instsimplify -S | FileCheck %s + +; +; constrained fsub +; + +; +; fsub X, +0 ==> X +; + +define float @fsub_x_p0_defaultenv(float %a) #0 { +; CHECK-LABEL: @fsub_x_p0_defaultenv( +; CHECK-NEXT: ret float [[A:%.*]] +; + %ret = call float @llvm.experimental.constrained.fsub.f32(float %a, float 0.0, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %ret +} + +; Missing nnan: must not fire. +define float @fsub_x_p0_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fsub_x_p0_ebmaytrap( +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[A:%.*]], float 0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0:[0-9]+]] +; CHECK-NEXT: ret float [[RET]] +; + %ret = call float @llvm.experimental.constrained.fsub.f32(float %a, float 0.0, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; TODO: This will fold if we allow non-default floating point environments. +define float @fsub_nnan_x_p0_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fsub_nnan_x_p0_ebmaytrap( +; CHECK-NEXT: [[RET:%.*]] = call nnan float @llvm.experimental.constrained.fsub.f32(float [[A:%.*]], float 0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %ret = call nnan float @llvm.experimental.constrained.fsub.f32(float %a, float 0.0, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; Missing nnan: must not fire. +define float @fsub_x_p0_ebstrict(float %a) #0 { +; CHECK-LABEL: @fsub_x_p0_ebstrict( +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[A:%.*]], float 0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %ret = call float @llvm.experimental.constrained.fsub.f32(float %a, float 0.0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; TODO: This will fold if we allow non-default floating point environments. +; TODO: The instruction is expected to remain, but the result isn't used. +define float @fsub_nnan_x_p0_ebstrict(float %a) #0 { +; CHECK-LABEL: @fsub_nnan_x_p0_ebstrict( +; CHECK-NEXT: [[RET:%.*]] = call nnan float @llvm.experimental.constrained.fsub.f32(float [[A:%.*]], float 0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %ret = call nnan float @llvm.experimental.constrained.fsub.f32(float %a, float 0.0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; Test with a fast math flag set but that flag is not "nnan". 
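+; ("ninf" stands in for an arbitrary fast-math flag other than "nnan"; the fold is gated on "nnan" specifically, so the call is expected to remain.)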
+define float @fsub_ninf_x_p0_ebstrict(float %a) #0 { +; CHECK-LABEL: @fsub_ninf_x_p0_ebstrict( +; CHECK-NEXT: [[RET:%.*]] = call ninf float @llvm.experimental.constrained.fsub.f32(float [[A:%.*]], float 0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %ret = call ninf float @llvm.experimental.constrained.fsub.f32(float %a, float 0.0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; +; fsub X, -0 ==> X, when we know X is not -0 +; (fast math flag: nsz) +; + +define float @fold_fsub_nsz_x_n0_defaultenv(float %a) #0 { +; CHECK-LABEL: @fold_fsub_nsz_x_n0_defaultenv( +; CHECK-NEXT: ret float [[A:%.*]] +; + %sub = call nsz float @llvm.experimental.constrained.fsub.f32(float %a, float -0.0, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %sub +} + +; Missing nnan: must not fire. +define float @fold_fsub_nsz_x_n0_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fold_fsub_nsz_x_n0_ebmaytrap( +; CHECK-NEXT: [[SUB:%.*]] = call nsz float @llvm.experimental.constrained.fsub.f32(float [[A:%.*]], float -0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[SUB]] +; + %sub = call nsz float @llvm.experimental.constrained.fsub.f32(float %a, float -0.0, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %sub +} + +; TODO: This will fold if we allow non-default floating point environments. +define float @fold_fsub_nnan_nsz_x_n0_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fold_fsub_nnan_nsz_x_n0_ebmaytrap( +; CHECK-NEXT: [[SUB:%.*]] = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float [[A:%.*]], float -0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[SUB]] +; + %sub = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float %a, float -0.0, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %sub +} + +; Missing nnan: must not fire. +define float @fold_fsub_nsz_x_n0_ebstrict(float %a) #0 { +; CHECK-LABEL: @fold_fsub_nsz_x_n0_ebstrict( +; CHECK-NEXT: [[SUB:%.*]] = call nsz float @llvm.experimental.constrained.fsub.f32(float [[A:%.*]], float -0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[SUB]] +; + %sub = call nsz float @llvm.experimental.constrained.fsub.f32(float %a, float -0.0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %sub +} + +; TODO: This will fold if we allow non-default floating point environments. +; TODO: The instruction is expected to remain, but the result isn't used. 
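+; That is, once this folds the expected form is roughly:
+;   %sub = call nnan nsz float @llvm.experimental.constrained.fsub.f32(...) #0   ; kept for its exception side effect
+;   ret float %a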
+define float @fold_fsub_nsz_nnan_x_n0_ebstrict(float %a) #0 { +; CHECK-LABEL: @fold_fsub_nsz_nnan_x_n0_ebstrict( +; CHECK-NEXT: [[SUB:%.*]] = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float [[A:%.*]], float -0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[SUB]] +; + %sub = call nsz nnan float @llvm.experimental.constrained.fsub.f32(float %a, float -0.0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %sub +} + +; +; fsub X, -0 ==> X, when we know X is not -0 +; (No "nsz" flags) +; + +define float @fold_fsub_fabs_x_n0_defaultenv(float %a) #0 { +; CHECK-LABEL: @fold_fsub_fabs_x_n0_defaultenv( +; CHECK-NEXT: [[ABSA:%.*]] = call float @llvm.fabs.f32(float [[A:%.*]]) +; CHECK-NEXT: ret float [[ABSA]] +; + %absa = call float @llvm.fabs.f32(float %a) + %sub = call float @llvm.experimental.constrained.fsub.f32(float %absa, float -0.0, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %sub +} + +; Missing nnan: must not fire. +define float @fold_fsub_fabs_x_n0_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fold_fsub_fabs_x_n0_ebmaytrap( +; CHECK-NEXT: [[ABSA:%.*]] = call float @llvm.fabs.f32(float [[A:%.*]]) +; CHECK-NEXT: [[SUB:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[ABSA]], float -0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[SUB]] +; + %absa = call float @llvm.fabs.f32(float %a) + %sub = call float @llvm.experimental.constrained.fsub.f32(float %absa, float -0.0, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %sub +} + +; TODO: This will fold if we allow non-default floating point environments. +define float @fold_fsub_fabs_nnan_x_n0_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fold_fsub_fabs_nnan_x_n0_ebmaytrap( +; CHECK-NEXT: [[ABSA:%.*]] = call float @llvm.fabs.f32(float [[A:%.*]]) +; CHECK-NEXT: [[SUB:%.*]] = call nnan float @llvm.experimental.constrained.fsub.f32(float [[ABSA]], float -0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[SUB]] +; + %absa = call float @llvm.fabs.f32(float %a) + %sub = call nnan float @llvm.experimental.constrained.fsub.f32(float %absa, float -0.0, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %sub +} + +; Missing nnan: must not fire. +define float @fold_fsub_fabs_x_n0_ebstrict(float %a) #0 { +; CHECK-LABEL: @fold_fsub_fabs_x_n0_ebstrict( +; CHECK-NEXT: [[ABSA:%.*]] = call float @llvm.fabs.f32(float [[A:%.*]]) +; CHECK-NEXT: [[SUB:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[ABSA]], float -0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[SUB]] +; + %absa = call float @llvm.fabs.f32(float %a) + %sub = call float @llvm.experimental.constrained.fsub.f32(float %absa, float -0.0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %sub +} + +; TODO: This will fold if we allow non-default floating point environments. +; TODO: The instruction is expected to remain, but the result isn't used. 
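+; (fabs always clears the sign bit, so %absa cannot be -0.0 and this group needs no "nsz" flag.)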
+define float @fold_fsub_fabs_nnan_x_n0_ebstrict(float %a) #0 { +; CHECK-LABEL: @fold_fsub_fabs_nnan_x_n0_ebstrict( +; CHECK-NEXT: [[ABSA:%.*]] = call float @llvm.fabs.f32(float [[A:%.*]]) +; CHECK-NEXT: [[SUB:%.*]] = call nnan float @llvm.experimental.constrained.fsub.f32(float [[ABSA]], float -0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[SUB]] +; + %absa = call float @llvm.fabs.f32(float %a) + %sub = call nnan float @llvm.experimental.constrained.fsub.f32(float %absa, float -0.0, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %sub +} + +; TODO: This can't fire currently due to missing constrained support: +define float @fold_fsub_sitofp_x_n0_defaultenv(i32 %a) #0 { +; CHECK-LABEL: @fold_fsub_sitofp_x_n0_defaultenv( +; CHECK-NEXT: [[FPA:%.*]] = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: [[SUB:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[FPA]], float -0.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: ret float [[SUB]] +; + %fpa = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + %sub = call float @llvm.experimental.constrained.fsub.f32(float %fpa, float -0.0, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %sub +} + +; +; fsub -0.0, (fneg X) ==> X +; + +define float @fsub_fneg_n0_fnX_defaultenv(float %a) #0 { +; CHECK-LABEL: @fsub_fneg_n0_fnX_defaultenv( +; CHECK-NEXT: ret float [[A:%.*]] +; + %nega = fneg float %a + %ret = call float @llvm.experimental.constrained.fsub.f32(float -0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %ret +} + +; Missing nnan: must not fire. +define float @fsub_fneg_n0_fnX_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fsub_fneg_n0_fnX_ebmaytrap( +; CHECK-NEXT: [[NEGA:%.*]] = fneg float [[A:%.*]] +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float -0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = fneg float %a + %ret = call float @llvm.experimental.constrained.fsub.f32(float -0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; TODO: This will fold if we allow non-default floating point environments. +define float @fsub_fneg_nnan_n0_fnX_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fsub_fneg_nnan_n0_fnX_ebmaytrap( +; CHECK-NEXT: [[NEGA:%.*]] = fneg float [[A:%.*]] +; CHECK-NEXT: [[RET:%.*]] = call nnan float @llvm.experimental.constrained.fsub.f32(float -0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = fneg float %a + %ret = call nnan float @llvm.experimental.constrained.fsub.f32(float -0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; Missing nnan: must not fire. 
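+; (Without "nnan", a signaling-NaN operand could raise an invalid exception that the fold would drop.)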
+define float @fsub_fneg_n0_fnX_ebstrict(float %a) #0 { +; CHECK-LABEL: @fsub_fneg_n0_fnX_ebstrict( +; CHECK-NEXT: [[NEGA:%.*]] = fneg float [[A:%.*]] +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float -0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = fneg float %a + %ret = call float @llvm.experimental.constrained.fsub.f32(float -0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; TODO: This will fold if we allow non-default floating point environments. +; TODO: The instruction is expected to remain, but the result isn't used. +define float @fsub_fneg_nnan_n0_fnX_ebstrict(float %a) #0 { +; CHECK-LABEL: @fsub_fneg_nnan_n0_fnX_ebstrict( +; CHECK-NEXT: [[NEGA:%.*]] = fneg float [[A:%.*]] +; CHECK-NEXT: [[RET:%.*]] = call nnan float @llvm.experimental.constrained.fsub.f32(float -0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = fneg float %a + %ret = call nnan float @llvm.experimental.constrained.fsub.f32(float -0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; +; fsub -0.0, (fsub -0.0, X) ==> X +; + +; TODO: This won't fire without m_FNeg() knowing the constrained intrinsics. +define float @fsub_fsub_n0_fnX_defaultenv(float %a) #0 { +; CHECK-LABEL: @fsub_fsub_n0_fnX_defaultenv( +; CHECK-NEXT: [[NEGA:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float -0.000000e+00, float [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float -0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = call float @llvm.experimental.constrained.fsub.f32(float -0.0, float %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + %ret = call float @llvm.experimental.constrained.fsub.f32(float -0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %ret +} + +; Missing nnan: must not fire. +define float @fsub_fsub_n0_fnX_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fsub_fsub_n0_fnX_ebmaytrap( +; CHECK-NEXT: [[NEGA:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float -0.000000e+00, float [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float -0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = call float @llvm.experimental.constrained.fsub.f32(float -0.0, float %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + %ret = call float @llvm.experimental.constrained.fsub.f32(float -0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; TODO: This won't fire without m_FNeg() knowing the constrained intrinsics. 
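+; (The inner call spells fneg as "fsub -0.0, %a"; m_FNeg() does not yet look through the constrained intrinsic form.)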
+define float @fsub_fsub_nnan_n0_fnX_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fsub_fsub_nnan_n0_fnX_ebmaytrap( +; CHECK-NEXT: [[NEGA:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float -0.000000e+00, float [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call nnan float @llvm.experimental.constrained.fsub.f32(float -0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = call float @llvm.experimental.constrained.fsub.f32(float -0.0, float %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + %ret = call nnan float @llvm.experimental.constrained.fsub.f32(float -0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; Missing nnan: must not fire. +define float @fsub_fsub_n0_fnX_ebstrict(float %a) #0 { +; CHECK-LABEL: @fsub_fsub_n0_fnX_ebstrict( +; CHECK-NEXT: [[NEGA:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float -0.000000e+00, float [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float -0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = call float @llvm.experimental.constrained.fsub.f32(float -0.0, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + %ret = call float @llvm.experimental.constrained.fsub.f32(float -0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; TODO: This won't fire without m_FNeg() knowing the constrained intrinsics. +define float @fsub_fsub_nnan_n0_fnX_ebstrict(float %a) #0 { +; CHECK-LABEL: @fsub_fsub_nnan_n0_fnX_ebstrict( +; CHECK-NEXT: [[NEGA:%.*]] = call nnan float @llvm.experimental.constrained.fsub.f32(float -0.000000e+00, float [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call nnan float @llvm.experimental.constrained.fsub.f32(float -0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = call nnan float @llvm.experimental.constrained.fsub.f32(float -0.0, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + %ret = call nnan float @llvm.experimental.constrained.fsub.f32(float -0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; +; fsub 0.0, (fneg X) ==> X if signed zeros are ignored. +; + +define float @fsub_fneg_nsz_p0_fnX_defaultenv(float %a) #0 { +; CHECK-LABEL: @fsub_fneg_nsz_p0_fnX_defaultenv( +; CHECK-NEXT: ret float [[A:%.*]] +; + %nega = fneg float %a + %ret = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %ret +} + +; Missing nnan: must not fire. 
+define float @fsub_fneg_nsz_p0_fnX_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fsub_fneg_nsz_p0_fnX_ebmaytrap( +; CHECK-NEXT: [[NEGA:%.*]] = fneg float [[A:%.*]] +; CHECK-NEXT: [[RET:%.*]] = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = fneg float %a + %ret = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; TODO: This will fold if we allow non-default floating point environments. +define float @fsub_fneg_nsz_nnan_p0_fnX_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fsub_fneg_nsz_nnan_p0_fnX_ebmaytrap( +; CHECK-NEXT: [[NEGA:%.*]] = fneg float [[A:%.*]] +; CHECK-NEXT: [[RET:%.*]] = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float 0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = fneg float %a + %ret = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; Missing nnan: must not fire. +define float @fsub_fneg_nsz_p0_fnX_ebstrict(float %a) #0 { +; CHECK-LABEL: @fsub_fneg_nsz_p0_fnX_ebstrict( +; CHECK-NEXT: [[NEGA:%.*]] = fneg float [[A:%.*]] +; CHECK-NEXT: [[RET:%.*]] = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = fneg float %a + %ret = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; TODO: This will fold if we allow non-default floating point environments. +; TODO: The instruction is expected to remain, but the result isn't used. +define float @fsub_fneg_nnan_nsz_p0_fnX_ebstrict(float %a) #0 { +; CHECK-LABEL: @fsub_fneg_nnan_nsz_p0_fnX_ebstrict( +; CHECK-NEXT: [[NEGA:%.*]] = fneg float [[A:%.*]] +; CHECK-NEXT: [[RET:%.*]] = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float 0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = fneg float %a + %ret = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; +; fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored. +; + +; TODO: Need constrained intrinsic support in m_FNeg() and m_FSub to fire. 
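+; (Unlike the -0.0 form above, "fsub 0.0, X" is only a negation when signed zeros are ignored, hence "nsz" on both calls.)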
+define float @fsub_fsub_p0_nsz_fnX_defaultenv(float %a) #0 { +; CHECK-LABEL: @fsub_fsub_p0_nsz_fnX_defaultenv( +; CHECK-NEXT: [[NEGA:%.*]] = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.000000e+00, float [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + %ret = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %ret +} + +; Missing nnan: must not fire. +define float @fsub_fsub_nsz_p0_fnX_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fsub_fsub_nsz_p0_fnX_ebmaytrap( +; CHECK-NEXT: [[NEGA:%.*]] = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.000000e+00, float [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + %ret = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; TODO: Need constrained intrinsic support in m_FNeg() and m_FSub to fire. +define float @fsub_fsub_nnan_nsz_p0_fnX_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fsub_fsub_nnan_nsz_p0_fnX_ebmaytrap( +; CHECK-NEXT: [[NEGA:%.*]] = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float 0.000000e+00, float [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float 0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + %ret = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; Missing nnan: must not fire. 
+define float @fsub_fsub_nsz_p0_fnX_ebstrict(float %a) #0 { +; CHECK-LABEL: @fsub_fsub_nsz_p0_fnX_ebstrict( +; CHECK-NEXT: [[NEGA:%.*]] = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.000000e+00, float [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + %ret = call nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; TODO: Need constrained intrinsic support in m_FNeg() and m_FSub to fire. +define float @fsub_fsub_nnan_nsz_p0_fnX_ebstrict(float %a) #0 { +; CHECK-LABEL: @fsub_fsub_nnan_nsz_p0_fnX_ebstrict( +; CHECK-NEXT: [[NEGA:%.*]] = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float 0.000000e+00, float [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float 0.000000e+00, float [[NEGA]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %nega = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + %ret = call nnan nsz float @llvm.experimental.constrained.fsub.f32(float 0.0, float %nega, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; +; fsub nnan x, x ==> 0.0 +; + +; Missing nnan: must not fire. +define float @fsub_x_x_defaultenv(float %a) #0 { +; CHECK-LABEL: @fsub_x_x_defaultenv( +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[A:%.*]], float [[A]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %ret = call float @llvm.experimental.constrained.fsub.f32(float %a, float %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %ret +} + +define float @fsub_nnan_x_x_defaultenv(float %a) #0 { +; CHECK-LABEL: @fsub_nnan_x_x_defaultenv( +; CHECK-NEXT: ret float 0.000000e+00 +; + %ret = call nnan float @llvm.experimental.constrained.fsub.f32(float %a, float %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %ret +} + +; Missing nnan: must not fire. +define float @fsub_x_x_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fsub_x_x_ebmaytrap( +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[A:%.*]], float [[A]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %ret = call float @llvm.experimental.constrained.fsub.f32(float %a, float %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; TODO: This will fold if we allow non-default floating point environments. 
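+; ("Non-default floating point environment" means an exception behavior other than "fpexcept.ignore"; InstSimplify currently leaves such calls alone.)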
+define float @fsub_nnan_x_x_ebmaytrap(float %a) #0 { +; CHECK-LABEL: @fsub_nnan_x_x_ebmaytrap( +; CHECK-NEXT: [[RET:%.*]] = call nnan float @llvm.experimental.constrained.fsub.f32(float [[A:%.*]], float [[A]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %ret = call nnan float @llvm.experimental.constrained.fsub.f32(float %a, float %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; Missing nnan: must not fire. +define float @fsub_x_x_ebstrict(float %a) #0 { +; CHECK-LABEL: @fsub_x_x_ebstrict( +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[A:%.*]], float [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %ret = call float @llvm.experimental.constrained.fsub.f32(float %a, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; TODO: This will fold if we allow non-default floating point environments. +; TODO: The instruction is expected to remain, but the result isn't used. +define float @fsub_nnan_x_x_ebstrict(float %a) #0 { +; CHECK-LABEL: @fsub_nnan_x_x_ebstrict( +; CHECK-NEXT: [[RET:%.*]] = call nnan float @llvm.experimental.constrained.fsub.f32(float [[A:%.*]], float [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %ret = call nnan float @llvm.experimental.constrained.fsub.f32(float %a, float %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; +; Y - (Y - X) --> X +; + +; Missing nsz and reassoc: must not fire +define float @fsub_fsub_y_x_x_defaultenv(float %x, float %y) #0 { +; CHECK-LABEL: @fsub_fsub_y_x_x_defaultenv( +; CHECK-NEXT: [[INNER:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[Y:%.*]], float [[X:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[Y]], float [[INNER]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %inner = call float @llvm.experimental.constrained.fsub.f32(float %y, float %x, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + %ret = call float @llvm.experimental.constrained.fsub.f32(float %y, float %inner, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %ret +} + +; TODO: Need constrained intrinsic support in m_c_FAdd() and m_FSub to fire. +define float @fsub_fsub_fmf_y_x_x_defaultenv(float %x, float %y) #0 { +; CHECK-LABEL: @fsub_fsub_fmf_y_x_x_defaultenv( +; CHECK-NEXT: [[INNER:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[Y:%.*]], float [[X:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call reassoc nsz float @llvm.experimental.constrained.fsub.f32(float [[Y]], float [[INNER]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %inner = call float @llvm.experimental.constrained.fsub.f32(float %y, float %x, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + %ret = call nsz reassoc float @llvm.experimental.constrained.fsub.f32(float %y, float %inner, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %ret +} + +; TODO: Consider how alternate rounding modes can break these transforms. 
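+; (For example, under "round.downward" an exact zero difference is -0.0 rather than +0.0, so even x - x ==> +0.0 would be wrong there.)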
+ +; The "fpexcept.maytrap" instruction must _not_ be folded into the +; "fpexcept.ignore" instruction. This must not fire. +define float @fsub_fsub_fmf_y_x_x_ebmaytrap_defaultenv(float %x, float %y) #0 { +; CHECK-LABEL: @fsub_fsub_fmf_y_x_x_ebmaytrap_defaultenv( +; CHECK-NEXT: [[INNER:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[Y:%.*]], float [[X:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call reassoc nsz float @llvm.experimental.constrained.fsub.f32(float [[Y]], float [[INNER]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %inner = call float @llvm.experimental.constrained.fsub.f32(float %y, float %x, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + %ret = call nsz reassoc float @llvm.experimental.constrained.fsub.f32(float %y, float %inner, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %ret +} + +; Missing nsz and reassoc: must not fire +define float @fsub_fsub_y_x_x_ebmaytrap(float %x, float %y) #0 { +; CHECK-LABEL: @fsub_fsub_y_x_x_ebmaytrap( +; CHECK-NEXT: [[INNER:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[Y:%.*]], float [[X:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[Y]], float [[INNER]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %inner = call float @llvm.experimental.constrained.fsub.f32(float %y, float %x, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + %ret = call float @llvm.experimental.constrained.fsub.f32(float %y, float %inner, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; TODO: Need constrained intrinsic support in m_c_FAdd() and m_FSub to fire. 
+define float @fsub_fsub_fmf_y_x_x_ebmaytrap(float %x, float %y) #0 { +; CHECK-LABEL: @fsub_fsub_fmf_y_x_x_ebmaytrap( +; CHECK-NEXT: [[INNER:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[Y:%.*]], float [[X:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call reassoc nsz float @llvm.experimental.constrained.fsub.f32(float [[Y]], float [[INNER]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %inner = call float @llvm.experimental.constrained.fsub.f32(float %y, float %x, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + %ret = call nsz reassoc float @llvm.experimental.constrained.fsub.f32(float %y, float %inner, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; Missing nsz and reassoc: must not fire +define float @fsub_fsub_y_x_x_ebstrict(float %x, float %y) #0 { +; CHECK-LABEL: @fsub_fsub_y_x_x_ebstrict( +; CHECK-NEXT: [[INNER:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[Y:%.*]], float [[X:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[Y]], float [[INNER]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %inner = call float @llvm.experimental.constrained.fsub.f32(float %y, float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + %ret = call float @llvm.experimental.constrained.fsub.f32(float %y, float %inner, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; TODO: Need constrained intrinsic support in m_c_FAdd() and m_FSub to fire. +define float @fsub_fsub_fmf_y_x_x_ebstrict(float %x, float %y) #0 { +; CHECK-LABEL: @fsub_fsub_fmf_y_x_x_ebstrict( +; CHECK-NEXT: [[INNER:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[Y:%.*]], float [[X:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call reassoc nsz float @llvm.experimental.constrained.fsub.f32(float [[Y]], float [[INNER]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %inner = call float @llvm.experimental.constrained.fsub.f32(float %y, float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + %ret = call nsz reassoc float @llvm.experimental.constrained.fsub.f32(float %y, float %inner, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; +; (X + Y) - Y --> X +; TODO: Missing IR matcher support means these won't fire. 
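+; The fold also needs "reassoc" and "nsz": the intermediate rounding of X + Y is discarded, and for X == -0.0, Y == 0.0 the exact result is +0.0.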
+; + +; Missing nsz and reassoc: must not fire +define float @fadd_fsub_x_y_y_defaultenv(float %x, float %y) #0 { +; CHECK-LABEL: @fadd_fsub_x_y_y_defaultenv( +; CHECK-NEXT: [[INNER:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[X:%.*]], float [[Y:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[INNER]], float [[Y]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %inner = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + %ret = call float @llvm.experimental.constrained.fsub.f32(float %inner, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %ret +} + +; TODO: Need constrained intrinsic support in m_c_FAdd() and m_FSub to fire. +define float @fadd_fsub_fmf_x_y_y_defaultenv(float %x, float %y) #0 { +; CHECK-LABEL: @fadd_fsub_fmf_x_y_y_defaultenv( +; CHECK-NEXT: [[INNER:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[X:%.*]], float [[Y:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call reassoc nsz float @llvm.experimental.constrained.fsub.f32(float [[INNER]], float [[Y]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %inner = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + %ret = call nsz reassoc float @llvm.experimental.constrained.fsub.f32(float %inner, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %ret +} + +; The "fpexcept.maytrap" instruction must _not_ be folded into the +; "fpexcept.ignore" instruction. This must not fire. 
+define float @fadd_fsub_fmf_x_y_y_ebmaytrap_defaultenv(float %x, float %y) #0 { +; CHECK-LABEL: @fadd_fsub_fmf_x_y_y_ebmaytrap_defaultenv( +; CHECK-NEXT: [[INNER:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[X:%.*]], float [[Y:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call reassoc nsz float @llvm.experimental.constrained.fsub.f32(float [[INNER]], float [[Y]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %inner = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + %ret = call nsz reassoc float @llvm.experimental.constrained.fsub.f32(float %inner, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0 + ret float %ret +} + +; Missing nsz and reassoc: must not fire +define float @fadd_fsub_x_y_y_ebmaytrap(float %x, float %y) #0 { +; CHECK-LABEL: @fadd_fsub_x_y_y_ebmaytrap( +; CHECK-NEXT: [[INNER:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[X:%.*]], float [[Y:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[INNER]], float [[Y]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %inner = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + %ret = call float @llvm.experimental.constrained.fsub.f32(float %inner, float %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; TODO: Need constrained intrinsic support in m_c_FAdd() and m_FSub to fire. 
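+; (m_c_FAdd() matches a plain fadd with operands in either order, but not the constrained fadd intrinsic used here.)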
+define float @fadd_fsub_fmf_x_y_y_ebmaytrap(float %x, float %y) #0 { +; CHECK-LABEL: @fadd_fsub_fmf_x_y_y_ebmaytrap( +; CHECK-NEXT: [[INNER:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[X:%.*]], float [[Y:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call reassoc nsz float @llvm.experimental.constrained.fsub.f32(float [[INNER]], float [[Y]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %inner = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + %ret = call nsz reassoc float @llvm.experimental.constrained.fsub.f32(float %inner, float %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0 + ret float %ret +} + +; Missing nsz and reassoc: must not fire +define float @fadd_fsub_x_y_y_ebstrict(float %x, float %y) #0 { +; CHECK-LABEL: @fadd_fsub_x_y_y_ebstrict( +; CHECK-NEXT: [[INNER:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[X:%.*]], float [[Y:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[INNER]], float [[Y]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %inner = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + %ret = call float @llvm.experimental.constrained.fsub.f32(float %inner, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +; TODO: Need constrained intrinsic support in m_c_FAdd() and m_FSub to fire. +define float @fadd_fsub_fmf_x_y_y_ebstrict(float %x, float %y) #0 { +; CHECK-LABEL: @fadd_fsub_fmf_x_y_y_ebstrict( +; CHECK-NEXT: [[INNER:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[X:%.*]], float [[Y:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: [[RET:%.*]] = call reassoc nsz float @llvm.experimental.constrained.fsub.f32(float [[INNER]], float [[Y]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]] +; CHECK-NEXT: ret float [[RET]] +; + %inner = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + %ret = call nsz reassoc float @llvm.experimental.constrained.fsub.f32(float %inner, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 + ret float %ret +} + +declare float @llvm.fabs.f32(float) +declare <2 x float> @llvm.fabs.v2f32(<2 x float>) + +declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata) #0 +declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata) #0 + +declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata) #0 + +attributes #0 = { strictfp } -- 2.7.4