From d54b7f059290102b0ff007843ecf0668e833c118 Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Thu, 9 Aug 2018 22:40:08 +0000
Subject: [PATCH] ValueTracking: Start enhancing isKnownNeverNaN

llvm-svn: 339399
---
 llvm/include/llvm/Analysis/ValueTracking.h          |  3 +-
 llvm/lib/Analysis/InstructionSimplify.cpp           | 20 +++----
 llvm/lib/Analysis/ValueTracking.cpp                 | 24 +++++++--
 .../Transforms/InstCombine/InstCombineCompares.cpp  |  4 +-
 .../test/Transforms/InstCombine/known-never-nan.ll  | 61 ++++++++++++++++++++++
 5 files changed, 97 insertions(+), 15 deletions(-)
 create mode 100644 llvm/test/Transforms/InstCombine/known-never-nan.ll

diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index c1a91a8..99b47fb 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -194,7 +194,8 @@ class Value;
   /// Return true if the floating-point scalar value is not a NaN or if the
   /// floating-point vector value has no NaN elements. Return false if a value
   /// could ever be NaN.
-  bool isKnownNeverNaN(const Value *V);
+  bool isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
+                       unsigned Depth = 0);
 
   /// Return true if we can prove that the specified FP value's sign bit is 0.
   ///
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 9d317b4..a64409b 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -1694,7 +1694,8 @@ static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
   return nullptr;
 }
 
-static Value *simplifyAndOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
+static Value *simplifyAndOrOfFCmps(const TargetLibraryInfo *TLI,
+                                   FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
   Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
   Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
   if (LHS0->getType() != RHS0->getType())
@@ -1711,8 +1712,8 @@ static Value *simplifyAndOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
     // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
     // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
     // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
-    if ((isKnownNeverNaN(LHS0) && (LHS1 == RHS0 || LHS1 == RHS1)) ||
-        (isKnownNeverNaN(LHS1) && (LHS0 == RHS0 || LHS0 == RHS1)))
+    if ((isKnownNeverNaN(LHS0, TLI) && (LHS1 == RHS0 || LHS1 == RHS1)) ||
+        (isKnownNeverNaN(LHS1, TLI) && (LHS0 == RHS0 || LHS0 == RHS1)))
       return RHS;
 
     // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
@@ -1723,15 +1724,16 @@ static Value *simplifyAndOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
     // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
     // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
     // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
-    if ((isKnownNeverNaN(RHS0) && (RHS1 == LHS0 || RHS1 == LHS1)) ||
-        (isKnownNeverNaN(RHS1) && (RHS0 == LHS0 || RHS0 == LHS1)))
+    if ((isKnownNeverNaN(RHS0, TLI) && (RHS1 == LHS0 || RHS1 == LHS1)) ||
+        (isKnownNeverNaN(RHS1, TLI) && (RHS0 == LHS0 || RHS0 == LHS1)))
       return LHS;
   }
 
   return nullptr;
 }
 
-static Value *simplifyAndOrOfCmps(Value *Op0, Value *Op1, bool IsAnd) {
+static Value *simplifyAndOrOfCmps(const TargetLibraryInfo *TLI,
+                                  Value *Op0, Value *Op1, bool IsAnd) {
   // Look through casts of the 'and' operands to find compares.
   auto *Cast0 = dyn_cast<CastInst>(Op0);
   auto *Cast1 = dyn_cast<CastInst>(Op1);
@@ -1751,7 +1753,7 @@ static Value *simplifyAndOrOfCmps(Value *Op0, Value *Op1, bool IsAnd) {
   auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
   auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
   if (FCmp0 && FCmp1)
-    V = simplifyAndOrOfFCmps(FCmp0, FCmp1, IsAnd);
+    V = simplifyAndOrOfFCmps(TLI, FCmp0, FCmp1, IsAnd);
 
   if (!V)
     return nullptr;
@@ -1831,7 +1833,7 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
       return Op1;
   }
 
-  if (Value *V = simplifyAndOrOfCmps(Op0, Op1, true))
+  if (Value *V = simplifyAndOrOfCmps(Q.TLI, Op0, Op1, true))
     return V;
 
   // Try some generic simplifications for associative operations.
@@ -1981,7 +1983,7 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
       match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
     return Op0;
 
-  if (Value *V = simplifyAndOrOfCmps(Op0, Op1, false))
+  if (Value *V = simplifyAndOrOfCmps(Q.TLI, Op0, Op1, false))
     return V;
 
   // Try some generic simplifications for associative operations.
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 2aac19a..2484cec 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -2841,10 +2841,10 @@ static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
     default:
       break;
     case Intrinsic::maxnum:
-      return (isKnownNeverNaN(I->getOperand(0)) &&
+      return (isKnownNeverNaN(I->getOperand(0), TLI) &&
              cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
                                              Depth + 1)) ||
-             (isKnownNeverNaN(I->getOperand(1)) &&
+             (isKnownNeverNaN(I->getOperand(1), TLI) &&
              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
                                              Depth + 1));
 
@@ -2909,7 +2909,8 @@ bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
 }
 
-bool llvm::isKnownNeverNaN(const Value *V) {
+bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
+                           unsigned Depth) {
   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
 
   // If we're told that NaNs won't happen, assume they won't.
@@ -2924,6 +2925,23 @@ bool llvm::isKnownNeverNaN(const Value *V) {
   if (auto *CFP = dyn_cast<ConstantFP>(V))
     return !CFP->isNaN();
 
+  if (Depth == MaxDepth)
+    return false;
+
+  if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
+    switch (II->getIntrinsicID()) {
+    case Intrinsic::canonicalize:
+    case Intrinsic::fabs:
+    case Intrinsic::copysign:
+      return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
+    case Intrinsic::sqrt:
+      return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
+             CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
+    default:
+      return false;
+    }
+  }
+
   // Bail out for constant expressions, but try to handle vector constants.
   if (!V->getType()->isVectorTy() || !isa<Constant>(V))
     return false;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 6de92a4..725e1c8 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -5153,11 +5153,11 @@ Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
   // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
   // then canonicalize the operand to 0.0.
   if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
-    if (!match(Op0, m_PosZeroFP()) && isKnownNeverNaN(Op0)) {
+    if (!match(Op0, m_PosZeroFP()) && isKnownNeverNaN(Op0, &TLI)) {
       I.setOperand(0, ConstantFP::getNullValue(Op0->getType()));
       return &I;
     }
-    if (!match(Op1, m_PosZeroFP()) && isKnownNeverNaN(Op1)) {
+    if (!match(Op1, m_PosZeroFP()) && isKnownNeverNaN(Op1, &TLI)) {
       I.setOperand(1, ConstantFP::getNullValue(Op0->getType()));
       return &I;
     }
diff --git a/llvm/test/Transforms/InstCombine/known-never-nan.ll b/llvm/test/Transforms/InstCombine/known-never-nan.ll
new file mode 100644
index 0000000..b36eeaf
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/known-never-nan.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -instcombine %s | FileCheck %s
+
+define i1 @nnan_fabs_src(double %arg) {
+; CHECK-LABEL: @nnan_fabs_src(
+; CHECK-NEXT:    ret i1 true
+;
+  %nnan = fadd nnan double %arg, 1.0
+  %op = call double @llvm.fabs.f64(double %nnan)
+  %tmp = fcmp ord double %op, %op
+  ret i1 %tmp
+}
+
+define i1 @nnan_opicalize_src(double %arg) {
+; CHECK-LABEL: @nnan_opicalize_src(
+; CHECK-NEXT:    ret i1 true
+;
+  %nnan = fadd nnan double %arg, 1.0
+  %op = call double @llvm.canonicalize.f64(double %nnan)
+  %tmp = fcmp ord double %op, %op
+  ret i1 %tmp
+}
+
+define i1 @nnan_copysign_src(double %arg0, double %arg1) {
+; CHECK-LABEL: @nnan_copysign_src(
+; CHECK-NEXT:    ret i1 true
+;
+  %nnan = fadd nnan double %arg0, 1.0
+  %op = call double @llvm.copysign.f64(double %nnan, double %arg1)
+  %tmp = fcmp ord double %op, %op
+  ret i1 %tmp
+}
+
+define i1 @fabs_sqrt_src(double %arg0, double %arg1) {
+; CHECK-LABEL: @fabs_sqrt_src(
+; CHECK-NEXT:    ret i1 true
+;
+  %nnan = fadd nnan double %arg0, 1.0
+  %fabs = call double @llvm.fabs.f64(double %nnan)
+  %op = call double @llvm.sqrt.f64(double %fabs)
+  %tmp = fcmp ord double %op, %op
+  ret i1 %tmp
+}
+
+define i1 @fabs_sqrt_src_maybe_nan(double %arg0, double %arg1) {
+; CHECK-LABEL: @fabs_sqrt_src_maybe_nan(
+; CHECK-NEXT:    [[FABS:%.*]] = call double @llvm.fabs.f64(double [[ARG0:%.*]])
+; CHECK-NEXT:    [[OP:%.*]] = call double @llvm.sqrt.f64(double [[FABS]])
+; CHECK-NEXT:    [[TMP:%.*]] = fcmp ord double [[OP]], 0.000000e+00
+; CHECK-NEXT:    ret i1 [[TMP]]
+;
+  %fabs = call double @llvm.fabs.f64(double %arg0)
+  %op = call double @llvm.sqrt.f64(double %fabs)
+  %tmp = fcmp ord double %op, %op
+  ret i1 %tmp
+}
+
+declare double @llvm.sqrt.f64(double)
+declare double @llvm.fabs.f64(double)
+declare double @llvm.canonicalize.f64(double)
+declare double @llvm.copysign.f64(double, double)
-- 
2.7.4
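
Note for out-of-tree callers (illustrative, not part of the commit): isKnownNeverNaN() now takes a TargetLibraryInfo pointer plus an optional recursion depth, so existing call sites have to thread TLI through the same way the in-tree callers above do. Below is a minimal sketch of one way a legacy-pass-manager pass of this era might obtain TLI and use the new overload; the pass name, its structure, and what it does with the result are assumptions for illustration only.

// Hypothetical example only -- NOT part of this patch.
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Pass.h"

using namespace llvm;

namespace {
struct NeverNaNQueryExample : public FunctionPass {
  static char ID; // Pass identification.
  NeverNaNQueryExample() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // Request TLI so library-call knowledge is available to the query.
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.setPreservesAll();
  }

  bool runOnFunction(Function &F) override {
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    unsigned NumKnown = 0;
    // Count FP-typed instructions that the updated analysis proves non-NaN.
    for (Instruction &I : instructions(F))
      if (I.getType()->isFPOrFPVectorTy() && isKnownNeverNaN(&I, TLI))
        ++NumKnown; // A real pass would act on this fact instead of counting.
    (void)NumKnown;
    return false; // Analysis only; the IR is not modified.
  }
};
} // end anonymous namespace

char NeverNaNQueryExample::ID = 0;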