From: Philip Reames Date: Sat, 5 Dec 2020 23:57:09 +0000 (-0800) Subject: Add recursive decomposition reasoning to isKnownNonEqual X-Git-Tag: llvmorg-13-init~4257 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=8f076291be41467560ebf73738561225d2b67206;p=platform%2Fupstream%2Fllvm.git Add recursive decomposition reasoning to isKnownNonEqual The basic idea is that by looking through operand instructions which don't change the equality result, we can push the existing known bits comparison down past instructions which would obscure them. We have analogous handling in InstSimplify for most - though weirdly not all - of these cases starting from an icmp root. It's a bit unfortunate to duplicate logic, but since my actual goal is to extend BasicAA, the icmp logic doesn't help. (And just makes it hard to test here.) The BasicAA change will be posted separately for review. Differential Revision: https://reviews.llvm.org/D92698 --- diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp index 32e0ca3..a1bb6e2 100644 --- a/llvm/lib/Analysis/ValueTracking.cpp +++ b/llvm/lib/Analysis/ValueTracking.cpp @@ -350,13 +350,14 @@ bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth, return Known.isNegative(); } -static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q); +static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth, + const Query &Q); bool llvm::isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT, bool UseInstrInfo) { - return ::isKnownNonEqual(V1, V2, + return ::isKnownNonEqual(V1, V2, 0, Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT, UseInstrInfo, /*ORE=*/nullptr)); } @@ -2486,7 +2487,8 @@ bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) { } /// Return true if V2 == V1 + X, where X is known non-zero. 
-static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) { +static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth, + const Query &Q) { const BinaryOperator *BO = dyn_cast(V1); if (!BO || BO->getOpcode() != Instruction::Add) return false; @@ -2497,24 +2499,54 @@ static bool isAddOfNonZero(const Value *V1, const Value *V2, const Query &Q) { Op = BO->getOperand(0); else return false; - return isKnownNonZero(Op, 0, Q); + return isKnownNonZero(Op, Depth + 1, Q); } /// Return true if it is known that V1 != V2. -static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q) { +static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth, + const Query &Q) { if (V1 == V2) return false; if (V1->getType() != V2->getType()) // We can't look through casts yet. return false; - if (isAddOfNonZero(V1, V2, Q) || isAddOfNonZero(V2, V1, Q)) + + if (Depth >= MaxAnalysisRecursionDepth) + return false; + + // See if we can recurse through (exactly one of) our operands. + auto *O1 = dyn_cast(V1); + auto *O2 = dyn_cast(V2); + if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) { + switch (O1->getOpcode()) { + default: break; + case Instruction::Add: + case Instruction::Sub: + // Assume operand order has been canonicalized + if (O1->getOperand(0) == O2->getOperand(0)) + return isKnownNonEqual(O1->getOperand(1), O2->getOperand(1), + Depth + 1, Q); + if (O1->getOperand(1) == O2->getOperand(1)) + return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0), + Depth + 1, Q); + break; + case Instruction::SExt: + case Instruction::ZExt: + if (O1->getOperand(0)->getType() == O2->getOperand(0)->getType()) + return isKnownNonEqual(O1->getOperand(0), O2->getOperand(0), + Depth + 1, Q); + break; + }; + } + + if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q)) return true; if (V1->getType()->isIntOrIntVectorTy()) { // Are any known bits in V1 contradictory to known bits in V2? 
If V1 // has a known zero where V2 has a known one, they must not be equal. - KnownBits Known1 = computeKnownBits(V1, 0, Q); - KnownBits Known2 = computeKnownBits(V2, 0, Q); + KnownBits Known1 = computeKnownBits(V1, Depth, Q); + KnownBits Known2 = computeKnownBits(V2, Depth, Q); if (Known1.Zero.intersects(Known2.One) || Known2.Zero.intersects(Known1.One)) diff --git a/llvm/test/Analysis/ValueTracking/known-non-equal.ll b/llvm/test/Analysis/ValueTracking/known-non-equal.ll index ae2251b..664542f 100644 --- a/llvm/test/Analysis/ValueTracking/known-non-equal.ll +++ b/llvm/test/Analysis/ValueTracking/known-non-equal.ll @@ -108,11 +108,7 @@ define i1 @add2(i8 %B, i8 %C) { define i1 @sub1(i8 %B, i8 %C) { ; CHECK-LABEL: @sub1( -; CHECK-NEXT: [[A:%.*]] = add i8 [[B:%.*]], 1 -; CHECK-NEXT: [[A_OP:%.*]] = sub i8 [[A]], [[C:%.*]] -; CHECK-NEXT: [[B_OP:%.*]] = sub i8 [[B]], [[C]] -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A_OP]], [[B_OP]] -; CHECK-NEXT: ret i1 [[CMP]] +; CHECK-NEXT: ret i1 false ; %A = add i8 %B, 1 %A.op = sub i8 %A, %C @@ -124,11 +120,7 @@ define i1 @sub1(i8 %B, i8 %C) { define i1 @sub2(i8 %B, i8 %C) { ; CHECK-LABEL: @sub2( -; CHECK-NEXT: [[A:%.*]] = add i8 [[B:%.*]], 1 -; CHECK-NEXT: [[A_OP:%.*]] = sub i8 [[C:%.*]], [[A]] -; CHECK-NEXT: [[B_OP:%.*]] = sub i8 [[C]], [[B]] -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A_OP]], [[B_OP]] -; CHECK-NEXT: ret i1 [[CMP]] +; CHECK-NEXT: ret i1 false ; %A = add i8 %B, 1 %A.op = sub i8 %C, %A