1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file contains routines that help analyze properties that chains of
12 //===----------------------------------------------------------------------===//
14 #include "llvm/Analysis/ValueTracking.h"
15 #include "llvm/ADT/APFloat.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/SmallSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/AssumeBundleQueries.h"
28 #include "llvm/Analysis/AssumptionCache.h"
29 #include "llvm/Analysis/ConstantFolding.h"
30 #include "llvm/Analysis/EHPersonalities.h"
31 #include "llvm/Analysis/GuardUtils.h"
32 #include "llvm/Analysis/InstructionSimplify.h"
33 #include "llvm/Analysis/Loads.h"
34 #include "llvm/Analysis/LoopInfo.h"
35 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
36 #include "llvm/Analysis/TargetLibraryInfo.h"
37 #include "llvm/IR/Argument.h"
38 #include "llvm/IR/Attributes.h"
39 #include "llvm/IR/BasicBlock.h"
40 #include "llvm/IR/Constant.h"
41 #include "llvm/IR/ConstantRange.h"
42 #include "llvm/IR/Constants.h"
43 #include "llvm/IR/DerivedTypes.h"
44 #include "llvm/IR/DiagnosticInfo.h"
45 #include "llvm/IR/Dominators.h"
46 #include "llvm/IR/Function.h"
47 #include "llvm/IR/GetElementPtrTypeIterator.h"
48 #include "llvm/IR/GlobalAlias.h"
49 #include "llvm/IR/GlobalValue.h"
50 #include "llvm/IR/GlobalVariable.h"
51 #include "llvm/IR/InstrTypes.h"
52 #include "llvm/IR/Instruction.h"
53 #include "llvm/IR/Instructions.h"
54 #include "llvm/IR/IntrinsicInst.h"
55 #include "llvm/IR/Intrinsics.h"
56 #include "llvm/IR/IntrinsicsAArch64.h"
57 #include "llvm/IR/IntrinsicsRISCV.h"
58 #include "llvm/IR/IntrinsicsX86.h"
59 #include "llvm/IR/LLVMContext.h"
60 #include "llvm/IR/Metadata.h"
61 #include "llvm/IR/Module.h"
62 #include "llvm/IR/Operator.h"
63 #include "llvm/IR/PatternMatch.h"
64 #include "llvm/IR/Type.h"
65 #include "llvm/IR/User.h"
66 #include "llvm/IR/Value.h"
67 #include "llvm/Support/Casting.h"
68 #include "llvm/Support/CommandLine.h"
69 #include "llvm/Support/Compiler.h"
70 #include "llvm/Support/ErrorHandling.h"
71 #include "llvm/Support/KnownBits.h"
72 #include "llvm/Support/MathExtras.h"
79 using namespace llvm::PatternMatch;
81 // Controls the number of uses of the value searched for possible
82 // dominating comparisons.
83 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
84 cl::Hidden, cl::init(20));
86 // According to the LangRef, branching on a poison condition is absolutely
87 // immediate full UB. However, historically we haven't implemented that
88 // consistently as we had an important transformation (non-trivial unswitch)
89 // which introduced instances of branch on poison/undef to otherwise well
90 // defined programs. This issue has since been fixed, but the flag is
91 // temporarily retained to easily diagnose potential regressions.
92 static cl::opt<bool> BranchOnPoisonAsUB("branch-on-poison-as-ub",
93 cl::Hidden, cl::init(true));
96 /// Returns the bitwidth of the given scalar or pointer type. For vector types,
97 /// returns the element type's bitwidth.
98 static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
99 if (unsigned BitWidth = Ty->getScalarSizeInBits())
102 return DL.getPointerTypeSizeInBits(Ty);
107 // Simplifying using an assume can only be done in a particular control-flow
108 // context (the context instruction provides that context). If an assume and
109 // the context instruction are not in the same block then the DT helps in
110 // figuring out if we can use it.
112 const DataLayout &DL;
114 const Instruction *CxtI;
115 const DominatorTree *DT;
117 // Unlike the other analyses, this may be a nullptr because not all clients
118 // provide it currently.
119 OptimizationRemarkEmitter *ORE;
121 /// If true, it is safe to use metadata during simplification.
124 Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
125 const DominatorTree *DT, bool UseInstrInfo,
126 OptimizationRemarkEmitter *ORE = nullptr)
127 : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
130 } // end anonymous namespace
132 // Given the provided Value and, potentially, a context instruction, return
133 // the preferred context instruction (if any).
134 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
135 // If we've been provided with a context instruction, then use that (provided
136 // it has been inserted).
137 if (CxtI && CxtI->getParent())
140 // If the value is really an already-inserted instruction, then use that.
141 CxtI = dyn_cast<Instruction>(V);
142 if (CxtI && CxtI->getParent())
148 static const Instruction *safeCxtI(const Value *V1, const Value *V2, const Instruction *CxtI) {
149 // If we've been provided with a context instruction, then use that (provided
150 // it has been inserted).
151 if (CxtI && CxtI->getParent())
154 // If the value is really an already-inserted instruction, then use that.
155 CxtI = dyn_cast<Instruction>(V1);
156 if (CxtI && CxtI->getParent())
159 CxtI = dyn_cast<Instruction>(V2);
160 if (CxtI && CxtI->getParent())
166 static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
167 const APInt &DemandedElts,
168 APInt &DemandedLHS, APInt &DemandedRHS) {
169 // The length of scalable vectors is unknown at compile time, thus we
170 // cannot check their values
171 if (isa<ScalableVectorType>(Shuf->getType()))
175 cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
176 int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
177 DemandedLHS = DemandedRHS = APInt::getZero(NumElts);
178 if (DemandedElts.isZero())
180 // Simple case of a shuffle with zeroinitializer.
181 if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
182 DemandedLHS.setBit(0);
185 for (int i = 0; i != NumMaskElts; ++i) {
186 if (!DemandedElts[i])
188 int M = Shuf->getMaskValue(i);
189 assert(M < (NumElts * 2) && "Invalid shuffle mask constant");
191 // For undef elements, we don't know anything about the common state of
192 // the shuffle result.
196 DemandedLHS.setBit(M % NumElts);
198 DemandedRHS.setBit(M % NumElts);
204 static void computeKnownBits(const Value *V, const APInt &DemandedElts,
205 KnownBits &Known, unsigned Depth, const Query &Q);
207 static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
209 // FIXME: We currently have no way to represent the DemandedElts of a scalable
211 if (isa<ScalableVectorType>(V->getType())) {
216 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
218 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
219 computeKnownBits(V, DemandedElts, Known, Depth, Q);
222 void llvm::computeKnownBits(const Value *V, KnownBits &Known,
223 const DataLayout &DL, unsigned Depth,
224 AssumptionCache *AC, const Instruction *CxtI,
225 const DominatorTree *DT,
226 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
227 ::computeKnownBits(V, Known, Depth,
228 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
231 void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
232 KnownBits &Known, const DataLayout &DL,
233 unsigned Depth, AssumptionCache *AC,
234 const Instruction *CxtI, const DominatorTree *DT,
235 OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
236 ::computeKnownBits(V, DemandedElts, Known, Depth,
237 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
240 static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
241 unsigned Depth, const Query &Q);
243 static KnownBits computeKnownBits(const Value *V, unsigned Depth,
246 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
247 unsigned Depth, AssumptionCache *AC,
248 const Instruction *CxtI,
249 const DominatorTree *DT,
250 OptimizationRemarkEmitter *ORE,
252 return ::computeKnownBits(
253 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
256 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
257 const DataLayout &DL, unsigned Depth,
258 AssumptionCache *AC, const Instruction *CxtI,
259 const DominatorTree *DT,
260 OptimizationRemarkEmitter *ORE,
262 return ::computeKnownBits(
263 V, DemandedElts, Depth,
264 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
267 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
268 const DataLayout &DL, AssumptionCache *AC,
269 const Instruction *CxtI, const DominatorTree *DT,
271 assert(LHS->getType() == RHS->getType() &&
272 "LHS and RHS should have the same type");
273 assert(LHS->getType()->isIntOrIntVectorTy() &&
274 "LHS and RHS should be integers");
275 // Look for an inverted mask: (X & ~M) op (Y & M).
278 if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
279 match(RHS, m_c_And(m_Specific(M), m_Value())))
281 if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
282 match(LHS, m_c_And(m_Specific(M), m_Value())))
287 if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())) ||
288 match(LHS, m_c_And(m_Not(m_Specific(RHS)), m_Value())))
291 // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
295 m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) ||
296 match(LHS, m_c_Xor(m_c_And(m_Specific(RHS), m_Value(Y)), m_Deferred(Y))))
299 // Look for: (A & B) op ~(A | B)
302 if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
303 match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
305 if (match(RHS, m_And(m_Value(A), m_Value(B))) &&
306 match(LHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
309 IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
310 KnownBits LHSKnown(IT->getBitWidth());
311 KnownBits RHSKnown(IT->getBitWidth());
312 computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
313 computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
314 return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
317 bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
318 return !I->user_empty() && all_of(I->users(), [](const User *U) {
319 ICmpInst::Predicate P;
320 return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
324 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
327 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
328 bool OrZero, unsigned Depth,
329 AssumptionCache *AC, const Instruction *CxtI,
330 const DominatorTree *DT, bool UseInstrInfo) {
331 return ::isKnownToBeAPowerOfTwo(
332 V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
335 static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
336 unsigned Depth, const Query &Q);
338 static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
340 bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
341 AssumptionCache *AC, const Instruction *CxtI,
342 const DominatorTree *DT, bool UseInstrInfo) {
343 return ::isKnownNonZero(V, Depth,
344 Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
347 bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
348 unsigned Depth, AssumptionCache *AC,
349 const Instruction *CxtI, const DominatorTree *DT,
352 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
353 return Known.isNonNegative();
356 bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
357 AssumptionCache *AC, const Instruction *CxtI,
358 const DominatorTree *DT, bool UseInstrInfo) {
359 if (auto *CI = dyn_cast<ConstantInt>(V))
360 return CI->getValue().isStrictlyPositive();
362 // TODO: We'd doing two recursive queries here. We should factor this such
363 // that only a single query is needed.
364 return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
365 isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
368 bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
369 AssumptionCache *AC, const Instruction *CxtI,
370 const DominatorTree *DT, bool UseInstrInfo) {
372 computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
373 return Known.isNegative();
376 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
379 bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
380 const DataLayout &DL, AssumptionCache *AC,
381 const Instruction *CxtI, const DominatorTree *DT,
383 return ::isKnownNonEqual(V1, V2, 0,
384 Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
385 UseInstrInfo, /*ORE=*/nullptr));
388 static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
391 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
392 const DataLayout &DL, unsigned Depth,
393 AssumptionCache *AC, const Instruction *CxtI,
394 const DominatorTree *DT, bool UseInstrInfo) {
395 return ::MaskedValueIsZero(
396 V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
399 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
400 unsigned Depth, const Query &Q);
402 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
404 // FIXME: We currently have no way to represent the DemandedElts of a scalable
406 if (isa<ScalableVectorType>(V->getType()))
409 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
411 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
412 return ComputeNumSignBits(V, DemandedElts, Depth, Q);
415 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
416 unsigned Depth, AssumptionCache *AC,
417 const Instruction *CxtI,
418 const DominatorTree *DT, bool UseInstrInfo) {
419 return ::ComputeNumSignBits(
420 V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
423 unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
424 unsigned Depth, AssumptionCache *AC,
425 const Instruction *CxtI,
426 const DominatorTree *DT) {
427 unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
428 return V->getType()->getScalarSizeInBits() - SignBits + 1;
431 static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
432 bool NSW, const APInt &DemandedElts,
433 KnownBits &KnownOut, KnownBits &Known2,
434 unsigned Depth, const Query &Q) {
435 computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);
437 // If one operand is unknown and we have no nowrap information,
438 // the result will be unknown independently of the second operand.
439 if (KnownOut.isUnknown() && !NSW)
442 computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
443 KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
446 static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
447 const APInt &DemandedElts, KnownBits &Known,
448 KnownBits &Known2, unsigned Depth,
450 computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
451 computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
453 bool isKnownNegative = false;
454 bool isKnownNonNegative = false;
455 // If the multiplication is known not to overflow, compute the sign bit.
458 // The product of a number with itself is non-negative.
459 isKnownNonNegative = true;
461 bool isKnownNonNegativeOp1 = Known.isNonNegative();
462 bool isKnownNonNegativeOp0 = Known2.isNonNegative();
463 bool isKnownNegativeOp1 = Known.isNegative();
464 bool isKnownNegativeOp0 = Known2.isNegative();
465 // The product of two numbers with the same sign is non-negative.
466 isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
467 (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
468 // The product of a negative number and a non-negative number is either
470 if (!isKnownNonNegative)
472 (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
473 Known2.isNonZero()) ||
474 (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
478 bool SelfMultiply = Op0 == Op1;
479 // TODO: SelfMultiply can be poison, but not undef.
482 isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1);
483 Known = KnownBits::mul(Known, Known2, SelfMultiply);
485 // Only make use of no-wrap flags if we failed to compute the sign bit
486 // directly. This matters if the multiplication always overflows, in
487 // which case we prefer to follow the result of the direct computation,
488 // though as the program is invoking undefined behaviour we can choose
489 // whatever we like here.
490 if (isKnownNonNegative && !Known.isNegative())
491 Known.makeNonNegative();
492 else if (isKnownNegative && !Known.isNonNegative())
493 Known.makeNegative();
496 void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
498 unsigned BitWidth = Known.getBitWidth();
499 unsigned NumRanges = Ranges.getNumOperands() / 2;
500 assert(NumRanges >= 1);
502 Known.Zero.setAllBits();
503 Known.One.setAllBits();
505 for (unsigned i = 0; i < NumRanges; ++i) {
507 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
509 mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
510 ConstantRange Range(Lower->getValue(), Upper->getValue());
512 // The first CommonPrefixBits of all values in Range are equal.
513 unsigned CommonPrefixBits =
514 (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
515 APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
516 APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
517 Known.One &= UnsignedMax & Mask;
518 Known.Zero &= ~UnsignedMax & Mask;
522 static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
523 SmallVector<const Value *, 16> WorkSet(1, I);
524 SmallPtrSet<const Value *, 32> Visited;
525 SmallPtrSet<const Value *, 16> EphValues;
527 // The instruction defining an assumption's condition itself is always
528 // considered ephemeral to that assumption (even if it has other
529 // non-ephemeral users). See r246696's test case for an example.
530 if (is_contained(I->operands(), E))
533 while (!WorkSet.empty()) {
534 const Value *V = WorkSet.pop_back_val();
535 if (!Visited.insert(V).second)
538 // If all uses of this value are ephemeral, then so is this value.
539 if (llvm::all_of(V->users(), [&](const User *U) {
540 return EphValues.count(U);
545 if (V == I || (isa<Instruction>(V) &&
546 !cast<Instruction>(V)->mayHaveSideEffects() &&
547 !cast<Instruction>(V)->isTerminator())) {
549 if (const User *U = dyn_cast<User>(V))
550 append_range(WorkSet, U->operands());
558 // Is this an intrinsic that cannot be speculated but also cannot trap?
559 bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
560 if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
561 return CI->isAssumeLikeIntrinsic();
566 bool llvm::isValidAssumeForContext(const Instruction *Inv,
567 const Instruction *CxtI,
568 const DominatorTree *DT) {
569 // There are two restrictions on the use of an assume:
570 // 1. The assume must dominate the context (or the control flow must
571 // reach the assume whenever it reaches the context).
572 // 2. The context must not be in the assume's set of ephemeral values
573 // (otherwise we will use the assume to prove that the condition
574 // feeding the assume is trivially true, thus causing the removal of
577 if (Inv->getParent() == CxtI->getParent()) {
578 // If Inv and CtxI are in the same block, check if the assume (Inv) is first
580 if (Inv->comesBefore(CxtI))
583 // Don't let an assume affect itself - this would cause the problems
584 // `isEphemeralValueOf` is trying to prevent, and it would also make
585 // the loop below go out of bounds.
589 // The context comes first, but they're both in the same block.
590 // Make sure there is nothing in between that might interrupt
591 // the control flow, not even CxtI itself.
592 // We limit the scan distance between the assume and its context instruction
593 // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
594 // it can be adjusted if needed (could be turned into a cl::opt).
595 auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
596 if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
599 return !isEphemeralValueOf(Inv, CxtI);
602 // Inv and CxtI are in different blocks.
604 if (DT->dominates(Inv, CxtI))
606 } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
607 // We don't have a DT, but this trivially dominates.
614 static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
615 // v u> y implies v != 0.
616 if (Pred == ICmpInst::ICMP_UGT)
619 // Special-case v != 0 to also handle v != null.
620 if (Pred == ICmpInst::ICMP_NE)
621 return match(RHS, m_Zero());
623 // All other predicates - rely on generic ConstantRange handling.
625 if (!match(RHS, m_APInt(C)))
628 ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
629 return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
632 static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
633 // Use of assumptions is context-sensitive. If we don't have a context, we
635 if (!Q.AC || !Q.CxtI)
638 if (Q.CxtI && V->getType()->isPointerTy()) {
639 SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
640 if (!NullPointerIsDefined(Q.CxtI->getFunction(),
641 V->getType()->getPointerAddressSpace()))
642 AttrKinds.push_back(Attribute::Dereferenceable);
644 if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
648 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
651 CallInst *I = cast<CallInst>(AssumeVH);
652 assert(I->getFunction() == Q.CxtI->getFunction() &&
653 "Got assumption for the wrong function!");
655 // Warning: This loop can end up being somewhat performance sensitive.
656 // We're running this loop for once for each value queried resulting in a
657 // runtime of ~O(#assumes * #values).
659 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
660 "must be an assume intrinsic");
663 CmpInst::Predicate Pred;
664 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
665 if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
668 if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
675 static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
676 unsigned Depth, const Query &Q) {
677 // Use of assumptions is context-sensitive. If we don't have a context, we
679 if (!Q.AC || !Q.CxtI)
682 unsigned BitWidth = Known.getBitWidth();
684 // Refine Known set if the pointer alignment is set by assume bundles.
685 if (V->getType()->isPointerTy()) {
686 if (RetainedKnowledge RK = getKnowledgeValidInContext(
687 V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
688 if (isPowerOf2_64(RK.ArgValue))
689 Known.Zero.setLowBits(Log2_64(RK.ArgValue));
693 // Note that the patterns below need to be kept in sync with the code
694 // in AssumptionCache::updateAffectedValues.
696 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
699 CallInst *I = cast<CallInst>(AssumeVH);
700 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
701 "Got assumption for the wrong function!");
703 // Warning: This loop can end up being somewhat performance sensitive.
704 // We're running this loop for once for each value queried resulting in a
705 // runtime of ~O(#assumes * #values).
707 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
708 "must be an assume intrinsic");
710 Value *Arg = I->getArgOperand(0);
712 if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
713 assert(BitWidth == 1 && "assume operand is not i1?");
717 if (match(Arg, m_Not(m_Specific(V))) &&
718 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
719 assert(BitWidth == 1 && "assume operand is not i1?");
724 // The remaining tests are all recursive, so bail out if we hit the limit.
725 if (Depth == MaxAnalysisRecursionDepth)
728 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
732 // We are attempting to compute known bits for the operands of an assume.
733 // Do not try to use other assumptions for those recursive calls because
734 // that can lead to mutual recursion and a compile-time explosion.
735 // An example of the mutual recursion: computeKnownBits can call
736 // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
739 QueryNoAC.AC = nullptr;
741 // Note that ptrtoint may change the bitwidth.
743 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
745 CmpInst::Predicate Pred;
747 switch (Cmp->getPredicate()) {
750 case ICmpInst::ICMP_EQ:
752 if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
753 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
755 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
756 Known.Zero |= RHSKnown.Zero;
757 Known.One |= RHSKnown.One;
759 } else if (match(Cmp,
760 m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
761 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
763 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
764 KnownBits MaskKnown =
765 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
767 // For those bits in the mask that are known to be one, we can propagate
768 // known bits from the RHS to V.
769 Known.Zero |= RHSKnown.Zero & MaskKnown.One;
770 Known.One |= RHSKnown.One & MaskKnown.One;
771 // assume(~(v & b) = a)
772 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
774 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
776 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
777 KnownBits MaskKnown =
778 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
780 // For those bits in the mask that are known to be one, we can propagate
781 // inverted known bits from the RHS to V.
782 Known.Zero |= RHSKnown.One & MaskKnown.One;
783 Known.One |= RHSKnown.Zero & MaskKnown.One;
785 } else if (match(Cmp,
786 m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
787 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
789 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
791 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
793 // For those bits in B that are known to be zero, we can propagate known
794 // bits from the RHS to V.
795 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
796 Known.One |= RHSKnown.One & BKnown.Zero;
797 // assume(~(v | b) = a)
798 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
800 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
802 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
804 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
806 // For those bits in B that are known to be zero, we can propagate
807 // inverted known bits from the RHS to V.
808 Known.Zero |= RHSKnown.One & BKnown.Zero;
809 Known.One |= RHSKnown.Zero & BKnown.Zero;
811 } else if (match(Cmp,
812 m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
813 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
815 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
817 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
819 // For those bits in B that are known to be zero, we can propagate known
820 // bits from the RHS to V. For those bits in B that are known to be one,
821 // we can propagate inverted known bits from the RHS to V.
822 Known.Zero |= RHSKnown.Zero & BKnown.Zero;
823 Known.One |= RHSKnown.One & BKnown.Zero;
824 Known.Zero |= RHSKnown.One & BKnown.One;
825 Known.One |= RHSKnown.Zero & BKnown.One;
826 // assume(~(v ^ b) = a)
827 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
829 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
831 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
833 computeKnownBits(B, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
835 // For those bits in B that are known to be zero, we can propagate
836 // inverted known bits from the RHS to V. For those bits in B that are
837 // known to be one, we can propagate known bits from the RHS to V.
838 Known.Zero |= RHSKnown.One & BKnown.Zero;
839 Known.One |= RHSKnown.Zero & BKnown.Zero;
840 Known.Zero |= RHSKnown.Zero & BKnown.One;
841 Known.One |= RHSKnown.One & BKnown.One;
842 // assume(v << c = a)
843 } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
845 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
847 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
849 // For those bits in RHS that are known, we can propagate them to known
850 // bits in V shifted to the right by C.
851 RHSKnown.Zero.lshrInPlace(C);
852 Known.Zero |= RHSKnown.Zero;
853 RHSKnown.One.lshrInPlace(C);
854 Known.One |= RHSKnown.One;
855 // assume(~(v << c) = a)
856 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
858 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
860 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
861 // For those bits in RHS that are known, we can propagate them inverted
862 // to known bits in V shifted to the right by C.
863 RHSKnown.One.lshrInPlace(C);
864 Known.Zero |= RHSKnown.One;
865 RHSKnown.Zero.lshrInPlace(C);
866 Known.One |= RHSKnown.Zero;
867 // assume(v >> c = a)
868 } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
870 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
872 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
873 // For those bits in RHS that are known, we can propagate them to known
874 // bits in V shifted to the right by C.
875 Known.Zero |= RHSKnown.Zero << C;
876 Known.One |= RHSKnown.One << C;
877 // assume(~(v >> c) = a)
878 } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
880 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
882 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
883 // For those bits in RHS that are known, we can propagate them inverted
884 // to known bits in V shifted to the right by C.
885 Known.Zero |= RHSKnown.One << C;
886 Known.One |= RHSKnown.Zero << C;
889 case ICmpInst::ICMP_SGE:
890 // assume(v >=_s c) where c is non-negative
891 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
892 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
894 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
896 if (RHSKnown.isNonNegative()) {
897 // We know that the sign bit is zero.
898 Known.makeNonNegative();
902 case ICmpInst::ICMP_SGT:
903 // assume(v >_s c) where c is at least -1.
904 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
905 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
907 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
909 if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
910 // We know that the sign bit is zero.
911 Known.makeNonNegative();
915 case ICmpInst::ICMP_SLE:
916 // assume(v <=_s c) where c is negative
917 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
918 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
920 computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
922 if (RHSKnown.isNegative()) {
923 // We know that the sign bit is one.
924 Known.makeNegative();
928 case ICmpInst::ICMP_SLT:
929 // assume(v <_s c) where c is non-positive
930 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
931 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
933 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
935 if (RHSKnown.isZero() || RHSKnown.isNegative()) {
936 // We know that the sign bit is one.
937 Known.makeNegative();
941 case ICmpInst::ICMP_ULE:
943 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
944 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
946 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
948 // Whatever high bits in c are zero are known to be zero.
949 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
952 case ICmpInst::ICMP_ULT:
954 if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
955 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
957 computeKnownBits(A, Depth+1, QueryNoAC).anyextOrTrunc(BitWidth);
959 // If the RHS is known zero, then this assumption must be wrong (nothing
960 // is unsigned less than zero). Signal a conflict and get out of here.
961 if (RHSKnown.isZero()) {
962 Known.Zero.setAllBits();
963 Known.One.setAllBits();
967 // Whatever high bits in c are zero are known to be zero (if c is a power
968 // of 2, then one more).
969 if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
970 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
972 Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
978 // If assumptions conflict with each other or previous known bits, then we
979 // have a logical fallacy. It's possible that the assumption is not reachable,
980 // so this isn't a real bug. On the other hand, the program may have undefined
981 // behavior, or we might have a bug in the compiler. We can't assert/crash, so
982 // clear out the known bits, try to warn the user, and hope for the best.
983 if (Known.Zero.intersects(Known.One)) {
988 auto *CxtI = const_cast<Instruction *>(Q.CxtI);
989 return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
991 << "Detected conflicting code assumptions. Program may "
992 "have undefined behavior, or compiler may have "
998 /// Compute known bits from a shift operator, including those with a
999 /// non-constant shift amount. Known is the output of this function. Known2 is a
1000 /// pre-allocated temporary with the same bit width as Known and on return
1001 /// contains the known bit of the shift value source. KF is an
1002 /// operator-specific function that, given the known-bits and a shift amount,
1003 /// compute the implied known-bits of the shift operator's result respectively
1004 /// for that shift amount. The results from calling KF are conservatively
1005 /// combined for all permitted shift amounts.
1006 static void computeKnownBitsFromShiftOperator(
1007 const Operator *I, const APInt &DemandedElts, KnownBits &Known,
1008 KnownBits &Known2, unsigned Depth, const Query &Q,
1009 function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
1010 unsigned BitWidth = Known.getBitWidth();
1011 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1012 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1014 // Note: We cannot use Known.Zero.getLimitedValue() here, because if
1015 // BitWidth > 64 and any upper bits are known, we'll end up returning the
1016 // limit value (which implies all bits are known).
1017 uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
1018 uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
1019 bool ShiftAmtIsConstant = Known.isConstant();
1020 bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);
1022 if (ShiftAmtIsConstant) {
1023 Known = KF(Known2, Known);
1025 // If the known bits conflict, this must be an overflowing left shift, so
1026 // the shift result is poison. We can return anything we want. Choose 0 for
1027 // the best folding opportunity.
1028 if (Known.hasConflict())
1034 // If the shift amount could be greater than or equal to the bit-width of the
1035 // LHS, the value could be poison, but bail out because the check below is
1037 // TODO: Should we just carry on?
1038 if (MaxShiftAmtIsOutOfRange) {
1043 // It would be more-clearly correct to use the two temporaries for this
1044 // calculation. Reusing the APInts here to prevent unnecessary allocations.
1047 // If we know the shifter operand is nonzero, we can sometimes infer more
1048 // known bits. However this is expensive to compute, so be lazy about it and
1049 // only compute it when absolutely necessary.
1050 Optional<bool> ShifterOperandIsNonZero;
1052 // Early exit if we can't constrain any well-defined shift amount.
1053 if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
1054 !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
1055 ShifterOperandIsNonZero =
1056 isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1057 if (!*ShifterOperandIsNonZero)
1061 Known.Zero.setAllBits();
1062 Known.One.setAllBits();
1063 for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
1064 // Combine the shifted known input bits only for those shift amounts
1065 // compatible with its known constraints.
1066 if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
1068 if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
1070 // If we know the shifter is nonzero, we may be able to infer more known
1071 // bits. This check is sunk down as far as possible to avoid the expensive
1072 // call to isKnownNonZero if the cheaper checks above fail.
1073 if (ShiftAmt == 0) {
1074 if (!ShifterOperandIsNonZero)
1075 ShifterOperandIsNonZero =
1076 isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
1077 if (*ShifterOperandIsNonZero)
1081 Known = KnownBits::commonBits(
1082 Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
1085 // If the known bits conflict, the result is poison. Return a 0 and hope the
1086 // caller can further optimize that.
1087 if (Known.hasConflict())
1091 static void computeKnownBitsFromOperator(const Operator *I,
1092 const APInt &DemandedElts,
1093 KnownBits &Known, unsigned Depth,
1095 unsigned BitWidth = Known.getBitWidth();
1097 KnownBits Known2(BitWidth);
1098 switch (I->getOpcode()) {
1100 case Instruction::Load:
1102 Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
1103 computeKnownBitsFromRangeMetadata(*MD, Known);
1105 case Instruction::And: {
1106 // If either the LHS or the RHS are Zero, the result is zero.
1107 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1108 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1112 // and(x, add (x, -1)) is a common idiom that always clears the low bit;
1113 // here we handle the more general case of adding any odd number by
1114 // matching the form add(x, add(x, y)) where y is odd.
1115 // TODO: This could be generalized to clearing any bit set in y where the
1116 // following bit is known to be unset in y.
1117 Value *X = nullptr, *Y = nullptr;
1118 if (!Known.Zero[0] && !Known.One[0] &&
1119 match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
1121 computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
1122 if (Known2.countMinTrailingOnes() > 0)
1123 Known.Zero.setBit(0);
1127 case Instruction::Or:
1128 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1129 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1133 case Instruction::Xor:
1134 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
1135 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1139 case Instruction::Mul: {
1140 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1141 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
1142 Known, Known2, Depth, Q);
1145 case Instruction::UDiv: {
1146 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1147 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1148 Known = KnownBits::udiv(Known, Known2);
1151 case Instruction::Select: {
1152 const Value *LHS = nullptr, *RHS = nullptr;
1153 SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
1154 if (SelectPatternResult::isMinOrMax(SPF)) {
1155 computeKnownBits(RHS, Known, Depth + 1, Q);
1156 computeKnownBits(LHS, Known2, Depth + 1, Q);
1159 llvm_unreachable("Unhandled select pattern flavor!");
1161 Known = KnownBits::smax(Known, Known2);
1164 Known = KnownBits::smin(Known, Known2);
1167 Known = KnownBits::umax(Known, Known2);
1170 Known = KnownBits::umin(Known, Known2);
1176 computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
1177 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1179 // Only known if known in both the LHS and RHS.
1180 Known = KnownBits::commonBits(Known, Known2);
1182 if (SPF == SPF_ABS) {
1183 // RHS from matchSelectPattern returns the negation part of abs pattern.
1184 // If the negate has an NSW flag we can assume the sign bit of the result
1185 // will be 0 because that makes abs(INT_MIN) undefined.
1186 if (match(RHS, m_Neg(m_Specific(LHS))) &&
1187 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RHS)))
1188 Known.Zero.setSignBit();
1193 case Instruction::FPTrunc:
1194 case Instruction::FPExt:
1195 case Instruction::FPToUI:
1196 case Instruction::FPToSI:
1197 case Instruction::SIToFP:
1198 case Instruction::UIToFP:
1199 break; // Can't work with floating point.
1200 case Instruction::PtrToInt:
1201 case Instruction::IntToPtr:
1202 // Fall through and handle them the same as zext/trunc.
1204 case Instruction::ZExt:
1205 case Instruction::Trunc: {
1206 Type *SrcTy = I->getOperand(0)->getType();
1208 unsigned SrcBitWidth;
1209 // Note that we handle pointer operands here because of inttoptr/ptrtoint
1210 // which fall through here.
1211 Type *ScalarTy = SrcTy->getScalarType();
1212 SrcBitWidth = ScalarTy->isPointerTy() ?
1213 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
1214 Q.DL.getTypeSizeInBits(ScalarTy);
1216 assert(SrcBitWidth && "SrcBitWidth can't be zero");
1217 Known = Known.anyextOrTrunc(SrcBitWidth);
1218 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1219 Known = Known.zextOrTrunc(BitWidth);
1222 case Instruction::BitCast: {
1223 Type *SrcTy = I->getOperand(0)->getType();
1224 if (SrcTy->isIntOrPtrTy() &&
1225 // TODO: For now, not handling conversions like:
1226 // (bitcast i64 %x to <2 x i32>)
1227 !I->getType()->isVectorTy()) {
1228 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1232 // Handle cast from vector integer type to scalar or vector integer.
1233 auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
1234 if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
1235 !I->getType()->isIntOrIntVectorTy())
1238 // Look through a cast from narrow vector elements to wider type.
1239 // Examples: v4i32 -> v2i64, v3i8 -> v24
1240 unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
1241 if (BitWidth % SubBitWidth == 0) {
1242 // Known bits are automatically intersected across demanded elements of a
1243 // vector. So for example, if a bit is computed as known zero, it must be
1244 // zero across all demanded elements of the vector.
1246 // For this bitcast, each demanded element of the output is sub-divided
1247 // across a set of smaller vector elements in the source vector. To get
1248 // the known bits for an entire element of the output, compute the known
1249 // bits for each sub-element sequentially. This is done by shifting the
1250 // one-set-bit demanded elements parameter across the sub-elements for
1251 // consecutive calls to computeKnownBits. We are using the demanded
1252 // elements parameter as a mask operator.
1254 // The known bits of each sub-element are then inserted into place
1255 // (dependent on endian) to form the full result of known bits.
1256 unsigned NumElts = DemandedElts.getBitWidth();
1257 unsigned SubScale = BitWidth / SubBitWidth;
1258 APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
1259 for (unsigned i = 0; i != NumElts; ++i) {
1260 if (DemandedElts[i])
1261 SubDemandedElts.setBit(i * SubScale);
1264 KnownBits KnownSrc(SubBitWidth);
1265 for (unsigned i = 0; i != SubScale; ++i) {
1266 computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
1268 unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
1269 Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
1274 case Instruction::SExt: {
1275 // Compute the bits in the result that are not present in the input.
1276 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
1278 Known = Known.trunc(SrcBitWidth);
1279 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1280 // If the sign bit of the input is known set or clear, then we know the
1281 // top bits of the result.
1282 Known = Known.sext(BitWidth);
1285 case Instruction::Shl: {
1286 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1287 auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1288 KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
1289 // If this shift has "nsw" keyword, then the result is either a poison
1290 // value or has the same sign bit as the first operand.
1292 if (KnownVal.Zero.isSignBitSet())
1293 Result.Zero.setSignBit();
1294 if (KnownVal.One.isSignBitSet())
1295 Result.One.setSignBit();
1299 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1301 // Trailing zeros of a right-shifted constant never decrease.
1303 if (match(I->getOperand(0), m_APInt(C)))
1304 Known.Zero.setLowBits(C->countTrailingZeros());
1307 case Instruction::LShr: {
1308 auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1309 return KnownBits::lshr(KnownVal, KnownAmt);
1311 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1313 // Leading zeros of a left-shifted constant never decrease.
1315 if (match(I->getOperand(0), m_APInt(C)))
1316 Known.Zero.setHighBits(C->countLeadingZeros());
1319 case Instruction::AShr: {
1320 auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1321 return KnownBits::ashr(KnownVal, KnownAmt);
1323 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1327 case Instruction::Sub: {
1328 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1329 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1330 DemandedElts, Known, Known2, Depth, Q);
1333 case Instruction::Add: {
1334 bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1335 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1336 DemandedElts, Known, Known2, Depth, Q);
1339 case Instruction::SRem:
1340 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1341 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1342 Known = KnownBits::srem(Known, Known2);
1345 case Instruction::URem:
1346 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1347 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1348 Known = KnownBits::urem(Known, Known2);
1350 case Instruction::Alloca:
1351 Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1353 case Instruction::GetElementPtr: {
1354 // Analyze all of the subscripts of this getelementptr instruction
1355 // to determine if we can prove known low zero bits.
1356 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1357 // Accumulate the constant indices in a separate variable
1358 // to minimize the number of calls to computeForAddSub.
1359 APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
1361 gep_type_iterator GTI = gep_type_begin(I);
1362 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1363 // TrailZ can only become smaller, short-circuit if we hit zero.
1364 if (Known.isUnknown())
1367 Value *Index = I->getOperand(i);
1369 // Handle case when index is zero.
1370 Constant *CIndex = dyn_cast<Constant>(Index);
1371 if (CIndex && CIndex->isZeroValue())
1374 if (StructType *STy = GTI.getStructTypeOrNull()) {
1375 // Handle struct member offset arithmetic.
1378 "Access to structure field must be known at compile time");
1380 if (CIndex->getType()->isVectorTy())
1381 Index = CIndex->getSplatValue();
1383 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1384 const StructLayout *SL = Q.DL.getStructLayout(STy);
1385 uint64_t Offset = SL->getElementOffset(Idx);
1386 AccConstIndices += Offset;
1390 // Handle array index arithmetic.
1391 Type *IndexedTy = GTI.getIndexedType();
1392 if (!IndexedTy->isSized()) {
1397 unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
1398 KnownBits IndexBits(IndexBitWidth);
1399 computeKnownBits(Index, IndexBits, Depth + 1, Q);
1400 TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1401 uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
1402 KnownBits ScalingFactor(IndexBitWidth);
1403 // Multiply by current sizeof type.
1404 // &A[i] == A + i * sizeof(*A[i]).
1405 if (IndexTypeSize.isScalable()) {
1406 // For scalable types the only thing we know about sizeof is
1407 // that this is a multiple of the minimum size.
1408 ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
1409 } else if (IndexBits.isConstant()) {
1410 APInt IndexConst = IndexBits.getConstant();
1411 APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
1412 IndexConst *= ScalingFactor;
1413 AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
1417 KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
1419 IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
1421 // If the offsets have a different width from the pointer, according
1422 // to the language reference we need to sign-extend or truncate them
1423 // to the width of the pointer.
1424 IndexBits = IndexBits.sextOrTrunc(BitWidth);
1426 // Note that inbounds does *not* guarantee nsw for the addition, as only
1427 // the offset is signed, while the base address is unsigned.
1428 Known = KnownBits::computeForAddSub(
1429 /*Add=*/true, /*NSW=*/false, Known, IndexBits);
1431 if (!Known.isUnknown() && !AccConstIndices.isZero()) {
1432 KnownBits Index = KnownBits::makeConstant(AccConstIndices);
1433 Known = KnownBits::computeForAddSub(
1434 /*Add=*/true, /*NSW=*/false, Known, Index);
1438 case Instruction::PHI: {
1439 const PHINode *P = cast<PHINode>(I);
1440 BinaryOperator *BO = nullptr;
1441 Value *R = nullptr, *L = nullptr;
1442 if (matchSimpleRecurrence(P, BO, R, L)) {
1443 // Handle the case of a simple two-predecessor recurrence PHI.
1444 // There's a lot more that could theoretically be done here, but
1445 // this is sufficient to catch some interesting cases.
1446 unsigned Opcode = BO->getOpcode();
1448 // If this is a shift recurrence, we know the bits being shifted in.
1449 // We can combine that with information about the start value of the
1450 // recurrence to conclude facts about the result.
1451 if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
1452 Opcode == Instruction::Shl) &&
1453 BO->getOperand(0) == I) {
1455 // We have matched a recurrence of the form:
1456 // %iv = [R, %entry], [%iv.next, %backedge]
1457 // %iv.next = shift_op %iv, L
1459 // Recurse with the phi context to avoid concern about whether facts
1460 // inferred hold at original context instruction. TODO: It may be
1461 // correct to use the original context. IF warranted, explore and
1462 // add sufficient tests to cover.
1465 computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
1467 case Instruction::Shl:
1468 // A shl recurrence will only increase the tailing zeros
1469 Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1471 case Instruction::LShr:
1472 // A lshr recurrence will preserve the leading zeros of the
1474 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1476 case Instruction::AShr:
1477 // An ashr recurrence will extend the initial sign bit
1478 Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1479 Known.One.setHighBits(Known2.countMinLeadingOnes());
1484 // Check for operations that have the property that if
1485 // both their operands have low zero bits, the result
1486 // will have low zero bits.
1487 if (Opcode == Instruction::Add ||
1488 Opcode == Instruction::Sub ||
1489 Opcode == Instruction::And ||
1490 Opcode == Instruction::Or ||
1491 Opcode == Instruction::Mul) {
1492 // Change the context instruction to the "edge" that flows into the
1493 // phi. This is important because that is where the value is actually
1494 // "evaluated" even though it is used later somewhere else. (see also
1498 unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
1499 Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1500 Instruction *LInst = P->getIncomingBlock(1-OpNum)->getTerminator();
1502 // Ok, we have a PHI of the form L op= R. Check for low
1505 computeKnownBits(R, Known2, Depth + 1, RecQ);
1507 // We need to take the minimum number of known bits
1508 KnownBits Known3(BitWidth);
1510 computeKnownBits(L, Known3, Depth + 1, RecQ);
1512 Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1513 Known3.countMinTrailingZeros()));
1515 auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1516 if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1517 // If initial value of recurrence is nonnegative, and we are adding
1518 // a nonnegative number with nsw, the result can only be nonnegative
1519 // or poison value regardless of the number of times we execute the
1520 // add in phi recurrence. If initial value is negative and we are
1521 // adding a negative number with nsw, the result can only be
1522 // negative or poison value. Similar arguments apply to sub and mul.
1524 // (add non-negative, non-negative) --> non-negative
1525 // (add negative, negative) --> negative
1526 if (Opcode == Instruction::Add) {
1527 if (Known2.isNonNegative() && Known3.isNonNegative())
1528 Known.makeNonNegative();
1529 else if (Known2.isNegative() && Known3.isNegative())
1530 Known.makeNegative();
1533 // (sub nsw non-negative, negative) --> non-negative
1534 // (sub nsw negative, non-negative) --> negative
1535 else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1536 if (Known2.isNonNegative() && Known3.isNegative())
1537 Known.makeNonNegative();
1538 else if (Known2.isNegative() && Known3.isNonNegative())
1539 Known.makeNegative();
1542 // (mul nsw non-negative, non-negative) --> non-negative
1543 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1544 Known3.isNonNegative())
1545 Known.makeNonNegative();
1552 // Unreachable blocks may have zero-operand PHI nodes.
1553 if (P->getNumIncomingValues() == 0)
1556 // Otherwise take the unions of the known bit sets of the operands,
1557 // taking conservative care to avoid excessive recursion.
1558 if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1559 // Skip if every incoming value references to ourself.
1560 if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
1563 Known.Zero.setAllBits();
1564 Known.One.setAllBits();
1565 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1566 Value *IncValue = P->getIncomingValue(u);
1567 // Skip direct self references.
1568 if (IncValue == P) continue;
1570 // Change the context instruction to the "edge" that flows into the
1571 // phi. This is important because that is where the value is actually
1572 // "evaluated" even though it is used later somewhere else. (see also
1575 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1577 Known2 = KnownBits(BitWidth);
1578 // Recurse, but cap the recursion to one level, because we don't
1579 // want to waste time spinning around in loops.
1580 computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1581 Known = KnownBits::commonBits(Known, Known2);
1582 // If all bits have been ruled out, there's no need to check
1584 if (Known.isUnknown())
1590 case Instruction::Call:
1591 case Instruction::Invoke:
1592 // If range metadata is attached to this call, set known bits from that,
1593 // and then intersect with known bits based on other properties of the
1596 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1597 computeKnownBitsFromRangeMetadata(*MD, Known);
1598 if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1599 computeKnownBits(RV, Known2, Depth + 1, Q);
1600 Known.Zero |= Known2.Zero;
1601 Known.One |= Known2.One;
1603 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1604 switch (II->getIntrinsicID()) {
1606 case Intrinsic::abs: {
1607 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1608 bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1609 Known = Known2.abs(IntMinIsPoison);
1612 case Intrinsic::bitreverse:
1613 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1614 Known.Zero |= Known2.Zero.reverseBits();
1615 Known.One |= Known2.One.reverseBits();
1617 case Intrinsic::bswap:
1618 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1619 Known.Zero |= Known2.Zero.byteSwap();
1620 Known.One |= Known2.One.byteSwap();
1622 case Intrinsic::ctlz: {
1623 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1624 // If we have a known 1, its position is our upper bound.
1625 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1626 // If this call is poison for 0 input, the result will be less than 2^n.
1627 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1628 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1629 unsigned LowBits = Log2_32(PossibleLZ)+1;
1630 Known.Zero.setBitsFrom(LowBits);
1633 case Intrinsic::cttz: {
1634 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1635 // If we have a known 1, its position is our upper bound.
1636 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1637 // If this call is poison for 0 input, the result will be less than 2^n.
1638 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1639 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1640 unsigned LowBits = Log2_32(PossibleTZ)+1;
1641 Known.Zero.setBitsFrom(LowBits);
1644 case Intrinsic::ctpop: {
1645 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1646 // We can bound the space the count needs. Also, bits known to be zero
1647 // can't contribute to the population.
1648 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1649 unsigned LowBits = Log2_32(BitsPossiblySet)+1;
1650 Known.Zero.setBitsFrom(LowBits);
1651 // TODO: we could bound KnownOne using the lower bound on the number
1652 // of bits which might be set provided by popcnt KnownOne2.
1655 case Intrinsic::fshr:
1656 case Intrinsic::fshl: {
1658 if (!match(I->getOperand(2), m_APInt(SA)))
1661 // Normalize to funnel shift left.
1662 uint64_t ShiftAmt = SA->urem(BitWidth);
1663 if (II->getIntrinsicID() == Intrinsic::fshr)
1664 ShiftAmt = BitWidth - ShiftAmt;
1666 KnownBits Known3(BitWidth);
1667 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1668 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1671 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1673 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1676 case Intrinsic::uadd_sat:
1677 case Intrinsic::usub_sat: {
1678 bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1679 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1680 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1682 // Add: Leading ones of either operand are preserved.
1683 // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1684 // as leading zeros in the result.
1685 unsigned LeadingKnown;
1687 LeadingKnown = std::max(Known.countMinLeadingOnes(),
1688 Known2.countMinLeadingOnes());
1690 LeadingKnown = std::max(Known.countMinLeadingZeros(),
1691 Known2.countMinLeadingOnes());
1693 Known = KnownBits::computeForAddSub(
1694 IsAdd, /* NSW */ false, Known, Known2);
1696 // We select between the operation result and all-ones/zero
1697 // respectively, so we can preserve known ones/zeros.
1699 Known.One.setHighBits(LeadingKnown);
1700 Known.Zero.clearAllBits();
1702 Known.Zero.setHighBits(LeadingKnown);
1703 Known.One.clearAllBits();
1707 case Intrinsic::umin:
1708 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1709 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1710 Known = KnownBits::umin(Known, Known2);
1712 case Intrinsic::umax:
1713 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1714 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1715 Known = KnownBits::umax(Known, Known2);
1717 case Intrinsic::smin:
1718 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1719 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1720 Known = KnownBits::smin(Known, Known2);
1722 case Intrinsic::smax:
1723 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1724 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1725 Known = KnownBits::smax(Known, Known2);
1727 case Intrinsic::x86_sse42_crc32_64_64:
1728 Known.Zero.setBitsFrom(32);
1730 case Intrinsic::riscv_vsetvli:
1731 case Intrinsic::riscv_vsetvlimax:
1732 // Assume that VL output is positive and would fit in an int32_t.
1733 // TODO: VLEN might be capped at 16 bits in a future V spec update.
1735 Known.Zero.setBitsFrom(31);
1737 case Intrinsic::vscale: {
1738 if (!II->getParent() || !II->getFunction() ||
1739 !II->getFunction()->hasFnAttribute(Attribute::VScaleRange))
1742 auto Attr = II->getFunction()->getFnAttribute(Attribute::VScaleRange);
1743 Optional<unsigned> VScaleMax = Attr.getVScaleRangeMax();
1748 unsigned VScaleMin = Attr.getVScaleRangeMin();
1750 // If vscale min = max then we know the exact value at compile time
1751 // and hence we know the exact bits.
1752 if (VScaleMin == VScaleMax) {
1753 Known.One = VScaleMin;
1754 Known.Zero = VScaleMin;
1755 Known.Zero.flipAllBits();
1759 unsigned FirstZeroHighBit = 32 - countLeadingZeros(*VScaleMax);
1760 if (FirstZeroHighBit < BitWidth)
1761 Known.Zero.setBitsFrom(FirstZeroHighBit);
1768 case Instruction::ShuffleVector: {
1769 auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1770 // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1775 // For undef elements, we don't know anything about the common state of
1776 // the shuffle result.
1777 APInt DemandedLHS, DemandedRHS;
1778 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1782 Known.One.setAllBits();
1783 Known.Zero.setAllBits();
1784 if (!!DemandedLHS) {
1785 const Value *LHS = Shuf->getOperand(0);
1786 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1787 // If we don't know any bits, early out.
1788 if (Known.isUnknown())
1791 if (!!DemandedRHS) {
1792 const Value *RHS = Shuf->getOperand(1);
1793 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1794 Known = KnownBits::commonBits(Known, Known2);
1798 case Instruction::InsertElement: {
1799 const Value *Vec = I->getOperand(0);
1800 const Value *Elt = I->getOperand(1);
1801 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1802 // Early out if the index is non-constant or out-of-range.
1803 unsigned NumElts = DemandedElts.getBitWidth();
1804 if (!CIdx || CIdx->getValue().uge(NumElts)) {
1808 Known.One.setAllBits();
1809 Known.Zero.setAllBits();
1810 unsigned EltIdx = CIdx->getZExtValue();
1811 // Do we demand the inserted element?
1812 if (DemandedElts[EltIdx]) {
1813 computeKnownBits(Elt, Known, Depth + 1, Q);
1814 // If we don't know any bits, early out.
1815 if (Known.isUnknown())
1818 // We don't need the base vector element that has been inserted.
1819 APInt DemandedVecElts = DemandedElts;
1820 DemandedVecElts.clearBit(EltIdx);
1821 if (!!DemandedVecElts) {
1822 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1823 Known = KnownBits::commonBits(Known, Known2);
1827 case Instruction::ExtractElement: {
1828 // Look through extract element. If the index is non-constant or
1829 // out-of-range demand all elements, otherwise just the extracted element.
1830 const Value *Vec = I->getOperand(0);
1831 const Value *Idx = I->getOperand(1);
1832 auto *CIdx = dyn_cast<ConstantInt>(Idx);
1833 if (isa<ScalableVectorType>(Vec->getType())) {
1834 // FIXME: there's probably *something* we can do with scalable vectors
1838 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1839 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
1840 if (CIdx && CIdx->getValue().ult(NumElts))
1841 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1842 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1845 case Instruction::ExtractValue:
1846 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1847 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1848 if (EVI->getNumIndices() != 1) break;
1849 if (EVI->getIndices()[0] == 0) {
1850 switch (II->getIntrinsicID()) {
1852 case Intrinsic::uadd_with_overflow:
1853 case Intrinsic::sadd_with_overflow:
1854 computeKnownBitsAddSub(true, II->getArgOperand(0),
1855 II->getArgOperand(1), false, DemandedElts,
1856 Known, Known2, Depth, Q);
1858 case Intrinsic::usub_with_overflow:
1859 case Intrinsic::ssub_with_overflow:
1860 computeKnownBitsAddSub(false, II->getArgOperand(0),
1861 II->getArgOperand(1), false, DemandedElts,
1862 Known, Known2, Depth, Q);
1864 case Intrinsic::umul_with_overflow:
1865 case Intrinsic::smul_with_overflow:
1866 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1867 DemandedElts, Known, Known2, Depth, Q);
1873 case Instruction::Freeze:
1874 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1876 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1881 /// Determine which bits of V are known to be either zero or one and return
1883 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1884 unsigned Depth, const Query &Q) {
1885 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1886 computeKnownBits(V, DemandedElts, Known, Depth, Q);
1890 /// Determine which bits of V are known to be either zero or one and return
1892 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1893 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1894 computeKnownBits(V, Known, Depth, Q);
1898 /// Determine which bits of V are known to be either zero or one and return
1899 /// them in the Known bit set.
1901 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1902 /// we cannot optimize based on the assumption that it is zero without changing
1903 /// it to be an explicit zero. If we don't change it to zero, other code could
1904 /// optimized based on the contradictory assumption that it is non-zero.
1905 /// Because instcombine aggressively folds operations with undef args anyway,
1906 /// this won't lose us code quality.
1908 /// This function is defined on values with integer type, values with pointer
1909 /// type, and vectors of integers. In the case
1910 /// where V is a vector, known zero, and known one values are the
1911 /// same width as the vector element, and the bit is set only if it is true
1912 /// for all of the demanded elements in the vector specified by DemandedElts.
1913 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1914 KnownBits &Known, unsigned Depth, const Query &Q) {
1915 if (!DemandedElts || isa<ScalableVectorType>(V->getType())) {
1916 // No demanded elts or V is a scalable vector, better to assume we don't
1922 assert(V && "No Value?");
1923 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1926 Type *Ty = V->getType();
1927 unsigned BitWidth = Known.getBitWidth();
1929 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1930 "Not integer or pointer type!");
1932 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1934 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1935 "DemandedElt width should equal the fixed vector number of elements");
1937 assert(DemandedElts == APInt(1, 1) &&
1938 "DemandedElt width should be 1 for scalars");
1941 Type *ScalarTy = Ty->getScalarType();
1942 if (ScalarTy->isPointerTy()) {
1943 assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1944 "V and Known should have same BitWidth");
1946 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1947 "V and Known should have same BitWidth");
1952 if (match(V, m_APInt(C))) {
1953 // We know all of the bits for a scalar constant or a splat vector constant!
1954 Known = KnownBits::makeConstant(*C);
1957 // Null and aggregate-zero are all-zeros.
1958 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1962 // Handle a constant vector by taking the intersection of the known bits of
1964 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1965 // We know that CDV must be a vector of integers. Take the intersection of
1967 Known.Zero.setAllBits(); Known.One.setAllBits();
1968 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1969 if (!DemandedElts[i])
1971 APInt Elt = CDV->getElementAsAPInt(i);
1978 if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1979 // We know that CV must be a vector of integers. Take the intersection of
1981 Known.Zero.setAllBits(); Known.One.setAllBits();
1982 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1983 if (!DemandedElts[i])
1985 Constant *Element = CV->getAggregateElement(i);
1986 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1991 const APInt &Elt = ElementCI->getValue();
1998 // Start out not knowing anything.
2001 // We can't imply anything about undefs.
2002 if (isa<UndefValue>(V))
2005 // There's no point in looking through other users of ConstantData for
2006 // assumptions. Confirm that we've handled them all.
2007 assert(!isa<ConstantData>(V) && "Unhandled constant data!");
2009 // All recursive calls that increase depth must come after this.
2010 if (Depth == MaxAnalysisRecursionDepth)
2013 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
2014 // the bits of its aliasee.
2015 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2016 if (!GA->isInterposable())
2017 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
2021 if (const Operator *I = dyn_cast<Operator>(V))
2022 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
2024 // Aligned pointers have trailing zeros - refine Known.Zero set
2025 if (isa<PointerType>(V->getType())) {
2026 Align Alignment = V->getPointerAlignment(Q.DL);
2027 Known.Zero.setLowBits(Log2(Alignment));
2030 // computeKnownBitsFromAssume strictly refines Known.
2031 // Therefore, we run them after computeKnownBitsFromOperator.
2033 // Check whether a nearby assume intrinsic can determine some known bits.
2034 computeKnownBitsFromAssume(V, Known, Depth, Q);
2036 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
2039 /// Try to detect a recurrence that the value of the induction variable is
2040 /// always a power of two (or zero).
2041 static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
2042 unsigned Depth, Query &Q) {
2043 BinaryOperator *BO = nullptr;
2044 Value *Start = nullptr, *Step = nullptr;
2045 if (!matchSimpleRecurrence(PN, BO, Start, Step))
2048 // Initial value must be a power of two.
2049 for (const Use &U : PN->operands()) {
2050 if (U.get() == Start) {
2051 // Initial value comes from a different BB, need to adjust context
2052 // instruction for analysis.
2053 Q.CxtI = PN->getIncomingBlock(U)->getTerminator();
2054 if (!isKnownToBeAPowerOfTwo(Start, OrZero, Depth, Q))
2059 // Except for Mul, the induction variable must be on the left side of the
2060 // increment expression, otherwise its value can be arbitrary.
2061 if (BO->getOpcode() != Instruction::Mul && BO->getOperand(1) != Step)
2064 Q.CxtI = BO->getParent()->getTerminator();
2065 switch (BO->getOpcode()) {
2066 case Instruction::Mul:
2067 // Power of two is closed under multiplication.
2068 return (OrZero || Q.IIQ.hasNoUnsignedWrap(BO) ||
2069 Q.IIQ.hasNoSignedWrap(BO)) &&
2070 isKnownToBeAPowerOfTwo(Step, OrZero, Depth, Q);
2071 case Instruction::SDiv:
2072 // Start value must not be signmask for signed division, so simply being a
2073 // power of two is not sufficient, and it has to be a constant.
2074 if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
2077 case Instruction::UDiv:
2078 // Divisor must be a power of two.
2079 // If OrZero is false, cannot guarantee induction variable is non-zero after
2080 // division, same for Shr, unless it is exact division.
2081 return (OrZero || Q.IIQ.isExact(BO)) &&
2082 isKnownToBeAPowerOfTwo(Step, false, Depth, Q);
2083 case Instruction::Shl:
2084 return OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO);
2085 case Instruction::AShr:
2086 if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
2089 case Instruction::LShr:
2090 return OrZero || Q.IIQ.isExact(BO);
2096 /// Return true if the given value is known to have exactly one
2097 /// bit set when defined. For vectors return true if every element is known to
2098 /// be a power of two when defined. Supports values with integer or pointer
2099 /// types and vectors of integers.
2100 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
2102 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2104 // Attempt to match against constants.
2105 if (OrZero && match(V, m_Power2OrZero()))
2107 if (match(V, m_Power2()))
2110 // 1 << X is clearly a power of two if the one is not shifted off the end. If
2111 // it is shifted off the end then the result is undefined.
2112 if (match(V, m_Shl(m_One(), m_Value())))
2115 // (signmask) >>l X is clearly a power of two if the one is not shifted off
2116 // the bottom. If it is shifted off the bottom then the result is undefined.
2117 if (match(V, m_LShr(m_SignMask(), m_Value())))
2120 // The remaining tests are all recursive, so bail out if we hit the limit.
2121 if (Depth++ == MaxAnalysisRecursionDepth)
2124 Value *X = nullptr, *Y = nullptr;
2125 // A shift left or a logical shift right of a power of two is a power of two
2127 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
2128 match(V, m_LShr(m_Value(X), m_Value()))))
2129 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
2131 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
2132 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
2134 if (const SelectInst *SI = dyn_cast<SelectInst>(V))
2135 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
2136 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
2138 // Peek through min/max.
2139 if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
2140 return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
2141 isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
2144 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
2145 // A power of two and'd with anything is a power of two or zero.
2146 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
2147 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
2149 // X & (-X) is always a power of two or zero.
2150 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
2155 // Adding a power-of-two or zero to the same power-of-two or zero yields
2156 // either the original power-of-two, a larger power-of-two or zero.
2157 if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2158 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2159 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2160 Q.IIQ.hasNoSignedWrap(VOBO)) {
2161 if (match(X, m_And(m_Specific(Y), m_Value())) ||
2162 match(X, m_And(m_Value(), m_Specific(Y))))
2163 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2165 if (match(Y, m_And(m_Specific(X), m_Value())) ||
2166 match(Y, m_And(m_Value(), m_Specific(X))))
2167 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2170 unsigned BitWidth = V->getType()->getScalarSizeInBits();
2171 KnownBits LHSBits(BitWidth);
2172 computeKnownBits(X, LHSBits, Depth, Q);
2174 KnownBits RHSBits(BitWidth);
2175 computeKnownBits(Y, RHSBits, Depth, Q);
2176 // If i8 V is a power of two or zero:
2177 // ZeroBits: 1 1 1 0 1 1 1 1
2178 // ~ZeroBits: 0 0 0 1 0 0 0 0
2179 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2180 // If OrZero isn't set, we cannot give back a zero result.
2181 // Make sure either the LHS or RHS has a bit set.
2182 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2187 // A PHI node is power of two if all incoming values are power of two, or if
2188 // it is an induction variable where in each step its value is a power of two.
2189 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2192 // Check if it is an induction variable and always power of two.
2193 if (isPowerOfTwoRecurrence(PN, OrZero, Depth, RecQ))
2196 // Recursively check all incoming values. Limit recursion to 2 levels, so
2197 // that search complexity is limited to number of operands^2.
2198 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2199 return llvm::all_of(PN->operands(), [&](const Use &U) {
2200 // Value is power of 2 if it is coming from PHI node itself by induction.
2204 // Change the context instruction to the incoming block where it is
2206 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2207 return isKnownToBeAPowerOfTwo(U.get(), OrZero, NewDepth, RecQ);
2211 // An exact divide or right shift can only shift off zero bits, so the result
2212 // is a power of two only if the first operand is a power of two and not
2213 // copying a sign bit (sdiv int_min, 2).
2214 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2215 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2216 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2223 /// Test whether a GEP's result is known to be non-null.
2225 /// Uses properties inherent in a GEP to try to determine whether it is known
2228 /// Currently this routine does not support vector GEPs.
2229 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2231 const Function *F = nullptr;
2232 if (const Instruction *I = dyn_cast<Instruction>(GEP))
2233 F = I->getFunction();
2235 if (!GEP->isInBounds() ||
2236 NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2239 // FIXME: Support vector-GEPs.
2240 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2242 // If the base pointer is non-null, we cannot walk to a null address with an
2243 // inbounds GEP in address space zero.
2244 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2247 // Walk the GEP operands and see if any operand introduces a non-zero offset.
2248 // If so, then the GEP cannot produce a null pointer, as doing so would
2249 // inherently violate the inbounds contract within address space zero.
2250 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2251 GTI != GTE; ++GTI) {
2252 // Struct types are easy -- they must always be indexed by a constant.
2253 if (StructType *STy = GTI.getStructTypeOrNull()) {
2254 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2255 unsigned ElementIdx = OpC->getZExtValue();
2256 const StructLayout *SL = Q.DL.getStructLayout(STy);
2257 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2258 if (ElementOffset > 0)
2263 // If we have a zero-sized type, the index doesn't matter. Keep looping.
2264 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2267 // Fast path the constant operand case both for efficiency and so we don't
2268 // increment Depth when just zipping down an all-constant GEP.
2269 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2275 // We post-increment Depth here because while isKnownNonZero increments it
2276 // as well, when we pop back up that increment won't persist. We don't want
2277 // to recurse 10k times just because we have 10k GEP operands. We don't
2278 // bail completely out because we want to handle constant GEPs regardless
2280 if (Depth++ >= MaxAnalysisRecursionDepth)
2283 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2290 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2291 const Instruction *CtxI,
2292 const DominatorTree *DT) {
2293 if (isa<Constant>(V))
2299 unsigned NumUsesExplored = 0;
2300 for (const auto *U : V->users()) {
2301 // Avoid massive lists
2302 if (NumUsesExplored >= DomConditionsMaxUses)
2306 // If the value is used as an argument to a call or invoke, then argument
2307 // attributes may provide an answer about null-ness.
2308 if (const auto *CB = dyn_cast<CallBase>(U))
2309 if (auto *CalledFunc = CB->getCalledFunction())
2310 for (const Argument &Arg : CalledFunc->args())
2311 if (CB->getArgOperand(Arg.getArgNo()) == V &&
2312 Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2313 DT->dominates(CB, CtxI))
2316 // If the value is used as a load/store, then the pointer must be non null.
2317 if (V == getLoadStorePointerOperand(U)) {
2318 const Instruction *I = cast<Instruction>(U);
2319 if (!NullPointerIsDefined(I->getFunction(),
2320 V->getType()->getPointerAddressSpace()) &&
2321 DT->dominates(I, CtxI))
2325 // Consider only compare instructions uniquely controlling a branch
2327 CmpInst::Predicate Pred;
2328 if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2332 if (cmpExcludesZero(Pred, RHS))
2333 NonNullIfTrue = true;
2334 else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2335 NonNullIfTrue = false;
2339 SmallVector<const User *, 4> WorkList;
2340 SmallPtrSet<const User *, 4> Visited;
2341 for (const auto *CmpU : U->users()) {
2342 assert(WorkList.empty() && "Should be!");
2343 if (Visited.insert(CmpU).second)
2344 WorkList.push_back(CmpU);
2346 while (!WorkList.empty()) {
2347 auto *Curr = WorkList.pop_back_val();
2349 // If a user is an AND, add all its users to the work list. We only
2350 // propagate "pred != null" condition through AND because it is only
2351 // correct to assume that all conditions of AND are met in true branch.
2352 // TODO: Support similar logic of OR and EQ predicate?
2354 if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2355 for (const auto *CurrU : Curr->users())
2356 if (Visited.insert(CurrU).second)
2357 WorkList.push_back(CurrU);
2361 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2362 assert(BI->isConditional() && "uses a comparison!");
2364 BasicBlock *NonNullSuccessor =
2365 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2366 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2367 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2369 } else if (NonNullIfTrue && isGuard(Curr) &&
2370 DT->dominates(cast<Instruction>(Curr), CtxI)) {
2380 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2381 /// ensure that the value it's attached to is never Value? 'RangeType' is
2382 /// is the type of the value described by the range.
2383 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2384 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2385 assert(NumRanges >= 1);
2386 for (unsigned i = 0; i < NumRanges; ++i) {
2387 ConstantInt *Lower =
2388 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2389 ConstantInt *Upper =
2390 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2391 ConstantRange Range(Lower->getValue(), Upper->getValue());
2392 if (Range.contains(Value))
2398 /// Try to detect a recurrence that monotonically increases/decreases from a
2399 /// non-zero starting value. These are common as induction variables.
2400 static bool isNonZeroRecurrence(const PHINode *PN) {
2401 BinaryOperator *BO = nullptr;
2402 Value *Start = nullptr, *Step = nullptr;
2403 const APInt *StartC, *StepC;
2404 if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
2405 !match(Start, m_APInt(StartC)) || StartC->isZero())
2408 switch (BO->getOpcode()) {
2409 case Instruction::Add:
2410 // Starting from non-zero and stepping away from zero can never wrap back
2412 return BO->hasNoUnsignedWrap() ||
2413 (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
2414 StartC->isNegative() == StepC->isNegative());
2415 case Instruction::Mul:
2416 return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2417 match(Step, m_APInt(StepC)) && !StepC->isZero();
2418 case Instruction::Shl:
2419 return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
2420 case Instruction::AShr:
2421 case Instruction::LShr:
2422 return BO->isExact();
2428 /// Return true if the given value is known to be non-zero when defined. For
2429 /// vectors, return true if every demanded element is known to be non-zero when
2430 /// defined. For pointers, if the context instruction and dominator tree are
2431 /// specified, perform context-sensitive analysis and return true if the
2432 /// pointer couldn't possibly be null at the specified instruction.
2433 /// Supports values with integer or pointer type and vectors of integers.
2434 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2436 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2438 if (isa<ScalableVectorType>(V->getType()))
2441 if (auto *C = dyn_cast<Constant>(V)) {
2442 if (C->isNullValue())
2444 if (isa<ConstantInt>(C))
2445 // Must be non-zero due to null test above.
2448 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2449 // See the comment for IntToPtr/PtrToInt instructions below.
2450 if (CE->getOpcode() == Instruction::IntToPtr ||
2451 CE->getOpcode() == Instruction::PtrToInt)
2452 if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2454 Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2455 return isKnownNonZero(CE->getOperand(0), Depth, Q);
2458 // For constant vectors, check that all elements are undefined or known
2459 // non-zero to determine that the whole vector is known non-zero.
2460 if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2461 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2462 if (!DemandedElts[i])
2464 Constant *Elt = C->getAggregateElement(i);
2465 if (!Elt || Elt->isNullValue())
2467 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2473 // A global variable in address space 0 is non null unless extern weak
2474 // or an absolute symbol reference. Other address spaces may have null as a
2475 // valid address for a global, so we can't assume anything.
2476 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2477 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2478 GV->getType()->getAddressSpace() == 0)
2484 if (auto *I = dyn_cast<Instruction>(V)) {
2485 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2486 // If the possible ranges don't contain zero, then the value is
2487 // definitely non-zero.
2488 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2489 const APInt ZeroValue(Ty->getBitWidth(), 0);
2490 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2496 if (isKnownNonZeroFromAssume(V, Q))
2499 // Some of the tests below are recursive, so bail out if we hit the limit.
2500 if (Depth++ >= MaxAnalysisRecursionDepth)
2503 // Check for pointer simplifications.
2505 if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2506 // Alloca never returns null, malloc might.
2507 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2510 // A byval, inalloca may not be null in a non-default addres space. A
2511 // nonnull argument is assumed never 0.
2512 if (const Argument *A = dyn_cast<Argument>(V)) {
2513 if (((A->hasPassPointeeByValueCopyAttr() &&
2514 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2515 A->hasNonNullAttr()))
2519 // A Load tagged with nonnull metadata is never null.
2520 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2521 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2524 if (const auto *Call = dyn_cast<CallBase>(V)) {
2525 if (Call->isReturnNonNull())
2527 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2528 return isKnownNonZero(RP, Depth, Q);
2532 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2535 // Check for recursive pointer simplifications.
2536 if (V->getType()->isPointerTy()) {
2537 // Look through bitcast operations, GEPs, and int2ptr instructions as they
2538 // do not alter the value, or at least not the nullness property of the
2539 // value, e.g., int2ptr is allowed to zero/sign extend the value.
2541 // Note that we have to take special care to avoid looking through
2542 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2543 // as casts that can alter the value, e.g., AddrSpaceCasts.
2544 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
2545 return isGEPKnownNonNull(GEP, Depth, Q);
2547 if (auto *BCO = dyn_cast<BitCastOperator>(V))
2548 return isKnownNonZero(BCO->getOperand(0), Depth, Q);
2550 if (auto *I2P = dyn_cast<IntToPtrInst>(V))
2551 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <=
2552 Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize())
2553 return isKnownNonZero(I2P->getOperand(0), Depth, Q);
2556 // Similar to int2ptr above, we can look through ptr2int here if the cast
2557 // is a no-op or an extend and not a truncate.
2558 if (auto *P2I = dyn_cast<PtrToIntInst>(V))
2559 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <=
2560 Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize())
2561 return isKnownNonZero(P2I->getOperand(0), Depth, Q);
2563 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2565 // X | Y != 0 if X != 0 or Y != 0.
2566 Value *X = nullptr, *Y = nullptr;
2567 if (match(V, m_Or(m_Value(X), m_Value(Y))))
2568 return isKnownNonZero(X, DemandedElts, Depth, Q) ||
2569 isKnownNonZero(Y, DemandedElts, Depth, Q);
2571 // ext X != 0 if X != 0.
2572 if (isa<SExtInst>(V) || isa<ZExtInst>(V))
2573 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q);
2575 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
2576 // if the lowest bit is shifted off the end.
2577 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
2578 // shl nuw can't remove any non-zero bits.
2579 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2580 if (Q.IIQ.hasNoUnsignedWrap(BO))
2581 return isKnownNonZero(X, Depth, Q);
2583 KnownBits Known(BitWidth);
2584 computeKnownBits(X, DemandedElts, Known, Depth, Q);
2588 // shr X, Y != 0 if X is negative. Note that the value of the shift is not
2589 // defined if the sign bit is shifted off the end.
2590 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) {
2591 // shr exact can only shift out zero bits.
2592 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2594 return isKnownNonZero(X, Depth, Q);
2596 KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q);
2597 if (Known.isNegative())
2600 // If the shifter operand is a constant, and all of the bits shifted
2601 // out are known to be zero, and X is known non-zero then at least one
2602 // non-zero bit must remain.
2603 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
2604 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2605 // Is there a known one in the portion not shifted out?
2606 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2608 // Are all the bits to be shifted out known zero?
2609 if (Known.countMinTrailingZeros() >= ShiftVal)
2610 return isKnownNonZero(X, DemandedElts, Depth, Q);
2613 // div exact can only produce a zero if the dividend is zero.
2614 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
2615 return isKnownNonZero(X, DemandedElts, Depth, Q);
2618 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2619 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q);
2620 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q);
2622 // If X and Y are both non-negative (as signed values) then their sum is not
2623 // zero unless both X and Y are zero.
2624 if (XKnown.isNonNegative() && YKnown.isNonNegative())
2625 if (isKnownNonZero(X, DemandedElts, Depth, Q) ||
2626 isKnownNonZero(Y, DemandedElts, Depth, Q))
2629 // If X and Y are both negative (as signed values) then their sum is not
2630 // zero unless both X and Y equal INT_MIN.
2631 if (XKnown.isNegative() && YKnown.isNegative()) {
2632 APInt Mask = APInt::getSignedMaxValue(BitWidth);
2633 // The sign bit of X is set. If some other bit is set then X is not equal
2635 if (XKnown.One.intersects(Mask))
2637 // The sign bit of Y is set. If some other bit is set then Y is not equal
2639 if (YKnown.One.intersects(Mask))
2643 // The sum of a non-negative number and a power of two is not zero.
2644 if (XKnown.isNonNegative() &&
2645 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q))
2647 if (YKnown.isNonNegative() &&
2648 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q))
2652 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) {
2653 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2654 // If X and Y are non-zero then so is X * Y as long as the multiplication
2655 // does not overflow.
2656 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2657 isKnownNonZero(X, DemandedElts, Depth, Q) &&
2658 isKnownNonZero(Y, DemandedElts, Depth, Q))
2661 // (C ? X : Y) != 0 if X != 0 and Y != 0.
2662 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
2663 if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) &&
2664 isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q))
2668 else if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2669 if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
2672 // Check if all incoming values are non-zero using recursion.
2674 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2675 return llvm::all_of(PN->operands(), [&](const Use &U) {
2678 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2679 return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2683 else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2684 const Value *Vec = EEI->getVectorOperand();
2685 const Value *Idx = EEI->getIndexOperand();
2686 auto *CIdx = dyn_cast<ConstantInt>(Idx);
2687 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2688 unsigned NumElts = VecTy->getNumElements();
2689 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
2690 if (CIdx && CIdx->getValue().ult(NumElts))
2691 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2692 return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2696 else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2697 auto *Op = FI->getOperand(0);
2698 if (isKnownNonZero(Op, Depth, Q) &&
2699 isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2701 } else if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
2702 if (II->getIntrinsicID() == Intrinsic::vscale)
2706 KnownBits Known(BitWidth);
2707 computeKnownBits(V, DemandedElts, Known, Depth, Q);
2708 return Known.One != 0;
2711 bool isKnownNonZero(const Value* V, unsigned Depth, const Query& Q) {
2712 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2714 if (isa<ScalableVectorType>(V->getType()))
2717 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2718 APInt DemandedElts =
2719 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
2720 return isKnownNonZero(V, DemandedElts, Depth, Q);
2723 /// If the pair of operators are the same invertible function, return the
2724 /// the operands of the function corresponding to each input. Otherwise,
2725 /// return None. An invertible function is one that is 1-to-1 and maps
2726 /// every input value to exactly one output value. This is equivalent to
2727 /// saying that Op1 and Op2 are equal exactly when the specified pair of
2728 /// operands are equal, (except that Op1 and Op2 may be poison more often.)
2729 static Optional<std::pair<Value*, Value*>>
2730 getInvertibleOperands(const Operator *Op1,
2731 const Operator *Op2) {
2732 if (Op1->getOpcode() != Op2->getOpcode())
2735 auto getOperands = [&](unsigned OpNum) -> auto {
2736 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
2739 switch (Op1->getOpcode()) {
2742 case Instruction::Add:
2743 case Instruction::Sub:
2744 if (Op1->getOperand(0) == Op2->getOperand(0))
2745 return getOperands(1);
2746 if (Op1->getOperand(1) == Op2->getOperand(1))
2747 return getOperands(0);
2749 case Instruction::Mul: {
2750 // invertible if A * B == (A * B) mod 2^N where A, and B are integers
2751 // and N is the bitwdith. The nsw case is non-obvious, but proven by
2752 // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
2753 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2754 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2755 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2756 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2759 // Assume operand order has been canonicalized
2760 if (Op1->getOperand(1) == Op2->getOperand(1) &&
2761 isa<ConstantInt>(Op1->getOperand(1)) &&
2762 !cast<ConstantInt>(Op1->getOperand(1))->isZero())
2763 return getOperands(0);
2766 case Instruction::Shl: {
2767 // Same as multiplies, with the difference that we don't need to check
2768 // for a non-zero multiply. Shifts always multiply by non-zero.
2769 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2770 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2771 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2772 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2775 if (Op1->getOperand(1) == Op2->getOperand(1))
2776 return getOperands(0);
2779 case Instruction::AShr:
2780 case Instruction::LShr: {
2781 auto *PEO1 = cast<PossiblyExactOperator>(Op1);
2782 auto *PEO2 = cast<PossiblyExactOperator>(Op2);
2783 if (!PEO1->isExact() || !PEO2->isExact())
2786 if (Op1->getOperand(1) == Op2->getOperand(1))
2787 return getOperands(0);
2790 case Instruction::SExt:
2791 case Instruction::ZExt:
2792 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
2793 return getOperands(0);
2795 case Instruction::PHI: {
2796 const PHINode *PN1 = cast<PHINode>(Op1);
2797 const PHINode *PN2 = cast<PHINode>(Op2);
2799 // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
2800 // are a single invertible function of the start values? Note that repeated
2801 // application of an invertible function is also invertible
2802 BinaryOperator *BO1 = nullptr;
2803 Value *Start1 = nullptr, *Step1 = nullptr;
2804 BinaryOperator *BO2 = nullptr;
2805 Value *Start2 = nullptr, *Step2 = nullptr;
2806 if (PN1->getParent() != PN2->getParent() ||
2807 !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
2808 !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
2811 auto Values = getInvertibleOperands(cast<Operator>(BO1),
2812 cast<Operator>(BO2));
2816 // We have to be careful of mutually defined recurrences here. Ex:
2817 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
2818 // * X_i = Y_i = X_(i-1) OP Y_(i-1)
2819 // The invertibility of these is complicated, and not worth reasoning
2821 if (Values->first != PN1 || Values->second != PN2)
2824 return std::make_pair(Start1, Start2);
2830 /// Return true if V2 == V1 + X, where X is known non-zero.
2831 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2833 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2834 if (!BO || BO->getOpcode() != Instruction::Add)
2836 Value *Op = nullptr;
2837 if (V2 == BO->getOperand(0))
2838 Op = BO->getOperand(1);
2839 else if (V2 == BO->getOperand(1))
2840 Op = BO->getOperand(0);
2843 return isKnownNonZero(Op, Depth + 1, Q);
2846 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
2847 /// the multiplication is nuw or nsw.
2848 static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
2850 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2852 return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
2853 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2854 !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q);
2859 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
2860 /// the shift is nuw or nsw.
2861 static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
2863 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2865 return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
2866 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2867 !C->isZero() && isKnownNonZero(V1, Depth + 1, Q);
2872 static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
2873 unsigned Depth, const Query &Q) {
2874 // Check two PHIs are in same block.
2875 if (PN1->getParent() != PN2->getParent())
2878 SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
2879 bool UsedFullRecursion = false;
2880 for (const BasicBlock *IncomBB : PN1->blocks()) {
2881 if (!VisitedBBs.insert(IncomBB).second)
2882 continue; // Don't reprocess blocks that we have dealt with already.
2883 const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
2884 const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
2885 const APInt *C1, *C2;
2886 if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
2889 // Only one pair of phi operands is allowed for full recursion.
2890 if (UsedFullRecursion)
2894 RecQ.CxtI = IncomBB->getTerminator();
2895 if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
2897 UsedFullRecursion = true;
2902 /// Return true if it is known that V1 != V2.
2903 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2907 if (V1->getType() != V2->getType())
2908 // We can't look through casts yet.
2911 if (Depth >= MaxAnalysisRecursionDepth)
2914 // See if we can recurse through (exactly one of) our operands. This
2915 // requires our operation be 1-to-1 and map every input value to exactly
2916 // one output value. Such an operation is invertible.
2917 auto *O1 = dyn_cast<Operator>(V1);
2918 auto *O2 = dyn_cast<Operator>(V2);
2919 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2920 if (auto Values = getInvertibleOperands(O1, O2))
2921 return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);
2923 if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
2924 const PHINode *PN2 = cast<PHINode>(V2);
2925 // FIXME: This is missing a generalization to handle the case where one is
2926 // a PHI and another one isn't.
2927 if (isNonEqualPHIs(PN1, PN2, Depth, Q))
2932 if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2935 if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
2938 if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
2941 if (V1->getType()->isIntOrIntVectorTy()) {
2942 // Are any known bits in V1 contradictory to known bits in V2? If V1
2943 // has a known zero where V2 has a known one, they must not be equal.
2944 KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2945 KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2947 if (Known1.Zero.intersects(Known2.One) ||
2948 Known2.Zero.intersects(Known1.One))
2954 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2955 /// simplify operations downstream. Mask is known to be zero for bits that V
2958 /// This function is defined on values with integer type, values with pointer
2959 /// type, and vectors of integers. In the case
2960 /// where V is a vector, the mask, known zero, and known one values are the
2961 /// same width as the vector element, and the bit is set only if it is true
2962 /// for all of the elements in the vector.
2963 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2965 KnownBits Known(Mask.getBitWidth());
2966 computeKnownBits(V, Known, Depth, Q);
2967 return Mask.isSubsetOf(Known.Zero);
2970 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2971 // Returns the input and lower/upper bounds.
2972 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2973 const APInt *&CLow, const APInt *&CHigh) {
2974 assert(isa<Operator>(Select) &&
2975 cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2976 "Input should be a Select!");
2978 const Value *LHS = nullptr, *RHS = nullptr;
2979 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
2980 if (SPF != SPF_SMAX && SPF != SPF_SMIN)
2983 if (!match(RHS, m_APInt(CLow)))
2986 const Value *LHS2 = nullptr, *RHS2 = nullptr;
2987 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
2988 if (getInverseMinMaxFlavor(SPF) != SPF2)
2991 if (!match(RHS2, m_APInt(CHigh)))
2994 if (SPF == SPF_SMIN)
2995 std::swap(CLow, CHigh);
2998 return CLow->sle(*CHigh);
3001 static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II,
3003 const APInt *&CHigh) {
3004 assert((II->getIntrinsicID() == Intrinsic::smin ||
3005 II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax");
3007 Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
3008 auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
3009 if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
3010 !match(II->getArgOperand(1), m_APInt(CLow)) ||
3011 !match(InnerII->getArgOperand(1), m_APInt(CHigh)))
3014 if (II->getIntrinsicID() == Intrinsic::smin)
3015 std::swap(CLow, CHigh);
3016 return CLow->sle(*CHigh);
3019 /// For vector constants, loop over the elements and find the constant with the
3020 /// minimum number of sign bits. Return 0 if the value is not a vector constant
3021 /// or if any element was not analyzed; otherwise, return the count for the
3022 /// element with the minimum number of sign bits.
3023 static unsigned computeNumSignBitsVectorConstant(const Value *V,
3024 const APInt &DemandedElts,
3026 const auto *CV = dyn_cast<Constant>(V);
3027 if (!CV || !isa<FixedVectorType>(CV->getType()))
3030 unsigned MinSignBits = TyBits;
3031 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
3032 for (unsigned i = 0; i != NumElts; ++i) {
3033 if (!DemandedElts[i])
3035 // If we find a non-ConstantInt, bail out.
3036 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
3040 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
3046 static unsigned ComputeNumSignBitsImpl(const Value *V,
3047 const APInt &DemandedElts,
3048 unsigned Depth, const Query &Q);
3050 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
3051 unsigned Depth, const Query &Q) {
3052 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
3053 assert(Result > 0 && "At least one sign bit needs to be present!");
3057 /// Return the number of times the sign bit of the register is replicated into
3058 /// the other bits. We know that at least 1 bit is always equal to the sign bit
3059 /// (itself), but other cases can give us information. For example, immediately
3060 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
3061 /// other, so we return 3. For vectors, return the number of sign bits for the
3062 /// vector element with the minimum number of known sign bits of the demanded
3063 /// elements in the vector specified by DemandedElts.
3064 static unsigned ComputeNumSignBitsImpl(const Value *V,
3065 const APInt &DemandedElts,
3066 unsigned Depth, const Query &Q) {
3067 Type *Ty = V->getType();
3069 // FIXME: We currently have no way to represent the DemandedElts of a scalable
3071 if (isa<ScalableVectorType>(Ty))
3075 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3077 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
3079 FVTy->getNumElements() == DemandedElts.getBitWidth() &&
3080 "DemandedElt width should equal the fixed vector number of elements");
3082 assert(DemandedElts == APInt(1, 1) &&
3083 "DemandedElt width should be 1 for scalars");
3087 // We return the minimum number of sign bits that are guaranteed to be present
3088 // in V, so for undef we have to conservatively return 1. We don't have the
3089 // same behavior for poison though -- that's a FIXME today.
3091 Type *ScalarTy = Ty->getScalarType();
3092 unsigned TyBits = ScalarTy->isPointerTy() ?
3093 Q.DL.getPointerTypeSizeInBits(ScalarTy) :
3094 Q.DL.getTypeSizeInBits(ScalarTy);
3097 unsigned FirstAnswer = 1;
3099 // Note that ConstantInt is handled by the general computeKnownBits case
3102 if (Depth == MaxAnalysisRecursionDepth)
3105 if (auto *U = dyn_cast<Operator>(V)) {
3106 switch (Operator::getOpcode(V)) {
3108 case Instruction::SExt:
3109 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
3110 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
3112 case Instruction::SDiv: {
3113 const APInt *Denominator;
3114 // sdiv X, C -> adds log(C) sign bits.
3115 if (match(U->getOperand(1), m_APInt(Denominator))) {
3117 // Ignore non-positive denominator.
3118 if (!Denominator->isStrictlyPositive())
3121 // Calculate the incoming numerator bits.
3122 unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3124 // Add floor(log(C)) bits to the numerator bits.
3125 return std::min(TyBits, NumBits + Denominator->logBase2());
3130 case Instruction::SRem: {
3131 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3133 const APInt *Denominator;
3134 // srem X, C -> we know that the result is within [-C+1,C) when C is a
3135 // positive constant. This let us put a lower bound on the number of sign
3137 if (match(U->getOperand(1), m_APInt(Denominator))) {
3139 // Ignore non-positive denominator.
3140 if (Denominator->isStrictlyPositive()) {
3141 // Calculate the leading sign bit constraints by examining the
3142 // denominator. Given that the denominator is positive, there are two
3145 // 1. The numerator is positive. The result range is [0,C) and
3146 // [0,C) u< (1 << ceilLogBase2(C)).
3148 // 2. The numerator is negative. Then the result range is (-C,0] and
3149 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
3151 // Thus a lower bound on the number of sign bits is `TyBits -
3152 // ceilLogBase2(C)`.
3154 unsigned ResBits = TyBits - Denominator->ceilLogBase2();
3155 Tmp = std::max(Tmp, ResBits);
3161 case Instruction::AShr: {
3162 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3163 // ashr X, C -> adds C sign bits. Vectors too.
3165 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3166 if (ShAmt->uge(TyBits))
3167 break; // Bad shift.
3168 unsigned ShAmtLimited = ShAmt->getZExtValue();
3169 Tmp += ShAmtLimited;
3170 if (Tmp > TyBits) Tmp = TyBits;
3174 case Instruction::Shl: {
3176 if (match(U->getOperand(1), m_APInt(ShAmt))) {
3177 // shl destroys sign bits.
3178 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3179 if (ShAmt->uge(TyBits) || // Bad shift.
3180 ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
3181 Tmp2 = ShAmt->getZExtValue();
3186 case Instruction::And:
3187 case Instruction::Or:
3188 case Instruction::Xor: // NOT is handled here.
3189 // Logical binary ops preserve the number of sign bits at the worst.
3190 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3192 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3193 FirstAnswer = std::min(Tmp, Tmp2);
3194 // We computed what we know about the sign bits as our first
3195 // answer. Now proceed to the generic code that uses
3196 // computeKnownBits, and pick whichever answer is better.
3200 case Instruction::Select: {
3201 // If we have a clamp pattern, we know that the number of sign bits will
3202 // be the minimum of the clamp min/max range.
3204 const APInt *CLow, *CHigh;
3205 if (isSignedMinMaxClamp(U, X, CLow, CHigh))
3206 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3208 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3209 if (Tmp == 1) break;
3210 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
3211 return std::min(Tmp, Tmp2);
3214 case Instruction::Add:
3215 // Add can have at most one carry bit. Thus we know that the output
3216 // is, at worst, one more bit than the inputs.
3217 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3218 if (Tmp == 1) break;
3220 // Special case decrementing a value (ADD X, -1):
3221 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3222 if (CRHS->isAllOnesValue()) {
3223 KnownBits Known(TyBits);
3224 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
3226 // If the input is known to be 0 or 1, the output is 0/-1, which is
3227 // all sign bits set.
3228 if ((Known.Zero | 1).isAllOnes())
3231 // If we are subtracting one from a positive number, there is no carry
3232 // out of the result.
3233 if (Known.isNonNegative())
3237 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3238 if (Tmp2 == 1) break;
3239 return std::min(Tmp, Tmp2) - 1;
3241 case Instruction::Sub:
3242 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3243 if (Tmp2 == 1) break;
3246 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3247 if (CLHS->isNullValue()) {
3248 KnownBits Known(TyBits);
3249 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
3250 // If the input is known to be 0 or 1, the output is 0/-1, which is
3251 // all sign bits set.
3252 if ((Known.Zero | 1).isAllOnes())
3255 // If the input is known to be positive (the sign bit is known clear),
3256 // the output of the NEG has the same number of sign bits as the
3258 if (Known.isNonNegative())
3261 // Otherwise, we treat this like a SUB.
3264 // Sub can have at most one carry bit. Thus we know that the output
3265 // is, at worst, one more bit than the inputs.
3266 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3267 if (Tmp == 1) break;
3268 return std::min(Tmp, Tmp2) - 1;
3270 case Instruction::Mul: {
3271 // The output of the Mul can be at most twice the valid bits in the
3273 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3274 if (SignBitsOp0 == 1) break;
3275 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3276 if (SignBitsOp1 == 1) break;
3277 unsigned OutValidBits =
3278 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3279 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3282 case Instruction::PHI: {
3283 const PHINode *PN = cast<PHINode>(U);
3284 unsigned NumIncomingValues = PN->getNumIncomingValues();
3285 // Don't analyze large in-degree PHIs.
3286 if (NumIncomingValues > 4) break;
3287 // Unreachable blocks may have zero-operand PHI nodes.
3288 if (NumIncomingValues == 0) break;
3290 // Take the minimum of all incoming values. This can't infinitely loop
3291 // because of our depth threshold.
3294 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
3295 if (Tmp == 1) return Tmp;
3296 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
3298 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
3303 case Instruction::Trunc:
3304 // FIXME: it's tricky to do anything useful for this, but it is an
3305 // important case for targets like X86.
3308 case Instruction::ExtractElement:
3309 // Look through extract element. At the moment we keep this simple and
3310 // skip tracking the specific element. But at least we might find
3311 // information valid for all elements of the vector (for example if vector
3312 // is sign extended, shifted, etc).
3313 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3315 case Instruction::ShuffleVector: {
3316 // Collect the minimum number of sign bits that are shared by every vector
3317 // element referenced by the shuffle.
3318 auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3320 // FIXME: Add support for shufflevector constant expressions.
3323 APInt DemandedLHS, DemandedRHS;
3324 // For undef elements, we don't know anything about the common state of
3325 // the shuffle result.
3326 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3328 Tmp = std::numeric_limits<unsigned>::max();
3329 if (!!DemandedLHS) {
3330 const Value *LHS = Shuf->getOperand(0);
3331 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3333 // If we don't know anything, early out and try computeKnownBits
3337 if (!!DemandedRHS) {
3338 const Value *RHS = Shuf->getOperand(1);
3339 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3340 Tmp = std::min(Tmp, Tmp2);
3342 // If we don't know anything, early out and try computeKnownBits
3346 assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3349 case Instruction::Call: {
3350 if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3351 switch (II->getIntrinsicID()) {
3353 case Intrinsic::abs:
3354 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3355 if (Tmp == 1) break;
3357 // Absolute value reduces number of sign bits by at most 1.
3359 case Intrinsic::smin:
3360 case Intrinsic::smax: {
3361 const APInt *CLow, *CHigh;
3362 if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh))
3363 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3371 // Finally, if we can prove that the top bits of the result are 0's or 1's,
3372 // use this information.
3374 // If we can examine all elements of a vector constant successfully, we're
3375 // done (we can't do any better than that). If not, keep trying.
3376 if (unsigned VecSignBits =
3377 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3380 KnownBits Known(TyBits);
3381 computeKnownBits(V, DemandedElts, Known, Depth, Q);
3383 // If we know that the sign bit is either zero or one, determine the number of
3384 // identical bits in the top of the input value.
3385 return std::max(FirstAnswer, Known.countMinSignBits());
3388 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3389 const TargetLibraryInfo *TLI) {
3390 const Function *F = CB.getCalledFunction();
3392 return Intrinsic::not_intrinsic;
3394 if (F->isIntrinsic())
3395 return F->getIntrinsicID();
3397 // We are going to infer semantics of a library function based on mapping it
3398 // to an LLVM intrinsic. Check that the library function is available from
3399 // this callbase and in this environment.
3401 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3402 !CB.onlyReadsMemory())
3403 return Intrinsic::not_intrinsic;
3411 return Intrinsic::sin;
3415 return Intrinsic::cos;
3419 return Intrinsic::exp;
3423 return Intrinsic::exp2;
3427 return Intrinsic::log;
3429 case LibFunc_log10f:
3430 case LibFunc_log10l:
3431 return Intrinsic::log10;
3435 return Intrinsic::log2;
3439 return Intrinsic::fabs;
3443 return Intrinsic::minnum;
3447 return Intrinsic::maxnum;
3448 case LibFunc_copysign:
3449 case LibFunc_copysignf:
3450 case LibFunc_copysignl:
3451 return Intrinsic::copysign;
3453 case LibFunc_floorf:
3454 case LibFunc_floorl:
3455 return Intrinsic::floor;
3459 return Intrinsic::ceil;
3461 case LibFunc_truncf:
3462 case LibFunc_truncl:
3463 return Intrinsic::trunc;
3467 return Intrinsic::rint;
3468 case LibFunc_nearbyint:
3469 case LibFunc_nearbyintf:
3470 case LibFunc_nearbyintl:
3471 return Intrinsic::nearbyint;
3473 case LibFunc_roundf:
3474 case LibFunc_roundl:
3475 return Intrinsic::round;
3476 case LibFunc_roundeven:
3477 case LibFunc_roundevenf:
3478 case LibFunc_roundevenl:
3479 return Intrinsic::roundeven;
3483 return Intrinsic::pow;
3487 return Intrinsic::sqrt;
3490 return Intrinsic::not_intrinsic;
3493 /// Return true if we can prove that the specified FP value is never equal to
3495 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3496 /// that a value is not -0.0. It only guarantees that -0.0 may be treated
3497 /// the same as +0.0 in floating-point ops.
3498 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3500 if (auto *CFP = dyn_cast<ConstantFP>(V))
3501 return !CFP->getValueAPF().isNegZero();
3503 if (Depth == MaxAnalysisRecursionDepth)
3506 auto *Op = dyn_cast<Operator>(V);
3510 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0.
3511 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3514 // sitofp and uitofp turn into +0.0 for zero.
3515 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3518 if (auto *Call = dyn_cast<CallInst>(Op)) {
3519 Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3523 // sqrt(-0.0) = -0.0, no other negative results are possible.
3524 case Intrinsic::sqrt:
3525 case Intrinsic::canonicalize:
3526 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3527 case Intrinsic::experimental_constrained_sqrt: {
3528 // NOTE: This rounding mode restriction may be too strict.
3529 const auto *CI = cast<ConstrainedFPIntrinsic>(Call);
3530 if (CI->getRoundingMode() == RoundingMode::NearestTiesToEven)
3531 return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3536 case Intrinsic::fabs:
3538 // sitofp and uitofp turn into +0.0 for zero.
3539 case Intrinsic::experimental_constrained_sitofp:
3540 case Intrinsic::experimental_constrained_uitofp:
3548 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3549 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3550 /// bit despite comparing equal.
3551 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3552 const TargetLibraryInfo *TLI,
3555 // TODO: This function does not do the right thing when SignBitOnly is true
3556 // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3557 // which flips the sign bits of NaNs. See
3558 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3560 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3561 return !CFP->getValueAPF().isNegative() ||
3562 (!SignBitOnly && CFP->getValueAPF().isZero());
3565 // Handle vector of constants.
3566 if (auto *CV = dyn_cast<Constant>(V)) {
3567 if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3568 unsigned NumElts = CVFVTy->getNumElements();
3569 for (unsigned i = 0; i != NumElts; ++i) {
3570 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3573 if (CFP->getValueAPF().isNegative() &&
3574 (SignBitOnly || !CFP->getValueAPF().isZero()))
3578 // All non-negative ConstantFPs.
3583 if (Depth == MaxAnalysisRecursionDepth)
3586 const Operator *I = dyn_cast<Operator>(V);
3590 switch (I->getOpcode()) {
3593 // Unsigned integers are always nonnegative.
3594 case Instruction::UIToFP:
3596 case Instruction::FDiv:
3597 // X / X is always exactly 1.0 or a NaN.
3598 if (I->getOperand(0) == I->getOperand(1) &&
3599 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3602 // Set SignBitOnly for RHS, because X / -0.0 is -Inf (or NaN).
3603 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3605 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
3606 /*SignBitOnly*/ true, Depth + 1);
3607 case Instruction::FMul:
3608 // X * X is always non-negative or a NaN.
3609 if (I->getOperand(0) == I->getOperand(1) &&
3610 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3614 case Instruction::FAdd:
3615 case Instruction::FRem:
3616 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3618 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3620 case Instruction::Select:
3621 return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3623 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3625 case Instruction::FPExt:
3626 case Instruction::FPTrunc:
3627 // Widening/narrowing never change sign.
3628 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3630 case Instruction::ExtractElement:
3631 // Look through extract element. At the moment we keep this simple and skip
3632 // tracking the specific element. But at least we might find information
3633 // valid for all elements of the vector.
3634 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3636 case Instruction::Call:
3637 const auto *CI = cast<CallInst>(I);
3638 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3642 case Intrinsic::maxnum: {
3643 Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3644 auto isPositiveNum = [&](Value *V) {
3646 // With SignBitOnly, this is tricky because the result of
3647 // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3648 // a constant strictly greater than 0.0.
3650 return match(V, m_APFloat(C)) &&
3651 *C > APFloat::getZero(C->getSemantics());
3654 // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3655 // maxnum can't be ordered-less-than-zero.
3656 return isKnownNeverNaN(V, TLI) &&
3657 cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3660 // TODO: This could be improved. We could also check that neither operand
3661 // has its sign bit set (and at least 1 is not-NAN?).
3662 return isPositiveNum(V0) || isPositiveNum(V1);
3665 case Intrinsic::maximum:
3666 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3668 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3670 case Intrinsic::minnum:
3671 case Intrinsic::minimum:
3672 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3674 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3676 case Intrinsic::exp:
3677 case Intrinsic::exp2:
3678 case Intrinsic::fabs:
3681 case Intrinsic::sqrt:
3682 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0.
3685 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3686 CannotBeNegativeZero(CI->getOperand(0), TLI));
3688 case Intrinsic::powi:
3689 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3690 // powi(x,n) is non-negative if n is even.
3691 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3694 // TODO: This is not correct. Given that exp is an integer, here are the
3695 // ways that pow can return a negative value:
3697 // pow(x, exp) --> negative if exp is odd and x is negative.
3698 // pow(-0, exp) --> -inf if exp is negative odd.
3699 // pow(-0, exp) --> -0 if exp is positive odd.
3700 // pow(-inf, exp) --> -0 if exp is negative odd.
3701 // pow(-inf, exp) --> -inf if exp is positive odd.
3703 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3704 // but we must return false if x == -0. Unfortunately we do not currently
3705 // have a way of expressing this constraint. See details in
3706 // https://llvm.org/bugs/show_bug.cgi?id=31702.
3707 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3710 case Intrinsic::fma:
3711 case Intrinsic::fmuladd:
3712 // x*x+y is non-negative if y is non-negative.
3713 return I->getOperand(0) == I->getOperand(1) &&
3714 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3715 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3723 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3724 const TargetLibraryInfo *TLI) {
3725 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3728 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3729 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3732 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3734 assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3736 // If we're told that infinities won't happen, assume they won't.
3737 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3738 if (FPMathOp->hasNoInfs())
3741 // Handle scalar constants.
3742 if (auto *CFP = dyn_cast<ConstantFP>(V))
3743 return !CFP->isInfinity();
3745 if (Depth == MaxAnalysisRecursionDepth)
3748 if (auto *Inst = dyn_cast<Instruction>(V)) {
3749 switch (Inst->getOpcode()) {
3750 case Instruction::Select: {
3751 return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3752 isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3754 case Instruction::SIToFP:
3755 case Instruction::UIToFP: {
3756 // Get width of largest magnitude integer (remove a bit if signed).
3757 // This still works for a signed minimum value because the largest FP
3758 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
3759 int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3760 if (Inst->getOpcode() == Instruction::SIToFP)
3763 // If the exponent of the largest finite FP value can hold the largest
3764 // integer, the result of the cast must be finite.
3765 Type *FPTy = Inst->getType()->getScalarType();
3766 return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3773 // try to handle fixed width vector constants
3774 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3775 if (VFVTy && isa<Constant>(V)) {
3776 // For vectors, verify that each element is not infinity.
3777 unsigned NumElts = VFVTy->getNumElements();
3778 for (unsigned i = 0; i != NumElts; ++i) {
3779 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3782 if (isa<UndefValue>(Elt))
3784 auto *CElt = dyn_cast<ConstantFP>(Elt);
3785 if (!CElt || CElt->isInfinity())
3788 // All elements were confirmed non-infinity or undefined.
3792 // was not able to prove that V never contains infinity
3796 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3798 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3800 // If we're told that NaNs won't happen, assume they won't.
3801 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3802 if (FPMathOp->hasNoNaNs())
3805 // Handle scalar constants.
3806 if (auto *CFP = dyn_cast<ConstantFP>(V))
3807 return !CFP->isNaN();
3809 if (Depth == MaxAnalysisRecursionDepth)
3812 if (auto *Inst = dyn_cast<Instruction>(V)) {
3813 switch (Inst->getOpcode()) {
3814 case Instruction::FAdd:
3815 case Instruction::FSub:
3816 // Adding positive and negative infinity produces NaN.
3817 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3818 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3819 (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3820 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3822 case Instruction::FMul:
3823 // Zero multiplied with infinity produces NaN.
3824 // FIXME: If neither side can be zero fmul never produces NaN.
3825 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3826 isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3827 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3828 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3830 case Instruction::FDiv:
3831 case Instruction::FRem:
3832 // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3835 case Instruction::Select: {
3836 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3837 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3839 case Instruction::SIToFP:
3840 case Instruction::UIToFP:
3842 case Instruction::FPTrunc:
3843 case Instruction::FPExt:
3844 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3850 if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3851 switch (II->getIntrinsicID()) {
3852 case Intrinsic::canonicalize:
3853 case Intrinsic::fabs:
3854 case Intrinsic::copysign:
3855 case Intrinsic::exp:
3856 case Intrinsic::exp2:
3857 case Intrinsic::floor:
3858 case Intrinsic::ceil:
3859 case Intrinsic::trunc:
3860 case Intrinsic::rint:
3861 case Intrinsic::nearbyint:
3862 case Intrinsic::round:
3863 case Intrinsic::roundeven:
3864 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3865 case Intrinsic::sqrt:
3866 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3867 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3868 case Intrinsic::minnum:
3869 case Intrinsic::maxnum:
3870 // If either operand is not NaN, the result is not NaN.
3871 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3872 isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3878 // Try to handle fixed width vector constants
3879 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3880 if (VFVTy && isa<Constant>(V)) {
3881 // For vectors, verify that each element is not NaN.
3882 unsigned NumElts = VFVTy->getNumElements();
3883 for (unsigned i = 0; i != NumElts; ++i) {
3884 Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3887 if (isa<UndefValue>(Elt))
3889 auto *CElt = dyn_cast<ConstantFP>(Elt);
3890 if (!CElt || CElt->isNaN())
3893 // All elements were confirmed not-NaN or undefined.
3897 // Was not able to prove that V never contains NaN
3901 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
3903 // All byte-wide stores are splatable, even of arbitrary variables.
3904 if (V->getType()->isIntegerTy(8))
3907 LLVMContext &Ctx = V->getContext();
3909 // Undef don't care.
3910 auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3911 if (isa<UndefValue>(V))
3914 // Return Undef for zero-sized type.
3915 if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3918 Constant *C = dyn_cast<Constant>(V);
3920 // Conceptually, we could handle things like:
3921 // %a = zext i8 %X to i16
3922 // %b = shl i16 %a, 8
3923 // %c = or i16 %a, %b
3924 // but until there is an example that actually needs this, it doesn't seem
3925 // worth worrying about.
3929 // Handle 'null' ConstantArrayZero etc.
3930 if (C->isNullValue())
3931 return Constant::getNullValue(Type::getInt8Ty(Ctx));
3933 // Constant floating-point values can be handled as integer values if the
3934 // corresponding integer value is "byteable". An important case is 0.0.
3935 if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3937 if (CFP->getType()->isHalfTy())
3938 Ty = Type::getInt16Ty(Ctx);
3939 else if (CFP->getType()->isFloatTy())
3940 Ty = Type::getInt32Ty(Ctx);
3941 else if (CFP->getType()->isDoubleTy())
3942 Ty = Type::getInt64Ty(Ctx);
3943 // Don't handle long double formats, which have strange constraints.
3944 return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3948 // We can handle constant integers that are multiple of 8 bits.
3949 if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3950 if (CI->getBitWidth() % 8 == 0) {
3951 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3952 if (!CI->getValue().isSplat(8))
3954 return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3958 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3959 if (CE->getOpcode() == Instruction::IntToPtr) {
3960 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
3961 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
3962 return isBytewiseValue(
3963 ConstantExpr::getIntegerCast(CE->getOperand(0),
3964 Type::getIntNTy(Ctx, BitWidth), false),
3970 auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3975 if (LHS == UndefInt8)
3977 if (RHS == UndefInt8)
3982 if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3983 Value *Val = UndefInt8;
3984 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3985 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3990 if (isa<ConstantAggregate>(C)) {
3991 Value *Val = UndefInt8;
3992 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3993 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3998 // Don't try to handle the handful of other constants.
4002 // This is the recursive version of BuildSubAggregate. It takes a few different
4003 // arguments. Idxs is the index within the nested struct From that we are
4004 // looking at now (which is of type IndexedType). IdxSkip is the number of
4005 // indices from Idxs that should be left out when inserting into the resulting
4006 // struct. To is the result struct built so far, new insertvalue instructions
4008 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
4009 SmallVectorImpl<unsigned> &Idxs,
4011 Instruction *InsertBefore) {
4012 StructType *STy = dyn_cast<StructType>(IndexedType);
4014 // Save the original To argument so we can modify it
4016 // General case, the type indexed by Idxs is a struct
4017 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4018 // Process each struct element recursively
4021 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
4025 // Couldn't find any inserted value for this index? Cleanup
4026 while (PrevTo != OrigTo) {
4027 InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
4028 PrevTo = Del->getAggregateOperand();
4029 Del->eraseFromParent();
4031 // Stop processing elements
4035 // If we successfully found a value for each of our subaggregates
4039 // Base case, the type indexed by SourceIdxs is not a struct, or not all of
4040 // the struct's elements had a value that was inserted directly. In the latter
4041 // case, perhaps we can't determine each of the subelements individually, but
4042 // we might be able to find the complete struct somewhere.
4044 // Find the value that is at that particular spot
4045 Value *V = FindInsertedValue(From, Idxs);
4050 // Insert the value in the new (sub) aggregate
4051 return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
4052 "tmp", InsertBefore);
4055 // This helper takes a nested struct and extracts a part of it (which is again a
4056 // struct) into a new value. For example, given the struct:
4057 // { a, { b, { c, d }, e } }
4058 // and the indices "1, 1" this returns
4061 // It does this by inserting an insertvalue for each element in the resulting
4062 // struct, as opposed to just inserting a single struct. This will only work if
4063 // each of the elements of the substruct are known (ie, inserted into From by an
4064 // insertvalue instruction somewhere).
4066 // All inserted insertvalue instructions are inserted before InsertBefore
4067 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
4068 Instruction *InsertBefore) {
4069 assert(InsertBefore && "Must have someplace to insert!");
4070 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
4072 Value *To = UndefValue::get(IndexedType);
4073 SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
4074 unsigned IdxSkip = Idxs.size();
4076 return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
4079 /// Given an aggregate and a sequence of indices, see if the scalar value
4080 /// indexed is already around as a register, for example if it was inserted
4081 /// directly into the aggregate.
4083 /// If InsertBefore is not null, this function will duplicate (modified)
4084 /// insertvalues when a part of a nested struct is extracted.
4085 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
4086 Instruction *InsertBefore) {
4087 // Nothing to index? Just return V then (this is useful at the end of our
4089 if (idx_range.empty())
4091 // We have indices, so V should have an indexable type.
4092 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
4093 "Not looking at a struct or array?");
4094 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
4095 "Invalid indices for type?");
4097 if (Constant *C = dyn_cast<Constant>(V)) {
4098 C = C->getAggregateElement(idx_range[0]);
4099 if (!C) return nullptr;
4100 return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
4103 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
4104 // Loop the indices for the insertvalue instruction in parallel with the
4105 // requested indices
4106 const unsigned *req_idx = idx_range.begin();
4107 for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
4108 i != e; ++i, ++req_idx) {
4109 if (req_idx == idx_range.end()) {
4110 // We can't handle this without inserting insertvalues
4114 // The requested index identifies a part of a nested aggregate. Handle
4115 // this specially. For example,
4116 // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
4117 // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
4118 // %C = extractvalue {i32, { i32, i32 } } %B, 1
4119 // This can be changed into
4120 // %A = insertvalue {i32, i32 } undef, i32 10, 0
4121 // %C = insertvalue {i32, i32 } %A, i32 11, 1
4122 // which allows the unused 0,0 element from the nested struct to be
4124 return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
4128 // This insert value inserts something else than what we are looking for.
4129 // See if the (aggregate) value inserted into has the value we are
4130 // looking for, then.
4132 return FindInsertedValue(I->getAggregateOperand(), idx_range,
4135 // If we end up here, the indices of the insertvalue match with those
4136 // requested (though possibly only partially). Now we recursively look at
4137 // the inserted value, passing any remaining indices.
4138 return FindInsertedValue(I->getInsertedValueOperand(),
4139 makeArrayRef(req_idx, idx_range.end()),
4143 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
4144 // If we're extracting a value from an aggregate that was extracted from
4145 // something else, we can extract from that something else directly instead.
4146 // However, we will need to chain I's indices with the requested indices.
4148 // Calculate the number of indices required
4149 unsigned size = I->getNumIndices() + idx_range.size();
4150 // Allocate some space to put the new indices in
4151 SmallVector<unsigned, 5> Idxs;
4153 // Add indices from the extract value instruction
4154 Idxs.append(I->idx_begin(), I->idx_end());
4156 // Add requested indices
4157 Idxs.append(idx_range.begin(), idx_range.end());
4159 assert(Idxs.size() == size
4160 && "Number of indices added not correct?");
4162 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
4164 // Otherwise, we don't know (such as, extracting from a function return value
4165 // or load instruction)
4169 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
4170 unsigned CharSize) {
4171 // Make sure the GEP has exactly three arguments.
4172 if (GEP->getNumOperands() != 3)
4175 // Make sure the index-ee is a pointer to array of \p CharSize integers.
4177 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
4178 if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
4181 // Check to make sure that the first operand of the GEP is an integer and
4182 // has value 0 so that we are sure we're indexing into the initializer.
4183 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
4184 if (!FirstIdx || !FirstIdx->isZero())
4190 // If V refers to an initialized global constant, set Slice either to
4191 // its initializer if the size of its elements equals ElementSize, or,
4192 // for ElementSize == 8, to its representation as an array of unsiged
4193 // char. Return true on success.
4194 bool llvm::getConstantDataArrayInfo(const Value *V,
4195 ConstantDataArraySlice &Slice,
4196 unsigned ElementSize, uint64_t Offset) {
4199 // Drill down into the pointer expression V, ignoring any intervening
4200 // casts, and determine the identity of the object it references along
4201 // with the cumulative byte offset into it.
4202 const GlobalVariable *GV =
4203 dyn_cast<GlobalVariable>(getUnderlyingObject(V));
4204 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
4205 // Fail if V is not based on constant global object.
4208 const DataLayout &DL = GV->getParent()->getDataLayout();
4209 APInt Off(DL.getIndexTypeSizeInBits(V->getType()), 0);
4211 if (GV != V->stripAndAccumulateConstantOffsets(DL, Off,
4212 /*AllowNonInbounds*/ true))
4213 // Fail if a constant offset could not be determined.
4216 uint64_t StartIdx = Off.getLimitedValue();
4217 if (StartIdx == UINT64_MAX)
4218 // Fail if the constant offset is excessive.
4223 ConstantDataArray *Array = nullptr;
4224 ArrayType *ArrayTy = nullptr;
4226 if (GV->getInitializer()->isNullValue()) {
4227 Type *GVTy = GV->getValueType();
4228 uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
4229 uint64_t Length = SizeInBytes / (ElementSize / 8);
4231 Slice.Array = nullptr;
4233 // Return an empty Slice for undersized constants to let callers
4234 // transform even undefined library calls into simpler, well-defined
4235 // expressions. This is preferable to making the calls although it
4236 // prevents sanitizers from detecting such calls.
4237 Slice.Length = Length < Offset ? 0 : Length - Offset;
4241 auto *Init = const_cast<Constant *>(GV->getInitializer());
4242 if (auto *ArrayInit = dyn_cast<ConstantDataArray>(Init)) {
4243 Type *InitElTy = ArrayInit->getElementType();
4244 if (InitElTy->isIntegerTy(ElementSize)) {
4245 // If Init is an initializer for an array of the expected type
4246 // and size, use it as is.
4248 ArrayTy = ArrayInit->getType();
4253 if (ElementSize != 8)
4254 // TODO: Handle conversions to larger integral types.
4257 // Otherwise extract the portion of the initializer starting
4258 // at Offset as an array of bytes, and reset Offset.
4259 Init = ReadByteArrayFromGlobal(GV, Offset);
4264 Array = dyn_cast<ConstantDataArray>(Init);
4265 ArrayTy = dyn_cast<ArrayType>(Init->getType());
4268 uint64_t NumElts = ArrayTy->getArrayNumElements();
4269 if (Offset > NumElts)
4272 Slice.Array = Array;
4273 Slice.Offset = Offset;
4274 Slice.Length = NumElts - Offset;
4278 /// Extract bytes from the initializer of the constant array V, which need
4279 /// not be a nul-terminated string. On success, store the bytes in Str and
4280 /// return true. When TrimAtNul is set, Str will contain only the bytes up
4281 /// to but not including the first nul. Return false on failure.
4282 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
4283 uint64_t Offset, bool TrimAtNul) {
4284 ConstantDataArraySlice Slice;
4285 if (!getConstantDataArrayInfo(V, Slice, 8, Offset))
4288 if (Slice.Array == nullptr) {
4290 // Return a nul-terminated string even for an empty Slice. This is
4291 // safe because all existing SimplifyLibcalls callers require string
4292 // arguments and the behavior of the functions they fold is undefined
4293 // otherwise. Folding the calls this way is preferable to making
4294 // the undefined library calls, even though it prevents sanitizers
4295 // from reporting such calls.
4299 if (Slice.Length == 1) {
4300 Str = StringRef("", 1);
4303 // We cannot instantiate a StringRef as we do not have an appropriate string
4308 // Start out with the entire array in the StringRef.
4309 Str = Slice.Array->getAsString();
4310 // Skip over 'offset' bytes.
4311 Str = Str.substr(Slice.Offset);
4314 // Trim off the \0 and anything after it. If the array is not nul
4315 // terminated, we just return the whole end of string. The client may know
4316 // some other way that the string is length-bound.
4317 Str = Str.substr(0, Str.find('\0'));
4322 // These next two are very similar to the above, but also look through PHI
4324 // TODO: See if we can integrate these two together.
4326 /// If we can compute the length of the string pointed to by
4327 /// the specified pointer, return 'len+1'. If we can't, return 0.
4328 static uint64_t GetStringLengthH(const Value *V,
4329 SmallPtrSetImpl<const PHINode*> &PHIs,
4330 unsigned CharSize) {
4331 // Look through noop bitcast instructions.
4332 V = V->stripPointerCasts();
4334 // If this is a PHI node, there are two cases: either we have already seen it
4336 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4337 if (!PHIs.insert(PN).second)
4338 return ~0ULL; // already in the set.
4340 // If it was new, see if all the input strings are the same length.
4341 uint64_t LenSoFar = ~0ULL;
4342 for (Value *IncValue : PN->incoming_values()) {
4343 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4344 if (Len == 0) return 0; // Unknown length -> unknown.
4346 if (Len == ~0ULL) continue;
4348 if (Len != LenSoFar && LenSoFar != ~0ULL)
4349 return 0; // Disagree -> unknown.
4353 // Success, all agree.
4357 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y)
4358 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4359 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4360 if (Len1 == 0) return 0;
4361 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4362 if (Len2 == 0) return 0;
4363 if (Len1 == ~0ULL) return Len2;
4364 if (Len2 == ~0ULL) return Len1;
4365 if (Len1 != Len2) return 0;
4369 // Otherwise, see if we can read the string.
4370 ConstantDataArraySlice Slice;
4371 if (!getConstantDataArrayInfo(V, Slice, CharSize))
4374 if (Slice.Array == nullptr)
4375 // Zeroinitializer (including an empty one).
4378 // Search for the first nul character. Return a conservative result even
4379 // when there is no nul. This is safe since otherwise the string function
4380 // being folded such as strlen is undefined, and can be preferable to
4381 // making the undefined library call.
4382 unsigned NullIndex = 0;
4383 for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4384 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4388 return NullIndex + 1;
4391 /// If we can compute the length of the string pointed to by
4392 /// the specified pointer, return 'len+1'. If we can't, return 0.
4393 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4394 if (!V->getType()->isPointerTy())
4397 SmallPtrSet<const PHINode*, 32> PHIs;
4398 uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4399 // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4400 // an empty string as a length.
4401 return Len == ~0ULL ? 1 : Len;
4405 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4406 bool MustPreserveNullness) {
4408 "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4409 if (const Value *RV = Call->getReturnedArgOperand())
4411 // This can be used only as a aliasing property.
4412 if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4413 Call, MustPreserveNullness))
4414 return Call->getArgOperand(0);
4418 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4419 const CallBase *Call, bool MustPreserveNullness) {
4420 switch (Call->getIntrinsicID()) {
4421 case Intrinsic::launder_invariant_group:
4422 case Intrinsic::strip_invariant_group:
4423 case Intrinsic::aarch64_irg:
4424 case Intrinsic::aarch64_tagp:
4426 case Intrinsic::ptrmask:
4427 return !MustPreserveNullness;
4433 /// \p PN defines a loop-variant pointer to an object. Check if the
4434 /// previous iteration of the loop was referring to the same object as \p PN.
4435 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4436 const LoopInfo *LI) {
4437 // Find the loop-defined value.
4438 Loop *L = LI->getLoopFor(PN->getParent());
4439 if (PN->getNumIncomingValues() != 2)
4442 // Find the value from previous iteration.
4443 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4444 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4445 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4446 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4449 // If a new pointer is loaded in the loop, the pointer references a different
4450 // object in every iteration. E.g.:
4454 if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4455 if (!L->isLoopInvariant(Load->getPointerOperand()))
4460 const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
4461 if (!V->getType()->isPointerTy())
4463 for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4464 if (auto *GEP = dyn_cast<GEPOperator>(V)) {
4465 V = GEP->getPointerOperand();
4466 } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4467 Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4468 V = cast<Operator>(V)->getOperand(0);
4469 if (!V->getType()->isPointerTy())
4471 } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
4472 if (GA->isInterposable())
4474 V = GA->getAliasee();
4476 if (auto *PHI = dyn_cast<PHINode>(V)) {
4477 // Look through single-arg phi nodes created by LCSSA.
4478 if (PHI->getNumIncomingValues() == 1) {
4479 V = PHI->getIncomingValue(0);
4482 } else if (auto *Call = dyn_cast<CallBase>(V)) {
4483 // CaptureTracking can know about special capturing properties of some
4484 // intrinsics like launder.invariant.group, that can't be expressed with
4485 // the attributes, but have properties like returning aliasing pointer.
4486 // Because some analysis may assume that nocaptured pointer is not
4487 // returned from some special intrinsic (because function would have to
4488 // be marked with returns attribute), it is crucial to use this function
4489 // because it should be in sync with CaptureTracking. Not using it may
4490 // cause weird miscompilations where 2 aliasing pointers are assumed to
4492 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4500 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4505 void llvm::getUnderlyingObjects(const Value *V,
4506 SmallVectorImpl<const Value *> &Objects,
4507 LoopInfo *LI, unsigned MaxLookup) {
4508 SmallPtrSet<const Value *, 4> Visited;
4509 SmallVector<const Value *, 4> Worklist;
4510 Worklist.push_back(V);
4512 const Value *P = Worklist.pop_back_val();
4513 P = getUnderlyingObject(P, MaxLookup);
4515 if (!Visited.insert(P).second)
4518 if (auto *SI = dyn_cast<SelectInst>(P)) {
4519 Worklist.push_back(SI->getTrueValue());
4520 Worklist.push_back(SI->getFalseValue());
4524 if (auto *PN = dyn_cast<PHINode>(P)) {
4525 // If this PHI changes the underlying object in every iteration of the
4526 // loop, don't look through it. Consider:
4529 // Prev = Curr; // Prev = PHI (Prev_0, Curr)
4533 // Prev is tracking Curr one iteration behind so they refer to different
4534 // underlying objects.
4535 if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4536 isSameUnderlyingObjectInLoop(PN, LI))
4537 append_range(Worklist, PN->incoming_values());
4541 Objects.push_back(P);
4542 } while (!Worklist.empty());
4545 /// This is the function that does the work of looking through basic
4546 /// ptrtoint+arithmetic+inttoptr sequences.
4547 static const Value *getUnderlyingObjectFromInt(const Value *V) {
4549 if (const Operator *U = dyn_cast<Operator>(V)) {
4550 // If we find a ptrtoint, we can transfer control back to the
4551 // regular getUnderlyingObjectFromInt.
4552 if (U->getOpcode() == Instruction::PtrToInt)
4553 return U->getOperand(0);
4554 // If we find an add of a constant, a multiplied value, or a phi, it's
4555 // likely that the other operand will lead us to the base
4556 // object. We don't have to worry about the case where the
4557 // object address is somehow being computed by the multiply,
4558 // because our callers only care when the result is an
4559 // identifiable object.
4560 if (U->getOpcode() != Instruction::Add ||
4561 (!isa<ConstantInt>(U->getOperand(1)) &&
4562 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4563 !isa<PHINode>(U->getOperand(1))))
4565 V = U->getOperand(0);
4569 assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4573 /// This is a wrapper around getUnderlyingObjects and adds support for basic
4574 /// ptrtoint+arithmetic+inttoptr sequences.
4575 /// It returns false if unidentified object is found in getUnderlyingObjects.
4576 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4577 SmallVectorImpl<Value *> &Objects) {
4578 SmallPtrSet<const Value *, 16> Visited;
4579 SmallVector<const Value *, 4> Working(1, V);
4581 V = Working.pop_back_val();
4583 SmallVector<const Value *, 4> Objs;
4584 getUnderlyingObjects(V, Objs);
4586 for (const Value *V : Objs) {
4587 if (!Visited.insert(V).second)
4589 if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4591 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4592 if (O->getType()->isPointerTy()) {
4593 Working.push_back(O);
4597 // If getUnderlyingObjects fails to find an identifiable object,
4598 // getUnderlyingObjectsForCodeGen also fails for safety.
4599 if (!isIdentifiedObject(V)) {
4603 Objects.push_back(const_cast<Value *>(V));
4605 } while (!Working.empty());
4609 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4610 AllocaInst *Result = nullptr;
4611 SmallPtrSet<Value *, 4> Visited;
4612 SmallVector<Value *, 4> Worklist;
4614 auto AddWork = [&](Value *V) {
4615 if (Visited.insert(V).second)
4616 Worklist.push_back(V);
4621 V = Worklist.pop_back_val();
4622 assert(Visited.count(V));
4624 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4625 if (Result && Result != AI)
4628 } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4629 AddWork(CI->getOperand(0));
4630 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4631 for (Value *IncValue : PN->incoming_values())
4633 } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4634 AddWork(SI->getTrueValue());
4635 AddWork(SI->getFalseValue());
4636 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4637 if (OffsetZero && !GEP->hasAllZeroIndices())
4639 AddWork(GEP->getPointerOperand());
4640 } else if (CallBase *CB = dyn_cast<CallBase>(V)) {
4641 Value *Returned = CB->getReturnedArgOperand();
4649 } while (!Worklist.empty());
4654 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4655 const Value *V, bool AllowLifetime, bool AllowDroppable) {
4656 for (const User *U : V->users()) {
4657 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4661 if (AllowLifetime && II->isLifetimeStartOrEnd())
4664 if (AllowDroppable && II->isDroppable())
4672 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4673 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4674 V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4676 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4677 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4678 V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4681 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4682 if (!LI.isUnordered())
4684 const Function &F = *LI.getFunction();
4685 // Speculative load may create a race that did not exist in the source.
4686 return F.hasFnAttribute(Attribute::SanitizeThread) ||
4687 // Speculative load may load data from dirty regions.
4688 F.hasFnAttribute(Attribute::SanitizeAddress) ||
4689 F.hasFnAttribute(Attribute::SanitizeHWAddress);
4692 bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst,
4693 const Instruction *CtxI,
4694 const DominatorTree *DT,
4695 const TargetLibraryInfo *TLI) {
4696 return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI,
4700 bool llvm::isSafeToSpeculativelyExecuteWithOpcode(
4701 unsigned Opcode, const Instruction *Inst, const Instruction *CtxI,
4702 const DominatorTree *DT, const TargetLibraryInfo *TLI) {
4704 if (Inst->getOpcode() != Opcode) {
4705 // Check that the operands are actually compatible with the Opcode override.
4706 auto hasEqualReturnAndLeadingOperandTypes =
4707 [](const Instruction *Inst, unsigned NumLeadingOperands) {
4708 if (Inst->getNumOperands() < NumLeadingOperands)
4710 const Type *ExpectedType = Inst->getType();
4711 for (unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
4712 if (Inst->getOperand(ItOp)->getType() != ExpectedType)
4716 assert(!Instruction::isBinaryOp(Opcode) ||
4717 hasEqualReturnAndLeadingOperandTypes(Inst, 2));
4718 assert(!Instruction::isUnaryOp(Opcode) ||
4719 hasEqualReturnAndLeadingOperandTypes(Inst, 1));
4726 case Instruction::UDiv:
4727 case Instruction::URem: {
4728 // x / y is undefined if y == 0.
4730 if (match(Inst->getOperand(1), m_APInt(V)))
4734 case Instruction::SDiv:
4735 case Instruction::SRem: {
4736 // x / y is undefined if y == 0 or x == INT_MIN and y == -1
4737 const APInt *Numerator, *Denominator;
4738 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4740 // We cannot hoist this division if the denominator is 0.
4741 if (*Denominator == 0)
4743 // It's safe to hoist if the denominator is not 0 or -1.
4744 if (!Denominator->isAllOnes())
4746 // At this point we know that the denominator is -1. It is safe to hoist as
4747 // long we know that the numerator is not INT_MIN.
4748 if (match(Inst->getOperand(0), m_APInt(Numerator)))
4749 return !Numerator->isMinSignedValue();
4750 // The numerator *might* be MinSignedValue.
4753 case Instruction::Load: {
4754 const LoadInst *LI = dyn_cast<LoadInst>(Inst);
4757 if (mustSuppressSpeculation(*LI))
4759 const DataLayout &DL = LI->getModule()->getDataLayout();
4760 return isDereferenceableAndAlignedPointer(
4761 LI->getPointerOperand(), LI->getType(), LI->getAlign(), DL, CtxI, DT,
4764 case Instruction::Call: {
4765 auto *CI = dyn_cast<const CallInst>(Inst);
4768 const Function *Callee = CI->getCalledFunction();
4770 // The called function could have undefined behavior or side-effects, even
4771 // if marked readnone nounwind.
4772 return Callee && Callee->isSpeculatable();
4774 case Instruction::VAArg:
4775 case Instruction::Alloca:
4776 case Instruction::Invoke:
4777 case Instruction::CallBr:
4778 case Instruction::PHI:
4779 case Instruction::Store:
4780 case Instruction::Ret:
4781 case Instruction::Br:
4782 case Instruction::IndirectBr:
4783 case Instruction::Switch:
4784 case Instruction::Unreachable:
4785 case Instruction::Fence:
4786 case Instruction::AtomicRMW:
4787 case Instruction::AtomicCmpXchg:
4788 case Instruction::LandingPad:
4789 case Instruction::Resume:
4790 case Instruction::CatchSwitch:
4791 case Instruction::CatchPad:
4792 case Instruction::CatchRet:
4793 case Instruction::CleanupPad:
4794 case Instruction::CleanupRet:
4795 return false; // Misc instructions which have effects
4799 bool llvm::mayHaveNonDefUseDependency(const Instruction &I) {
4800 if (I.mayReadOrWriteMemory())
4801 // Memory dependency possible
4803 if (!isSafeToSpeculativelyExecute(&I))
4804 // Can't move above a maythrow call or infinite loop. Or if an
4805 // inalloca alloca, above a stacksave call.
4807 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4808 // 1) Can't reorder two inf-loop calls, even if readonly
4809 // 2) Also can't reorder an inf-loop call below a instruction which isn't
4810 // safe to speculative execute. (Inverse of above)
4815 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4816 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4818 case ConstantRange::OverflowResult::MayOverflow:
4819 return OverflowResult::MayOverflow;
4820 case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4821 return OverflowResult::AlwaysOverflowsLow;
4822 case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4823 return OverflowResult::AlwaysOverflowsHigh;
4824 case ConstantRange::OverflowResult::NeverOverflows:
4825 return OverflowResult::NeverOverflows;
4827 llvm_unreachable("Unknown OverflowResult");
4830 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
4831 static ConstantRange computeConstantRangeIncludingKnownBits(
4832 const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4833 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4834 OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4835 KnownBits Known = computeKnownBits(
4836 V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4837 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4838 ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4839 ConstantRange::PreferredRangeType RangeType =
4840 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4841 return CR1.intersectWith(CR2, RangeType);
4844 OverflowResult llvm::computeOverflowForUnsignedMul(
4845 const Value *LHS, const Value *RHS, const DataLayout &DL,
4846 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4847 bool UseInstrInfo) {
4848 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4849 nullptr, UseInstrInfo);
4850 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4851 nullptr, UseInstrInfo);
4852 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4853 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4854 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4858 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4859 const DataLayout &DL, AssumptionCache *AC,
4860 const Instruction *CxtI,
4861 const DominatorTree *DT, bool UseInstrInfo) {
4862 // Multiplying n * m significant bits yields a result of n + m significant
4863 // bits. If the total number of significant bits does not exceed the
4864 // result bit width (minus 1), there is no overflow.
4865 // This means if we have enough leading sign bits in the operands
4866 // we can guarantee that the result does not overflow.
4867 // Ref: "Hacker's Delight" by Henry Warren
4868 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4870 // Note that underestimating the number of sign bits gives a more
4871 // conservative answer.
4872 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4873 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4875 // First handle the easy case: if we have enough sign bits there's
4876 // definitely no overflow.
4877 if (SignBits > BitWidth + 1)
4878 return OverflowResult::NeverOverflows;
4880 // There are two ambiguous cases where there can be no overflow:
4881 // SignBits == BitWidth + 1 and
4882 // SignBits == BitWidth
4883 // The second case is difficult to check, therefore we only handle the
4885 if (SignBits == BitWidth + 1) {
4886 // It overflows only when both arguments are negative and the true
4887 // product is exactly the minimum negative number.
4888 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4889 // For simplicity we just check if at least one side is not negative.
4890 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4891 nullptr, UseInstrInfo);
4892 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4893 nullptr, UseInstrInfo);
4894 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4895 return OverflowResult::NeverOverflows;
4897 return OverflowResult::MayOverflow;
4900 OverflowResult llvm::computeOverflowForUnsignedAdd(
4901 const Value *LHS, const Value *RHS, const DataLayout &DL,
4902 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4903 bool UseInstrInfo) {
4904 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4905 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4906 nullptr, UseInstrInfo);
4907 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4908 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4909 nullptr, UseInstrInfo);
4910 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4913 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4915 const AddOperator *Add,
4916 const DataLayout &DL,
4917 AssumptionCache *AC,
4918 const Instruction *CxtI,
4919 const DominatorTree *DT) {
4920 if (Add && Add->hasNoSignedWrap()) {
4921 return OverflowResult::NeverOverflows;
4924 // If LHS and RHS each have at least two sign bits, the addition will look
4930 // If the carry into the most significant position is 0, X and Y can't both
4931 // be 1 and therefore the carry out of the addition is also 0.
4933 // If the carry into the most significant position is 1, X and Y can't both
4934 // be 0 and therefore the carry out of the addition is also 1.
4936 // Since the carry into the most significant position is always equal to
4937 // the carry out of the addition, there is no signed overflow.
4938 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4939 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4940 return OverflowResult::NeverOverflows;
4942 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4943 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4944 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4945 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4947 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4948 if (OR != OverflowResult::MayOverflow)
4951 // The remaining code needs Add to be available. Early returns if not so.
4953 return OverflowResult::MayOverflow;
4955 // If the sign of Add is the same as at least one of the operands, this add
4956 // CANNOT overflow. If this can be determined from the known bits of the
4957 // operands the above signedAddMayOverflow() check will have already done so.
4958 // The only other way to improve on the known bits is from an assumption, so
4959 // call computeKnownBitsFromAssume() directly.
4960 bool LHSOrRHSKnownNonNegative =
4961 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
4962 bool LHSOrRHSKnownNegative =
4963 (LHSRange.isAllNegative() || RHSRange.isAllNegative());
4964 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
4965 KnownBits AddKnown(LHSRange.getBitWidth());
4966 computeKnownBitsFromAssume(
4967 Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
4968 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
4969 (AddKnown.isNegative() && LHSOrRHSKnownNegative))
4970 return OverflowResult::NeverOverflows;
4973 return OverflowResult::MayOverflow;
4976 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
4978 const DataLayout &DL,
4979 AssumptionCache *AC,
4980 const Instruction *CxtI,
4981 const DominatorTree *DT) {
4983 // The remainder of a value can't have greater magnitude than itself,
4984 // so the subtraction can't overflow.
4987 // In the minimal case, this would simplify to "?", so there's no subtract
4988 // at all. But if this analysis is used to peek through casts, for example,
4989 // then determining no-overflow may allow other transforms.
4991 // TODO: There are other patterns like this.
4992 // See simplifyICmpWithBinOpOnLHS() for candidates.
4993 if (match(RHS, m_URem(m_Specific(LHS), m_Value())) ||
4994 match(RHS, m_NUWSub(m_Specific(LHS), m_Value())))
4995 if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT))
4996 return OverflowResult::NeverOverflows;
4998 // Checking for conditions implied by dominating conditions may be expensive.
4999 // Limit it to usub_with_overflow calls for now.
5001 m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
5003 isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
5005 return OverflowResult::NeverOverflows;
5006 return OverflowResult::AlwaysOverflowsLow;
5008 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
5009 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
5010 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
5011 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
5012 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
5015 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
5017 const DataLayout &DL,
5018 AssumptionCache *AC,
5019 const Instruction *CxtI,
5020 const DominatorTree *DT) {
5022 // The remainder of a value can't have greater magnitude than itself,
5023 // so the subtraction can't overflow.
5026 // In the minimal case, this would simplify to "?", so there's no subtract
5027 // at all. But if this analysis is used to peek through casts, for example,
5028 // then determining no-overflow may allow other transforms.
5029 if (match(RHS, m_SRem(m_Specific(LHS), m_Value())) ||
5030 match(RHS, m_NSWSub(m_Specific(LHS), m_Value())))
5031 if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT))
5032 return OverflowResult::NeverOverflows;
5034 // If LHS and RHS each have at least two sign bits, the subtraction
5036 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
5037 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
5038 return OverflowResult::NeverOverflows;
5040 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
5041 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
5042 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
5043 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
5044 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
5047 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
5048 const DominatorTree &DT) {
5049 SmallVector<const BranchInst *, 2> GuardingBranches;
5050 SmallVector<const ExtractValueInst *, 2> Results;
5052 for (const User *U : WO->users()) {
5053 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
5054 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
5056 if (EVI->getIndices()[0] == 0)
5057 Results.push_back(EVI);
5059 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
5061 for (const auto *U : EVI->users())
5062 if (const auto *B = dyn_cast<BranchInst>(U)) {
5063 assert(B->isConditional() && "How else is it using an i1?");
5064 GuardingBranches.push_back(B);
5068 // We are using the aggregate directly in a way we don't want to analyze
5069 // here (storing it to a global, say).
5074 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
5075 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
5076 if (!NoWrapEdge.isSingleEdge())
5079 // Check if all users of the add are provably no-wrap.
5080 for (const auto *Result : Results) {
5081 // If the extractvalue itself is not executed on overflow, the we don't
5082 // need to check each use separately, since domination is transitive.
5083 if (DT.dominates(NoWrapEdge, Result->getParent()))
5086 for (const auto &RU : Result->uses())
5087 if (!DT.dominates(NoWrapEdge, RU))
5094 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
5097 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly,
5098 bool ConsiderFlags) {
5100 if (ConsiderFlags && Op->hasPoisonGeneratingFlags())
5103 unsigned Opcode = Op->getOpcode();
5105 // Check whether opcode is a poison/undef-generating operation
5107 case Instruction::Shl:
5108 case Instruction::AShr:
5109 case Instruction::LShr: {
5110 // Shifts return poison if shiftwidth is larger than the bitwidth.
5111 if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) {
5112 SmallVector<Constant *, 4> ShiftAmounts;
5113 if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
5114 unsigned NumElts = FVTy->getNumElements();
5115 for (unsigned i = 0; i < NumElts; ++i)
5116 ShiftAmounts.push_back(C->getAggregateElement(i));
5117 } else if (isa<ScalableVectorType>(C->getType()))
5118 return true; // Can't tell, just return true to be safe
5120 ShiftAmounts.push_back(C);
5122 bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) {
5123 auto *CI = dyn_cast_or_null<ConstantInt>(C);
5124 return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
5130 case Instruction::FPToSI:
5131 case Instruction::FPToUI:
5132 // fptosi/ui yields poison if the resulting value does not fit in the
5133 // destination type.
5135 case Instruction::Call:
5136 if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
5137 switch (II->getIntrinsicID()) {
5138 // TODO: Add more intrinsics.
5139 case Intrinsic::ctpop:
5140 case Intrinsic::sadd_with_overflow:
5141 case Intrinsic::ssub_with_overflow:
5142 case Intrinsic::smul_with_overflow:
5143 case Intrinsic::uadd_with_overflow:
5144 case Intrinsic::usub_with_overflow:
5145 case Intrinsic::umul_with_overflow:
5150 case Instruction::CallBr:
5151 case Instruction::Invoke: {
5152 const auto *CB = cast<CallBase>(Op);
5153 return !CB->hasRetAttr(Attribute::NoUndef);
5155 case Instruction::InsertElement:
5156 case Instruction::ExtractElement: {
5157 // If index exceeds the length of the vector, it returns poison
5158 auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
5159 unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
5160 auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
5161 if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
5165 case Instruction::ShuffleVector: {
5166 // shufflevector may return undef.
5169 ArrayRef<int> Mask = isa<ConstantExpr>(Op)
5170 ? cast<ConstantExpr>(Op)->getShuffleMask()
5171 : cast<ShuffleVectorInst>(Op)->getShuffleMask();
5172 return is_contained(Mask, UndefMaskElem);
5174 case Instruction::FNeg:
5175 case Instruction::PHI:
5176 case Instruction::Select:
5177 case Instruction::URem:
5178 case Instruction::SRem:
5179 case Instruction::ExtractValue:
5180 case Instruction::InsertValue:
5181 case Instruction::Freeze:
5182 case Instruction::ICmp:
5183 case Instruction::FCmp:
5185 case Instruction::GetElementPtr:
5186 // inbounds is handled above
5187 // TODO: what about inrange on constexpr?
5190 const auto *CE = dyn_cast<ConstantExpr>(Op);
5191 if (isa<CastInst>(Op) || (CE && CE->isCast()))
5193 else if (Instruction::isBinaryOp(Opcode))
5195 // Be conservative and return true.
5201 bool llvm::canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlags) {
5202 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false, ConsiderFlags);
5205 bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlags) {
5206 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true, ConsiderFlags);
5209 static bool directlyImpliesPoison(const Value *ValAssumedPoison,
5210 const Value *V, unsigned Depth) {
5211 if (ValAssumedPoison == V)
5214 const unsigned MaxDepth = 2;
5215 if (Depth >= MaxDepth)
5218 if (const auto *I = dyn_cast<Instruction>(V)) {
5219 if (propagatesPoison(cast<Operator>(I)))
5220 return any_of(I->operands(), [=](const Value *Op) {
5221 return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
5224 // 'select ValAssumedPoison, _, _' is poison.
5225 if (const auto *SI = dyn_cast<SelectInst>(I))
5226 return directlyImpliesPoison(ValAssumedPoison, SI->getCondition(),
5228 // V = extractvalue V0, idx
5229 // V2 = extractvalue V0, idx2
5230 // V0's elements are all poison or not. (e.g., add_with_overflow)
5231 const WithOverflowInst *II;
5232 if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
5233 (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
5234 llvm::is_contained(II->args(), ValAssumedPoison)))
5240 static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
5242 if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison))
5245 if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
5248 const unsigned MaxDepth = 2;
5249 if (Depth >= MaxDepth)
5252 const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
5253 if (I && !canCreatePoison(cast<Operator>(I))) {
5254 return all_of(I->operands(), [=](const Value *Op) {
5255 return impliesPoison(Op, V, Depth + 1);
5261 bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
5262 return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
5265 static bool programUndefinedIfUndefOrPoison(const Value *V,
5268 static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
5269 AssumptionCache *AC,
5270 const Instruction *CtxI,
5271 const DominatorTree *DT,
5272 unsigned Depth, bool PoisonOnly) {
5273 if (Depth >= MaxAnalysisRecursionDepth)
5276 if (isa<MetadataAsValue>(V))
5279 if (const auto *A = dyn_cast<Argument>(V)) {
5280 if (A->hasAttribute(Attribute::NoUndef))
5284 if (auto *C = dyn_cast<Constant>(V)) {
5285 if (isa<UndefValue>(C))
5286 return PoisonOnly && !isa<PoisonValue>(C);
5288 if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
5289 isa<ConstantPointerNull>(C) || isa<Function>(C))
5292 if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
5293 return (PoisonOnly ? !C->containsPoisonElement()
5294 : !C->containsUndefOrPoisonElement()) &&
5295 !C->containsConstantExpression();
5298 // Strip cast operations from a pointer value.
5299 // Note that stripPointerCastsSameRepresentation can strip off getelementptr
5300 // inbounds with zero offset. To guarantee that the result isn't poison, the
5301 // stripped pointer is checked as it has to be pointing into an allocated
5302 // object or be null `null` to ensure `inbounds` getelement pointers with a
5303 // zero offset could not produce poison.
5304 // It can strip off addrspacecast that do not change bit representation as
5305 // well. We believe that such addrspacecast is equivalent to no-op.
5306 auto *StrippedV = V->stripPointerCastsSameRepresentation();
5307 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
5308 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
5311 auto OpCheck = [&](const Value *V) {
5312 return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
5316 if (auto *Opr = dyn_cast<Operator>(V)) {
5317 // If the value is a freeze instruction, then it can never
5318 // be undef or poison.
5319 if (isa<FreezeInst>(V))
5322 if (const auto *CB = dyn_cast<CallBase>(V)) {
5323 if (CB->hasRetAttr(Attribute::NoUndef))
5327 if (const auto *PN = dyn_cast<PHINode>(V)) {
5328 unsigned Num = PN->getNumIncomingValues();
5329 bool IsWellDefined = true;
5330 for (unsigned i = 0; i < Num; ++i) {
5331 auto *TI = PN->getIncomingBlock(i)->getTerminator();
5332 if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
5333 DT, Depth + 1, PoisonOnly)) {
5334 IsWellDefined = false;
5340 } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
5344 if (auto *I = dyn_cast<LoadInst>(V))
5345 if (I->hasMetadata(LLVMContext::MD_noundef) ||
5346 I->hasMetadata(LLVMContext::MD_dereferenceable) ||
5347 I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
5350 if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
5353 // CxtI may be null or a cloned instruction.
5354 if (!CtxI || !CtxI->getParent() || !DT)
5357 auto *DNode = DT->getNode(CtxI->getParent());
5359 // Unreachable block
5362 // If V is used as a branch condition before reaching CtxI, V cannot be
5366 // CtxI ; V cannot be undef or poison here
5367 auto *Dominator = DNode->getIDom();
5369 auto *TI = Dominator->getBlock()->getTerminator();
5371 Value *Cond = nullptr;
5372 if (auto BI = dyn_cast_or_null<BranchInst>(TI)) {
5373 if (BI->isConditional())
5374 Cond = BI->getCondition();
5375 } else if (auto SI = dyn_cast_or_null<SwitchInst>(TI)) {
5376 Cond = SI->getCondition();
5382 else if (PoisonOnly && isa<Operator>(Cond)) {
5383 // For poison, we can analyze further
5384 auto *Opr = cast<Operator>(Cond);
5385 if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V))
5390 Dominator = Dominator->getIDom();
5393 if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC))
5399 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
5400 const Instruction *CtxI,
5401 const DominatorTree *DT,
5403 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
5406 bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
5407 const Instruction *CtxI,
5408 const DominatorTree *DT, unsigned Depth) {
5409 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
5412 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
5413 const DataLayout &DL,
5414 AssumptionCache *AC,
5415 const Instruction *CxtI,
5416 const DominatorTree *DT) {
5417 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
5418 Add, DL, AC, CxtI, DT);
5421 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
5423 const DataLayout &DL,
5424 AssumptionCache *AC,
5425 const Instruction *CxtI,
5426 const DominatorTree *DT) {
5427 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
5430 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
5431 // Note: An atomic operation isn't guaranteed to return in a reasonable amount
5432 // of time because it's possible for another thread to interfere with it for an
5433 // arbitrary length of time, but programs aren't allowed to rely on that.
5435 // If there is no successor, then execution can't transfer to it.
5436 if (isa<ReturnInst>(I))
5438 if (isa<UnreachableInst>(I))
5441 // Note: Do not add new checks here; instead, change Instruction::mayThrow or
5442 // Instruction::willReturn.
5444 // FIXME: Move this check into Instruction::willReturn.
5445 if (isa<CatchPadInst>(I)) {
5446 switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
5448 // A catchpad may invoke exception object constructors and such, which
5449 // in some languages can be arbitrary code, so be conservative by default.
5451 case EHPersonality::CoreCLR:
5452 // For CoreCLR, it just involves a type test.
5457 // An instruction that returns without throwing must transfer control flow
5459 return !I->mayThrow() && I->willReturn();
5462 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
5463 // TODO: This is slightly conservative for invoke instruction since exiting
5464 // via an exception *is* normal control for them.
5465 for (const Instruction &I : *BB)
5466 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5471 bool llvm::isGuaranteedToTransferExecutionToSuccessor(
5472 BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
5473 unsigned ScanLimit) {
5474 return isGuaranteedToTransferExecutionToSuccessor(make_range(Begin, End),
5478 bool llvm::isGuaranteedToTransferExecutionToSuccessor(
5479 iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) {
5480 assert(ScanLimit && "scan limit must be non-zero");
5481 for (const Instruction &I : Range) {
5482 if (isa<DbgInfoIntrinsic>(I))
5484 if (--ScanLimit == 0)
5486 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5492 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5494 // The loop header is guaranteed to be executed for every iteration.
5496 // FIXME: Relax this constraint to cover all basic blocks that are
5497 // guaranteed to be executed at every iteration.
5498 if (I->getParent() != L->getHeader()) return false;
5500 for (const Instruction &LI : *L->getHeader()) {
5501 if (&LI == I) return true;
5502 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5504 llvm_unreachable("Instruction not contained in its own parent basic block.");
5507 bool llvm::propagatesPoison(const Operator *I) {
5508 switch (I->getOpcode()) {
5509 case Instruction::Freeze:
5510 case Instruction::Select:
5511 case Instruction::PHI:
5512 case Instruction::Invoke:
5514 case Instruction::Call:
5515 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
5516 switch (II->getIntrinsicID()) {
5517 // TODO: Add more intrinsics.
5518 case Intrinsic::sadd_with_overflow:
5519 case Intrinsic::ssub_with_overflow:
5520 case Intrinsic::smul_with_overflow:
5521 case Intrinsic::uadd_with_overflow:
5522 case Intrinsic::usub_with_overflow:
5523 case Intrinsic::umul_with_overflow:
5524 // If an input is a vector containing a poison element, the
5525 // two output vectors (calculated results, overflow bits)'
5526 // corresponding lanes are poison.
5528 case Intrinsic::ctpop:
5533 case Instruction::ICmp:
5534 case Instruction::FCmp:
5535 case Instruction::GetElementPtr:
5538 if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5541 // Be conservative and return false.
5546 void llvm::getGuaranteedWellDefinedOps(
5547 const Instruction *I, SmallPtrSetImpl<const Value *> &Operands) {
5548 switch (I->getOpcode()) {
5549 case Instruction::Store:
5550 Operands.insert(cast<StoreInst>(I)->getPointerOperand());
5553 case Instruction::Load:
5554 Operands.insert(cast<LoadInst>(I)->getPointerOperand());
5557 // Since dereferenceable attribute imply noundef, atomic operations
5558 // also implicitly have noundef pointers too
5559 case Instruction::AtomicCmpXchg:
5560 Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5563 case Instruction::AtomicRMW:
5564 Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
5567 case Instruction::Call:
5568 case Instruction::Invoke: {
5569 const CallBase *CB = cast<CallBase>(I);
5570 if (CB->isIndirectCall())
5571 Operands.insert(CB->getCalledOperand());
5572 for (unsigned i = 0; i < CB->arg_size(); ++i) {
5573 if (CB->paramHasAttr(i, Attribute::NoUndef) ||
5574 CB->paramHasAttr(i, Attribute::Dereferenceable))
5575 Operands.insert(CB->getArgOperand(i));
5579 case Instruction::Ret:
5580 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef))
5581 Operands.insert(I->getOperand(0));
5588 void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
5589 SmallPtrSetImpl<const Value *> &Operands) {
5590 getGuaranteedWellDefinedOps(I, Operands);
5591 switch (I->getOpcode()) {
5592 // Divisors of these operations are allowed to be partially undef.
5593 case Instruction::UDiv:
5594 case Instruction::SDiv:
5595 case Instruction::URem:
5596 case Instruction::SRem:
5597 Operands.insert(I->getOperand(1));
5599 case Instruction::Switch:
5600 if (BranchOnPoisonAsUB)
5601 Operands.insert(cast<SwitchInst>(I)->getCondition());
5603 case Instruction::Br: {
5604 auto *BR = cast<BranchInst>(I);
5605 if (BranchOnPoisonAsUB && BR->isConditional())
5606 Operands.insert(BR->getCondition());
5614 bool llvm::mustTriggerUB(const Instruction *I,
5615 const SmallSet<const Value *, 16>& KnownPoison) {
5616 SmallPtrSet<const Value *, 4> NonPoisonOps;
5617 getGuaranteedNonPoisonOps(I, NonPoisonOps);
5619 for (const auto *V : NonPoisonOps)
5620 if (KnownPoison.count(V))
5626 static bool programUndefinedIfUndefOrPoison(const Value *V,
5628 // We currently only look for uses of values within the same basic
5629 // block, as that makes it easier to guarantee that the uses will be
5630 // executed given that Inst is executed.
5632 // FIXME: Expand this to consider uses beyond the same basic block. To do
5633 // this, look out for the distinction between post-dominance and strong
5635 const BasicBlock *BB = nullptr;
5636 BasicBlock::const_iterator Begin;
5637 if (const auto *Inst = dyn_cast<Instruction>(V)) {
5638 BB = Inst->getParent();
5639 Begin = Inst->getIterator();
5641 } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5642 BB = &Arg->getParent()->getEntryBlock();
5643 Begin = BB->begin();
5648 // Limit number of instructions we look at, to avoid scanning through large
5649 // blocks. The current limit is chosen arbitrarily.
5650 unsigned ScanLimit = 32;
5651 BasicBlock::const_iterator End = BB->end();
5654 // Since undef does not propagate eagerly, be conservative & just check
5655 // whether a value is directly passed to an instruction that must take
5656 // well-defined operands.
5658 for (const auto &I : make_range(Begin, End)) {
5659 if (isa<DbgInfoIntrinsic>(I))
5661 if (--ScanLimit == 0)
5664 SmallPtrSet<const Value *, 4> WellDefinedOps;
5665 getGuaranteedWellDefinedOps(&I, WellDefinedOps);
5666 if (WellDefinedOps.contains(V))
5669 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5675 // Set of instructions that we have proved will yield poison if Inst
5677 SmallSet<const Value *, 16> YieldsPoison;
5678 SmallSet<const BasicBlock *, 4> Visited;
5680 YieldsPoison.insert(V);
5681 auto Propagate = [&](const User *User) {
5682 if (propagatesPoison(cast<Operator>(User)))
5683 YieldsPoison.insert(User);
5685 for_each(V->users(), Propagate);
5689 for (const auto &I : make_range(Begin, End)) {
5690 if (isa<DbgInfoIntrinsic>(I))
5692 if (--ScanLimit == 0)
5694 if (mustTriggerUB(&I, YieldsPoison))
5696 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5699 // Mark poison that propagates from I through uses of I.
5700 if (YieldsPoison.count(&I))
5701 for_each(I.users(), Propagate);
5704 BB = BB->getSingleSuccessor();
5705 if (!BB || !Visited.insert(BB).second)
5708 Begin = BB->getFirstNonPHI()->getIterator();
5714 bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5715 return ::programUndefinedIfUndefOrPoison(Inst, false);
5718 bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5719 return ::programUndefinedIfUndefOrPoison(Inst, true);
5722 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5726 if (auto *C = dyn_cast<ConstantFP>(V))
5729 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5730 if (!C->getElementType()->isFloatingPointTy())
5732 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5733 if (C->getElementAsAPFloat(I).isNaN())
5739 if (isa<ConstantAggregateZero>(V))
5745 static bool isKnownNonZero(const Value *V) {
5746 if (auto *C = dyn_cast<ConstantFP>(V))
5747 return !C->isZero();
5749 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5750 if (!C->getElementType()->isFloatingPointTy())
5752 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5753 if (C->getElementAsAPFloat(I).isZero())
5762 /// Match clamp pattern for float types without care about NaNs or signed zeros.
5763 /// Given non-min/max outer cmp/select from the clamp pattern this
5764 /// function recognizes if it can be substitued by a "canonical" min/max
5766 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
5767 Value *CmpLHS, Value *CmpRHS,
5768 Value *TrueVal, Value *FalseVal,
5769 Value *&LHS, Value *&RHS) {
5771 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5772 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5773 // and return description of the outer Max/Min.
5775 // First, check if select has inverse order:
5776 if (CmpRHS == FalseVal) {
5777 std::swap(TrueVal, FalseVal);
5778 Pred = CmpInst::getInversePredicate(Pred);
5781 // Assume success now. If there's no match, callers should not use these anyway.
5786 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5787 return {SPF_UNKNOWN, SPNB_NA, false};
5791 case CmpInst::FCMP_OLT:
5792 case CmpInst::FCMP_OLE:
5793 case CmpInst::FCMP_ULT:
5794 case CmpInst::FCMP_ULE:
5796 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5797 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5799 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5801 case CmpInst::FCMP_OGT:
5802 case CmpInst::FCMP_OGE:
5803 case CmpInst::FCMP_UGT:
5804 case CmpInst::FCMP_UGE:
5806 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5807 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5809 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5815 return {SPF_UNKNOWN, SPNB_NA, false};
5818 /// Recognize variations of:
5819 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
5820 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
5821 Value *CmpLHS, Value *CmpRHS,
5822 Value *TrueVal, Value *FalseVal) {
5823 // Swap the select operands and predicate to match the patterns below.
5824 if (CmpRHS != TrueVal) {
5825 Pred = ICmpInst::getSwappedPredicate(Pred);
5826 std::swap(TrueVal, FalseVal);
5829 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
5831 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
5832 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5833 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
5834 return {SPF_SMAX, SPNB_NA, false};
5836 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
5837 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5838 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
5839 return {SPF_SMIN, SPNB_NA, false};
5841 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
5842 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
5843 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
5844 return {SPF_UMAX, SPNB_NA, false};
5846 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
5847 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
5848 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
5849 return {SPF_UMIN, SPNB_NA, false};
5851 return {SPF_UNKNOWN, SPNB_NA, false};
5854 /// Recognize variations of:
5855 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
5856 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
5857 Value *CmpLHS, Value *CmpRHS,
5858 Value *TVal, Value *FVal,
5860 // TODO: Allow FP min/max with nnan/nsz.
5861 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
5863 Value *A = nullptr, *B = nullptr;
5864 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
5865 if (!SelectPatternResult::isMinOrMax(L.Flavor))
5866 return {SPF_UNKNOWN, SPNB_NA, false};
5868 Value *C = nullptr, *D = nullptr;
5869 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
5870 if (L.Flavor != R.Flavor)
5871 return {SPF_UNKNOWN, SPNB_NA, false};
5873 // We have something like: x Pred y ? min(a, b) : min(c, d).
5874 // Try to match the compare to the min/max operations of the select operands.
5875 // First, make sure we have the right compare predicate.
5878 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
5879 Pred = ICmpInst::getSwappedPredicate(Pred);
5880 std::swap(CmpLHS, CmpRHS);
5882 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
5884 return {SPF_UNKNOWN, SPNB_NA, false};
5886 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
5887 Pred = ICmpInst::getSwappedPredicate(Pred);
5888 std::swap(CmpLHS, CmpRHS);
5890 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
5892 return {SPF_UNKNOWN, SPNB_NA, false};
5894 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
5895 Pred = ICmpInst::getSwappedPredicate(Pred);
5896 std::swap(CmpLHS, CmpRHS);
5898 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
5900 return {SPF_UNKNOWN, SPNB_NA, false};
5902 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
5903 Pred = ICmpInst::getSwappedPredicate(Pred);
5904 std::swap(CmpLHS, CmpRHS);
5906 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
5908 return {SPF_UNKNOWN, SPNB_NA, false};
5910 return {SPF_UNKNOWN, SPNB_NA, false};
5913 // If there is a common operand in the already matched min/max and the other
5914 // min/max operands match the compare operands (either directly or inverted),
5915 // then this is min/max of the same flavor.
5917 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5918 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
5920 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5921 match(A, m_Not(m_Specific(CmpRHS)))))
5922 return {L.Flavor, SPNB_NA, false};
5924 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5925 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
5927 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5928 match(A, m_Not(m_Specific(CmpRHS)))))
5929 return {L.Flavor, SPNB_NA, false};
5931 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5932 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
5934 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
5935 match(B, m_Not(m_Specific(CmpRHS)))))
5936 return {L.Flavor, SPNB_NA, false};
5938 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5939 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
5941 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
5942 match(B, m_Not(m_Specific(CmpRHS)))))
5943 return {L.Flavor, SPNB_NA, false};
5946 return {SPF_UNKNOWN, SPNB_NA, false};
5949 /// If the input value is the result of a 'not' op, constant integer, or vector
5950 /// splat of a constant integer, return the bitwise-not source value.
5951 /// TODO: This could be extended to handle non-splat vector integer constants.
5952 static Value *getNotValue(Value *V) {
5954 if (match(V, m_Not(m_Value(NotV))))
5958 if (match(V, m_APInt(C)))
5959 return ConstantInt::get(V->getType(), ~(*C));
5964 /// Match non-obvious integer minimum and maximum sequences.
5965 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
5966 Value *CmpLHS, Value *CmpRHS,
5967 Value *TrueVal, Value *FalseVal,
5968 Value *&LHS, Value *&RHS,
5970 // Assume success. If there's no match, callers should not use these anyway.
5974 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
5975 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5978 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
5979 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
5982 // Look through 'not' ops to find disguised min/max.
5983 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
5984 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
5985 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
5987 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
5988 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
5989 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
5990 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
5995 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
5996 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
5997 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
5999 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
6000 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
6001 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
6002 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
6007 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
6008 return {SPF_UNKNOWN, SPNB_NA, false};
6011 if (!match(CmpRHS, m_APInt(C1)))
6012 return {SPF_UNKNOWN, SPNB_NA, false};
6014 // An unsigned min/max can be written with a signed compare.
6016 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
6017 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
6018 // Is the sign bit set?
6019 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
6020 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
6021 if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue())
6022 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
6024 // Is the sign bit clear?
6025 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
6026 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
6027 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue())
6028 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
6031 return {SPF_UNKNOWN, SPNB_NA, false};
6034 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
6035 assert(X && Y && "Invalid operand");
6037 // X = sub (0, Y) || X = sub nsw (0, Y)
6038 if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
6039 (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
6042 // Y = sub (0, X) || Y = sub nsw (0, X)
6043 if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
6044 (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
6047 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
6049 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
6050 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
6051 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
6052 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
6055 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
6057 Value *CmpLHS, Value *CmpRHS,
6058 Value *TrueVal, Value *FalseVal,
6059 Value *&LHS, Value *&RHS,
6061 if (CmpInst::isFPPredicate(Pred)) {
6062 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
6063 // 0.0 operand, set the compare's 0.0 operands to that same value for the
6064 // purpose of identifying min/max. Disregard vector constants with undefined
6065 // elements because those can not be back-propagated for analysis.
6066 Value *OutputZeroVal = nullptr;
6067 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
6068 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
6069 OutputZeroVal = TrueVal;
6070 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
6071 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
6072 OutputZeroVal = FalseVal;
6074 if (OutputZeroVal) {
6075 if (match(CmpLHS, m_AnyZeroFP()))
6076 CmpLHS = OutputZeroVal;
6077 if (match(CmpRHS, m_AnyZeroFP()))
6078 CmpRHS = OutputZeroVal;
6085 // Signed zero may return inconsistent results between implementations.
6086 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
6087 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
6088 // Therefore, we behave conservatively and only proceed if at least one of the
6089 // operands is known to not be zero or if we don't care about signed zero.
6092 // FIXME: Include OGT/OLT/UGT/ULT.
6093 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
6094 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
6095 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
6096 !isKnownNonZero(CmpRHS))
6097 return {SPF_UNKNOWN, SPNB_NA, false};
6100 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
6101 bool Ordered = false;
6103 // When given one NaN and one non-NaN input:
6104 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
6105 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
6106 // ordered comparison fails), which could be NaN or non-NaN.
6107 // so here we discover exactly what NaN behavior is required/accepted.
6108 if (CmpInst::isFPPredicate(Pred)) {
6109 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
6110 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
6112 if (LHSSafe && RHSSafe) {
6113 // Both operands are known non-NaN.
6114 NaNBehavior = SPNB_RETURNS_ANY;
6115 } else if (CmpInst::isOrdered(Pred)) {
6116 // An ordered comparison will return false when given a NaN, so it
6120 // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
6121 NaNBehavior = SPNB_RETURNS_NAN;
6123 NaNBehavior = SPNB_RETURNS_OTHER;
6125 // Completely unsafe.
6126 return {SPF_UNKNOWN, SPNB_NA, false};
6129 // An unordered comparison will return true when given a NaN, so it
6132 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
6133 NaNBehavior = SPNB_RETURNS_OTHER;
6135 NaNBehavior = SPNB_RETURNS_NAN;
6137 // Completely unsafe.
6138 return {SPF_UNKNOWN, SPNB_NA, false};
6142 if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
6143 std::swap(CmpLHS, CmpRHS);
6144 Pred = CmpInst::getSwappedPredicate(Pred);
6145 if (NaNBehavior == SPNB_RETURNS_NAN)
6146 NaNBehavior = SPNB_RETURNS_OTHER;
6147 else if (NaNBehavior == SPNB_RETURNS_OTHER)
6148 NaNBehavior = SPNB_RETURNS_NAN;
6152 // ([if]cmp X, Y) ? X : Y
6153 if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
6155 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
6156 case ICmpInst::ICMP_UGT:
6157 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
6158 case ICmpInst::ICMP_SGT:
6159 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
6160 case ICmpInst::ICMP_ULT:
6161 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
6162 case ICmpInst::ICMP_SLT:
6163 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
6164 case FCmpInst::FCMP_UGT:
6165 case FCmpInst::FCMP_UGE:
6166 case FCmpInst::FCMP_OGT:
6167 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
6168 case FCmpInst::FCMP_ULT:
6169 case FCmpInst::FCMP_ULE:
6170 case FCmpInst::FCMP_OLT:
6171 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
6175 if (isKnownNegation(TrueVal, FalseVal)) {
6176 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
6177 // match against either LHS or sext(LHS).
6178 auto MaybeSExtCmpLHS =
6179 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
6180 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
6181 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
6182 if (match(TrueVal, MaybeSExtCmpLHS)) {
6183 // Set the return values. If the compare uses the negated value (-X >s 0),
6184 // swap the return values because the negated value is always 'RHS'.
6187 if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
6188 std::swap(LHS, RHS);
6190 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
6191 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
6192 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
6193 return {SPF_ABS, SPNB_NA, false};
6195 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
6196 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
6197 return {SPF_ABS, SPNB_NA, false};
6199 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
6200 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
6201 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
6202 return {SPF_NABS, SPNB_NA, false};
6204 else if (match(FalseVal, MaybeSExtCmpLHS)) {
6205 // Set the return values. If the compare uses the negated value (-X >s 0),
6206 // swap the return values because the negated value is always 'RHS'.
6209 if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
6210 std::swap(LHS, RHS);
6212 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
6213 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
6214 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
6215 return {SPF_NABS, SPNB_NA, false};
6217 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
6218 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
6219 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
6220 return {SPF_ABS, SPNB_NA, false};
6224 if (CmpInst::isIntPredicate(Pred))
6225 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
6227 // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
6228 // may return either -0.0 or 0.0, so fcmp/select pair has stricter
6229 // semantics than minNum. Be conservative in such case.
6230 if (NaNBehavior != SPNB_RETURNS_ANY ||
6231 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
6232 !isKnownNonZero(CmpRHS)))
6233 return {SPF_UNKNOWN, SPNB_NA, false};
6235 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
6238 /// Helps to match a select pattern in case of a type mismatch.
6240 /// The function processes the case when type of true and false values of a
6241 /// select instruction differs from type of the cmp instruction operands because
6242 /// of a cast instruction. The function checks if it is legal to move the cast
6243 /// operation after "select". If yes, it returns the new second value of
6244 /// "select" (with the assumption that cast is moved):
6245 /// 1. As operand of cast instruction when both values of "select" are same cast
6247 /// 2. As restored constant (by applying reverse cast operation) when the first
6248 /// value of the "select" is a cast operation and the second value is a
6250 /// NOTE: We return only the new second value because the first value could be
6251 /// accessed as operand of cast instruction.
6252 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
6253 Instruction::CastOps *CastOp) {
6254 auto *Cast1 = dyn_cast<CastInst>(V1);
6258 *CastOp = Cast1->getOpcode();
6259 Type *SrcTy = Cast1->getSrcTy();
6260 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
6261 // If V1 and V2 are both the same cast from the same type, look through V1.
6262 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
6263 return Cast2->getOperand(0);
6267 auto *C = dyn_cast<Constant>(V2);
6271 Constant *CastedTo = nullptr;
6273 case Instruction::ZExt:
6274 if (CmpI->isUnsigned())
6275 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
6277 case Instruction::SExt:
6278 if (CmpI->isSigned())
6279 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
6281 case Instruction::Trunc:
6283 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
6284 CmpConst->getType() == SrcTy) {
6285 // Here we have the following case:
6287 // %cond = cmp iN %x, CmpConst
6288 // %tr = trunc iN %x to iK
6289 // %narrowsel = select i1 %cond, iK %t, iK C
6291 // We can always move trunc after select operation:
6293 // %cond = cmp iN %x, CmpConst
6294 // %widesel = select i1 %cond, iN %x, iN CmpConst
6295 // %tr = trunc iN %widesel to iK
6297 // Note that C could be extended in any way because we don't care about
6298 // upper bits after truncation. It can't be abs pattern, because it would
6301 // select i1 %cond, x, -x.
6303 // So only min/max pattern could be matched. Such match requires widened C
6304 // == CmpConst. That is why set widened C = CmpConst, condition trunc
6305 // CmpConst == C is checked below.
6306 CastedTo = CmpConst;
6308 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
6311 case Instruction::FPTrunc:
6312 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
6314 case Instruction::FPExt:
6315 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
6317 case Instruction::FPToUI:
6318 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
6320 case Instruction::FPToSI:
6321 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
6323 case Instruction::UIToFP:
6324 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
6326 case Instruction::SIToFP:
6327 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
6336 // Make sure the cast doesn't lose any information.
6337 Constant *CastedBack =
6338 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
6339 if (CastedBack != C)
6345 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
6346 Instruction::CastOps *CastOp,
6348 if (Depth >= MaxAnalysisRecursionDepth)
6349 return {SPF_UNKNOWN, SPNB_NA, false};
6351 SelectInst *SI = dyn_cast<SelectInst>(V);
6352 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
6354 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
6355 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
6357 Value *TrueVal = SI->getTrueValue();
6358 Value *FalseVal = SI->getFalseValue();
6360 return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
6364 SelectPatternResult llvm::matchDecomposedSelectPattern(
6365 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
6366 Instruction::CastOps *CastOp, unsigned Depth) {
6367 CmpInst::Predicate Pred = CmpI->getPredicate();
6368 Value *CmpLHS = CmpI->getOperand(0);
6369 Value *CmpRHS = CmpI->getOperand(1);
6371 if (isa<FPMathOperator>(CmpI))
6372 FMF = CmpI->getFastMathFlags();
6375 if (CmpI->isEquality())
6376 return {SPF_UNKNOWN, SPNB_NA, false};
6378 // Deal with type mismatches.
6379 if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
6380 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
6381 // If this is a potential fmin/fmax with a cast to integer, then ignore
6382 // -0.0 because there is no corresponding integer value.
6383 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6384 FMF.setNoSignedZeros();
6385 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6386 cast<CastInst>(TrueVal)->getOperand(0), C,
6389 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
6390 // If this is a potential fmin/fmax with a cast to integer, then ignore
6391 // -0.0 because there is no corresponding integer value.
6392 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6393 FMF.setNoSignedZeros();
6394 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6395 C, cast<CastInst>(FalseVal)->getOperand(0),
6399 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
6403 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
6404 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
6405 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
6406 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
6407 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
6408 if (SPF == SPF_FMINNUM)
6409 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
6410 if (SPF == SPF_FMAXNUM)
6411 return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
6412 llvm_unreachable("unhandled!");
6415 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
6416 if (SPF == SPF_SMIN) return SPF_SMAX;
6417 if (SPF == SPF_UMIN) return SPF_UMAX;
6418 if (SPF == SPF_SMAX) return SPF_SMIN;
6419 if (SPF == SPF_UMAX) return SPF_UMIN;
6420 llvm_unreachable("unhandled!");
6423 Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
6425 case Intrinsic::smax: return Intrinsic::smin;
6426 case Intrinsic::smin: return Intrinsic::smax;
6427 case Intrinsic::umax: return Intrinsic::umin;
6428 case Intrinsic::umin: return Intrinsic::umax;
6429 default: llvm_unreachable("Unexpected intrinsic");
6433 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) {
6434 return getMinMaxPred(getInverseMinMaxFlavor(SPF));
6437 APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) {
6439 case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth);
6440 case SPF_SMIN: return APInt::getSignedMinValue(BitWidth);
6441 case SPF_UMAX: return APInt::getMaxValue(BitWidth);
6442 case SPF_UMIN: return APInt::getMinValue(BitWidth);
6443 default: llvm_unreachable("Unexpected flavor");
6447 std::pair<Intrinsic::ID, bool>
6448 llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
6449 // Check if VL contains select instructions that can be folded into a min/max
6450 // vector intrinsic and return the intrinsic if it is possible.
6451 // TODO: Support floating point min/max.
6452 bool AllCmpSingleUse = true;
6453 SelectPatternResult SelectPattern;
6454 SelectPattern.Flavor = SPF_UNKNOWN;
6455 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
6457 auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
6458 if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
6459 CurrentPattern.Flavor == SPF_FMINNUM ||
6460 CurrentPattern.Flavor == SPF_FMAXNUM ||
6461 !I->getType()->isIntOrIntVectorTy())
6463 if (SelectPattern.Flavor != SPF_UNKNOWN &&
6464 SelectPattern.Flavor != CurrentPattern.Flavor)
6466 SelectPattern = CurrentPattern;
6468 match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
6471 switch (SelectPattern.Flavor) {
6473 return {Intrinsic::smin, AllCmpSingleUse};
6475 return {Intrinsic::umin, AllCmpSingleUse};
6477 return {Intrinsic::smax, AllCmpSingleUse};
6479 return {Intrinsic::umax, AllCmpSingleUse};
6481 llvm_unreachable("unexpected select pattern flavor");
6484 return {Intrinsic::not_intrinsic, false};
6487 bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
6488 Value *&Start, Value *&Step) {
6489 // Handle the case of a simple two-predecessor recurrence PHI.
6490 // There's a lot more that could theoretically be done here, but
6491 // this is sufficient to catch some interesting cases.
6492 if (P->getNumIncomingValues() != 2)
6495 for (unsigned i = 0; i != 2; ++i) {
6496 Value *L = P->getIncomingValue(i);
6497 Value *R = P->getIncomingValue(!i);
6498 Operator *LU = dyn_cast<Operator>(L);
6501 unsigned Opcode = LU->getOpcode();
6506 // TODO: Expand list -- xor, div, gep, uaddo, etc..
6507 case Instruction::LShr:
6508 case Instruction::AShr:
6509 case Instruction::Shl:
6510 case Instruction::Add:
6511 case Instruction::Sub:
6512 case Instruction::And:
6513 case Instruction::Or:
6514 case Instruction::Mul: {
6515 Value *LL = LU->getOperand(0);
6516 Value *LR = LU->getOperand(1);
6517 // Find a recurrence.
6523 continue; // Check for recurrence with L and R flipped.
6529 // We have matched a recurrence of the form:
6530 // %iv = [R, %entry], [%iv.next, %backedge]
6531 // %iv.next = binop %iv, L
6533 // %iv = [R, %entry], [%iv.next, %backedge]
6534 // %iv.next = binop L, %iv
6535 BO = cast<BinaryOperator>(LU);
6543 bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
6544 Value *&Start, Value *&Step) {
6545 BinaryOperator *BO = nullptr;
6546 P = dyn_cast<PHINode>(I->getOperand(0));
6548 P = dyn_cast<PHINode>(I->getOperand(1));
6549 return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
6552 /// Return true if "icmp Pred LHS RHS" is always true.
6553 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
6554 const Value *RHS, const DataLayout &DL,
6556 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
6563 case CmpInst::ICMP_SLE: {
6566 // LHS s<= LHS +_{nsw} C if C >= 0
6567 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
6568 return !C->isNegative();
6572 case CmpInst::ICMP_ULE: {
6575 // LHS u<= LHS +_{nuw} C for any C
6576 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
6579 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
6580 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
6582 const APInt *&CA, const APInt *&CB) {
6583 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
6584 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
6587 // If X & C == 0 then (X | C) == X +_{nuw} C
6588 if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
6589 match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
6590 KnownBits Known(CA->getBitWidth());
6591 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
6592 /*CxtI*/ nullptr, /*DT*/ nullptr);
6593 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
6601 const APInt *CLHS, *CRHS;
6602 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
6603 return CLHS->ule(*CRHS);
6610 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6611 /// ALHS ARHS" is true. Otherwise, return None.
6612 static Optional<bool>
6613 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
6614 const Value *ARHS, const Value *BLHS, const Value *BRHS,
6615 const DataLayout &DL, unsigned Depth) {
6620 case CmpInst::ICMP_SLT:
6621 case CmpInst::ICMP_SLE:
6622 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
6623 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
6627 case CmpInst::ICMP_ULT:
6628 case CmpInst::ICMP_ULE:
6629 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
6630 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
6636 /// Return true if the operands of the two compares match. IsSwappedOps is true
6637 /// when the operands match, but are swapped.
6638 static bool isMatchingOps(const Value *ALHS, const Value *ARHS,
6639 const Value *BLHS, const Value *BRHS,
6640 bool &IsSwappedOps) {
6642 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS);
6643 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS);
6644 return IsMatchingOps || IsSwappedOps;
6647 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true.
6648 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false.
6649 /// Otherwise, return None if we can't infer anything.
6650 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred,
6651 CmpInst::Predicate BPred,
6652 bool AreSwappedOps) {
6653 // Canonicalize the predicate as if the operands were not commuted.
6655 BPred = ICmpInst::getSwappedPredicate(BPred);
6657 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred))
6659 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred))
6665 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true.
6666 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false.
6667 /// Otherwise, return None if we can't infer anything.
6668 static Optional<bool> isImpliedCondMatchingImmOperands(CmpInst::Predicate APred,
6670 CmpInst::Predicate BPred,
6672 ConstantRange DomCR = ConstantRange::makeExactICmpRegion(APred, C1);
6673 ConstantRange CR = ConstantRange::makeExactICmpRegion(BPred, C2);
6674 ConstantRange Intersection = DomCR.intersectWith(CR);
6675 ConstantRange Difference = DomCR.difference(CR);
6676 if (Intersection.isEmptySet())
6678 if (Difference.isEmptySet())
6683 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
6684 /// false. Otherwise, return None if we can't infer anything.
6685 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6686 CmpInst::Predicate BPred,
6687 const Value *BLHS, const Value *BRHS,
6688 const DataLayout &DL, bool LHSIsTrue,
6690 Value *ALHS = LHS->getOperand(0);
6691 Value *ARHS = LHS->getOperand(1);
6693 // The rest of the logic assumes the LHS condition is true. If that's not the
6694 // case, invert the predicate to make it so.
6695 CmpInst::Predicate APred =
6696 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6698 // Can we infer anything when the two compares have matching operands?
6700 if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
6701 if (Optional<bool> Implication = isImpliedCondMatchingOperands(
6702 APred, BPred, AreSwappedOps))
6704 // No amount of additional analysis will infer the second condition, so
6709 // Can we infer anything when the LHS operands match and the RHS operands are
6710 // constants (not necessarily matching)?
6711 const APInt *AC, *BC;
6712 if (ALHS == BLHS && match(ARHS, m_APInt(AC)) && match(BRHS, m_APInt(BC)))
6713 return isImpliedCondMatchingImmOperands(APred, *AC, BPred, *BC);
6716 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
6720 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
6721 /// false. Otherwise, return None if we can't infer anything. We expect the
6722 /// RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select' instruction.
6723 static Optional<bool>
6724 isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
6725 const Value *RHSOp0, const Value *RHSOp1,
6726 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6727 // The LHS must be an 'or', 'and', or a 'select' instruction.
6728 assert((LHS->getOpcode() == Instruction::And ||
6729 LHS->getOpcode() == Instruction::Or ||
6730 LHS->getOpcode() == Instruction::Select) &&
6731 "Expected LHS to be 'and', 'or', or 'select'.");
6733 assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6735 // If the result of an 'or' is false, then we know both legs of the 'or' are
6736 // false. Similarly, if the result of an 'and' is true, then we know both
6737 // legs of the 'and' are true.
6738 const Value *ALHS, *ARHS;
6739 if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
6740 (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
6741 // FIXME: Make this non-recursion.
6742 if (Optional<bool> Implication = isImpliedCondition(
6743 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6745 if (Optional<bool> Implication = isImpliedCondition(
6746 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6754 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6755 const Value *RHSOp0, const Value *RHSOp1,
6756 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6757 // Bail out when we hit the limit.
6758 if (Depth == MaxAnalysisRecursionDepth)
6761 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6763 if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6766 assert(LHS->getType()->isIntOrIntVectorTy(1) &&
6767 "Expected integer type only!");
6769 // Both LHS and RHS are icmps.
6770 const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6772 return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6775 /// The LHS should be an 'or', 'and', or a 'select' instruction. We expect
6776 /// the RHS to be an icmp.
6777 /// FIXME: Add support for and/or/select on the RHS.
6778 if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
6779 if ((LHSI->getOpcode() == Instruction::And ||
6780 LHSI->getOpcode() == Instruction::Or ||
6781 LHSI->getOpcode() == Instruction::Select))
6782 return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6788 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6789 const DataLayout &DL, bool LHSIsTrue,
6791 // LHS ==> RHS by definition
6795 if (const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS))
6796 return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6797 RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6800 if (Depth == MaxAnalysisRecursionDepth)
6803 // LHS ==> (RHS1 || RHS2) if LHS ==> RHS1 or LHS ==> RHS2
6804 // LHS ==> !(RHS1 && RHS2) if LHS ==> !RHS1 or LHS ==> !RHS2
6805 const Value *RHS1, *RHS2;
6806 if (match(RHS, m_LogicalOr(m_Value(RHS1), m_Value(RHS2)))) {
6807 if (Optional<bool> Imp =
6808 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
6811 if (Optional<bool> Imp =
6812 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
6816 if (match(RHS, m_LogicalAnd(m_Value(RHS1), m_Value(RHS2)))) {
6817 if (Optional<bool> Imp =
6818 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
6821 if (Optional<bool> Imp =
6822 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
6830 // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6831 // condition dominating ContextI or nullptr, if no condition is found.
6832 static std::pair<Value *, bool>
6833 getDomPredecessorCondition(const Instruction *ContextI) {
6834 if (!ContextI || !ContextI->getParent())
6835 return {nullptr, false};
6837 // TODO: This is a poor/cheap way to determine dominance. Should we use a
6838 // dominator tree (eg, from a SimplifyQuery) instead?
6839 const BasicBlock *ContextBB = ContextI->getParent();
6840 const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6842 return {nullptr, false};
6844 // We need a conditional branch in the predecessor.
6846 BasicBlock *TrueBB, *FalseBB;
6847 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6848 return {nullptr, false};
6850 // The branch should get simplified. Don't bother simplifying this condition.
6851 if (TrueBB == FalseBB)
6852 return {nullptr, false};
6854 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6855 "Predecessor block does not point to successor?");
6857 // Is this condition implied by the predecessor condition?
6858 return {PredCond, TrueBB == ContextBB};
6861 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
6862 const Instruction *ContextI,
6863 const DataLayout &DL) {
6864 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
6865 auto PredCond = getDomPredecessorCondition(ContextI);
6867 return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
6871 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
6872 const Value *LHS, const Value *RHS,
6873 const Instruction *ContextI,
6874 const DataLayout &DL) {
6875 auto PredCond = getDomPredecessorCondition(ContextI);
6877 return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
6882 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
6883 APInt &Upper, const InstrInfoQuery &IIQ,
6884 bool PreferSignedRange) {
6885 unsigned Width = Lower.getBitWidth();
6887 switch (BO.getOpcode()) {
6888 case Instruction::Add:
6889 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
6890 bool HasNSW = IIQ.hasNoSignedWrap(&BO);
6891 bool HasNUW = IIQ.hasNoUnsignedWrap(&BO);
6893 // If the caller expects a signed compare, then try to use a signed range.
6894 // Otherwise if both no-wraps are set, use the unsigned range because it
6895 // is never larger than the signed range. Example:
6896 // "add nuw nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125].
6897 if (PreferSignedRange && HasNSW && HasNUW)
6901 // 'add nuw x, C' produces [C, UINT_MAX].
6903 } else if (HasNSW) {
6904 if (C->isNegative()) {
6905 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
6906 Lower = APInt::getSignedMinValue(Width);
6907 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
6909 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
6910 Lower = APInt::getSignedMinValue(Width) + *C;
6911 Upper = APInt::getSignedMaxValue(Width) + 1;
6917 case Instruction::And:
6918 if (match(BO.getOperand(1), m_APInt(C)))
6919 // 'and x, C' produces [0, C].
6923 case Instruction::Or:
6924 if (match(BO.getOperand(1), m_APInt(C)))
6925 // 'or x, C' produces [C, UINT_MAX].
6929 case Instruction::AShr:
6930 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6931 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
6932 Lower = APInt::getSignedMinValue(Width).ashr(*C);
6933 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
6934 } else if (match(BO.getOperand(0), m_APInt(C))) {
6935 unsigned ShiftAmount = Width - 1;
6936 if (!C->isZero() && IIQ.isExact(&BO))
6937 ShiftAmount = C->countTrailingZeros();
6938 if (C->isNegative()) {
6939 // 'ashr C, x' produces [C, C >> (Width-1)]
6941 Upper = C->ashr(ShiftAmount) + 1;
6943 // 'ashr C, x' produces [C >> (Width-1), C]
6944 Lower = C->ashr(ShiftAmount);
6950 case Instruction::LShr:
6951 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
6952 // 'lshr x, C' produces [0, UINT_MAX >> C].
6953 Upper = APInt::getAllOnes(Width).lshr(*C) + 1;
6954 } else if (match(BO.getOperand(0), m_APInt(C))) {
6955 // 'lshr C, x' produces [C >> (Width-1), C].
6956 unsigned ShiftAmount = Width - 1;
6957 if (!C->isZero() && IIQ.isExact(&BO))
6958 ShiftAmount = C->countTrailingZeros();
6959 Lower = C->lshr(ShiftAmount);
6964 case Instruction::Shl:
6965 if (match(BO.getOperand(0), m_APInt(C))) {
6966 if (IIQ.hasNoUnsignedWrap(&BO)) {
6967 // 'shl nuw C, x' produces [C, C << CLZ(C)]
6969 Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
6970 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
6971 if (C->isNegative()) {
6972 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
6973 unsigned ShiftAmount = C->countLeadingOnes() - 1;
6974 Lower = C->shl(ShiftAmount);
6977 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
6978 unsigned ShiftAmount = C->countLeadingZeros() - 1;
6980 Upper = C->shl(ShiftAmount) + 1;
6986 case Instruction::SDiv:
6987 if (match(BO.getOperand(1), m_APInt(C))) {
6988 APInt IntMin = APInt::getSignedMinValue(Width);
6989 APInt IntMax = APInt::getSignedMaxValue(Width);
6990 if (C->isAllOnes()) {
6991 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
6992 // where C != -1 and C != 0 and C != 1
6995 } else if (C->countLeadingZeros() < Width - 1) {
6996 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
6997 // where C != -1 and C != 0 and C != 1
6998 Lower = IntMin.sdiv(*C);
6999 Upper = IntMax.sdiv(*C);
7000 if (Lower.sgt(Upper))
7001 std::swap(Lower, Upper);
7003 assert(Upper != Lower && "Upper part of range has wrapped!");
7005 } else if (match(BO.getOperand(0), m_APInt(C))) {
7006 if (C->isMinSignedValue()) {
7007 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
7009 Upper = Lower.lshr(1) + 1;
7011 // 'sdiv C, x' produces [-|C|, |C|].
7012 Upper = C->abs() + 1;
7013 Lower = (-Upper) + 1;
7018 case Instruction::UDiv:
7019 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
7020 // 'udiv x, C' produces [0, UINT_MAX / C].
7021 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
7022 } else if (match(BO.getOperand(0), m_APInt(C))) {
7023 // 'udiv C, x' produces [0, C].
7028 case Instruction::SRem:
7029 if (match(BO.getOperand(1), m_APInt(C))) {
7030 // 'srem x, C' produces (-|C|, |C|).
7032 Lower = (-Upper) + 1;
7036 case Instruction::URem:
7037 if (match(BO.getOperand(1), m_APInt(C)))
7038 // 'urem x, C' produces [0, C).
7047 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
7049 unsigned Width = Lower.getBitWidth();
7051 switch (II.getIntrinsicID()) {
7052 case Intrinsic::ctpop:
7053 case Intrinsic::ctlz:
7054 case Intrinsic::cttz:
7055 // Maximum of set/clear bits is the bit width.
7056 assert(Lower == 0 && "Expected lower bound to be zero");
7059 case Intrinsic::uadd_sat:
7060 // uadd.sat(x, C) produces [C, UINT_MAX].
7061 if (match(II.getOperand(0), m_APInt(C)) ||
7062 match(II.getOperand(1), m_APInt(C)))
7065 case Intrinsic::sadd_sat:
7066 if (match(II.getOperand(0), m_APInt(C)) ||
7067 match(II.getOperand(1), m_APInt(C))) {
7068 if (C->isNegative()) {
7069 // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
7070 Lower = APInt::getSignedMinValue(Width);
7071 Upper = APInt::getSignedMaxValue(Width) + *C + 1;
7073 // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
7074 Lower = APInt::getSignedMinValue(Width) + *C;
7075 Upper = APInt::getSignedMaxValue(Width) + 1;
7079 case Intrinsic::usub_sat:
7080 // usub.sat(C, x) produces [0, C].
7081 if (match(II.getOperand(0), m_APInt(C)))
7083 // usub.sat(x, C) produces [0, UINT_MAX - C].
7084 else if (match(II.getOperand(1), m_APInt(C)))
7085 Upper = APInt::getMaxValue(Width) - *C + 1;
7087 case Intrinsic::ssub_sat:
7088 if (match(II.getOperand(0), m_APInt(C))) {
7089 if (C->isNegative()) {
7090 // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
7091 Lower = APInt::getSignedMinValue(Width);
7092 Upper = *C - APInt::getSignedMinValue(Width) + 1;
7094 // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
7095 Lower = *C - APInt::getSignedMaxValue(Width);
7096 Upper = APInt::getSignedMaxValue(Width) + 1;
7098 } else if (match(II.getOperand(1), m_APInt(C))) {
7099 if (C->isNegative()) {
7100 // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
7101 Lower = APInt::getSignedMinValue(Width) - *C;
7102 Upper = APInt::getSignedMaxValue(Width) + 1;
7104 // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
7105 Lower = APInt::getSignedMinValue(Width);
7106 Upper = APInt::getSignedMaxValue(Width) - *C + 1;
7110 case Intrinsic::umin:
7111 case Intrinsic::umax:
7112 case Intrinsic::smin:
7113 case Intrinsic::smax:
7114 if (!match(II.getOperand(0), m_APInt(C)) &&
7115 !match(II.getOperand(1), m_APInt(C)))
7118 switch (II.getIntrinsicID()) {
7119 case Intrinsic::umin:
7122 case Intrinsic::umax:
7125 case Intrinsic::smin:
7126 Lower = APInt::getSignedMinValue(Width);
7129 case Intrinsic::smax:
7131 Upper = APInt::getSignedMaxValue(Width) + 1;
7134 llvm_unreachable("Must be min/max intrinsic");
7137 case Intrinsic::abs:
7138 // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
7139 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
7140 if (match(II.getOperand(1), m_One()))
7141 Upper = APInt::getSignedMaxValue(Width) + 1;
7143 Upper = APInt::getSignedMinValue(Width) + 1;
7150 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
7151 APInt &Upper, const InstrInfoQuery &IIQ) {
7152 const Value *LHS = nullptr, *RHS = nullptr;
7153 SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
7154 if (R.Flavor == SPF_UNKNOWN)
7157 unsigned BitWidth = SI.getType()->getScalarSizeInBits();
7159 if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
7160 // If the negation part of the abs (in RHS) has the NSW flag,
7161 // then the result of abs(X) is [0..SIGNED_MAX],
7162 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
7163 Lower = APInt::getZero(BitWidth);
7164 if (match(RHS, m_Neg(m_Specific(LHS))) &&
7165 IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
7166 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
7168 Upper = APInt::getSignedMinValue(BitWidth) + 1;
7172 if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
7173 // The result of -abs(X) is <= 0.
7174 Lower = APInt::getSignedMinValue(BitWidth);
7175 Upper = APInt(BitWidth, 1);
7180 if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
7191 Lower = APInt::getSignedMinValue(BitWidth);
7196 Upper = APInt::getSignedMaxValue(BitWidth) + 1;
7203 static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {
7204 // The maximum representable value of a half is 65504. For floats the maximum
7205 // value is 3.4e38 which requires roughly 129 bits.
7206 unsigned BitWidth = I->getType()->getScalarSizeInBits();
7207 if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
7209 if (isa<FPToSIInst>(I) && BitWidth >= 17) {
7210 Lower = APInt(BitWidth, -65504);
7211 Upper = APInt(BitWidth, 65505);
7214 if (isa<FPToUIInst>(I) && BitWidth >= 16) {
7215 // For a fptoui the lower limit is left as 0.
7216 Upper = APInt(BitWidth, 65505);
7220 ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
7221 bool UseInstrInfo, AssumptionCache *AC,
7222 const Instruction *CtxI,
7223 const DominatorTree *DT,
7225 assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");
7227 if (Depth == MaxAnalysisRecursionDepth)
7228 return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
7231 if (match(V, m_APInt(C)))
7232 return ConstantRange(*C);
7234 InstrInfoQuery IIQ(UseInstrInfo);
7235 unsigned BitWidth = V->getType()->getScalarSizeInBits();
7236 APInt Lower = APInt(BitWidth, 0);
7237 APInt Upper = APInt(BitWidth, 0);
7238 if (auto *BO = dyn_cast<BinaryOperator>(V))
7239 setLimitsForBinOp(*BO, Lower, Upper, IIQ, ForSigned);
7240 else if (auto *II = dyn_cast<IntrinsicInst>(V))
7241 setLimitsForIntrinsic(*II, Lower, Upper);
7242 else if (auto *SI = dyn_cast<SelectInst>(V))
7243 setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
7244 else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V))
7245 setLimitForFPToI(cast<Instruction>(V), Lower, Upper);
7247 ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
7249 if (auto *I = dyn_cast<Instruction>(V))
7250 if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
7251 CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
7254 // Try to restrict the range based on information from assumptions.
7255 for (auto &AssumeVH : AC->assumptionsFor(V)) {
7258 CallInst *I = cast<CallInst>(AssumeVH);
7259 assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
7260 "Got assumption for the wrong function!");
7261 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
7262 "must be an assume intrinsic");
7264 if (!isValidAssumeForContext(I, CtxI, DT))
7266 Value *Arg = I->getArgOperand(0);
7267 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
7268 // Currently we just use information from comparisons.
7269 if (!Cmp || Cmp->getOperand(0) != V)
7271 // TODO: Set "ForSigned" parameter via Cmp->isSigned()?
7273 computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false,
7274 UseInstrInfo, AC, I, DT, Depth + 1);
7275 CR = CR.intersectWith(
7276 ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
7283 static Optional<int64_t>
7284 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
7285 // Skip over the first indices.
7286 gep_type_iterator GTI = gep_type_begin(GEP);
7287 for (unsigned i = 1; i != Idx; ++i, ++GTI)
7290 // Compute the offset implied by the rest of the indices.
7292 for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
7293 ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
7297 continue; // No offset.
7299 // Handle struct indices, which add their field offset to the pointer.
7300 if (StructType *STy = GTI.getStructTypeOrNull()) {
7301 Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
7305 // Otherwise, we have a sequential type like an array or fixed-length
7306 // vector. Multiply the index by the ElementSize.
7307 TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
7308 if (Size.isScalable())
7310 Offset += Size.getFixedSize() * OpC->getSExtValue();
7316 Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
7317 const DataLayout &DL) {
7318 APInt Offset1(DL.getIndexTypeSizeInBits(Ptr1->getType()), 0);
7319 APInt Offset2(DL.getIndexTypeSizeInBits(Ptr2->getType()), 0);
7320 Ptr1 = Ptr1->stripAndAccumulateConstantOffsets(DL, Offset1, true);
7321 Ptr2 = Ptr2->stripAndAccumulateConstantOffsets(DL, Offset2, true);
7323 // Handle the trivial case first.
7325 return Offset2.getSExtValue() - Offset1.getSExtValue();
7327 const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
7328 const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
7330 // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
7331 // base. After that base, they may have some number of common (and
7332 // potentially variable) indices. After that they handle some constant
7333 // offset, which determines their offset from each other. At this point, we
7334 // handle no other case.
7335 if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0) ||
7336 GEP1->getSourceElementType() != GEP2->getSourceElementType())
7339 // Skip any common indices and track the GEP types.
7341 for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
7342 if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
7345 auto IOffset1 = getOffsetFromIndex(GEP1, Idx, DL);
7346 auto IOffset2 = getOffsetFromIndex(GEP2, Idx, DL);
7347 if (!IOffset1 || !IOffset2)
7349 return *IOffset2 - *IOffset1 + Offset2.getSExtValue() -
7350 Offset1.getSExtValue();