#ifndef LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
#define LLVM_ANALYSIS_INSTRUCTIONSIMPLIFY_H
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Operator.h"
#include "llvm/IR/User.h"
namespace llvm {
template <class T> class ArrayRef;
class AssumptionCache;
class DominatorTree;
-class Instruction;
class ImmutableCallSite;
class DataLayout;
class FastMathFlags;
class TargetLibraryInfo;
class Type;
class Value;
+class MDNode;
+class BinaryOperator;
+
+/// InstrInfoQuery provides an interface to query additional information for
+/// instructions, such as metadata or keywords like nsw, and it returns
+/// conservative results unless the user has specified that this information
+/// may safely be used.
+struct InstrInfoQuery {
+ InstrInfoQuery(bool UMD) : UseInstrInfo(UMD) {}
+ InstrInfoQuery() : UseInstrInfo(true) {}
+ bool UseInstrInfo = true;
+
+ MDNode *getMetadata(const Instruction *I, unsigned KindID) const {
+ if (UseInstrInfo)
+ return I->getMetadata(KindID);
+ return nullptr;
+ }
+
+ template <class InstT> bool hasNoUnsignedWrap(const InstT *Op) const {
+ if (UseInstrInfo)
+ return Op->hasNoUnsignedWrap();
+ return false;
+ }
+
+ template <class InstT> bool hasNoSignedWrap(const InstT *Op) const {
+ if (UseInstrInfo)
+ return Op->hasNoSignedWrap();
+ return false;
+ }
+
+ bool isExact(const BinaryOperator *Op) const {
+ if (UseInstrInfo && isa<PossiblyExactOperator>(Op))
+ return cast<PossiblyExactOperator>(Op)->isExact();
+ return false;
+ }
+};
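// Illustrative sketch (hypothetical caller, not prescribed by this patch):
// with UseInstrInfo disabled, every flag and metadata query above degrades to
// its conservative answer, so folds that rely on nsw/nuw/exact or on !range
// metadata are suppressed.
//
//   InstrInfoQuery IIQ(/*UMD=*/false);
//   bool NSW = IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
//   // NSW is always false here, and getMetadata() always returns nullptr.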
struct SimplifyQuery {
const DataLayout &DL;
AssumptionCache *AC = nullptr;
const Instruction *CxtI = nullptr;
+ // Wrapper to query additional information for instructions, such as
+ // metadata or keywords like nsw, which returns conservative results when
+ // that information cannot be safely used.
+ const InstrInfoQuery IIQ;
+
SimplifyQuery(const DataLayout &DL, const Instruction *CXTI = nullptr)
: DL(DL), CxtI(CXTI) {}
SimplifyQuery(const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr,
- const Instruction *CXTI = nullptr)
- : DL(DL), TLI(TLI), DT(DT), AC(AC), CxtI(CXTI) {}
+ const Instruction *CXTI = nullptr, bool UseInstrInfo = true)
+ : DL(DL), TLI(TLI), DT(DT), AC(AC), CxtI(CXTI), IIQ(UseInstrInfo) {}
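// A hedged construction sketch (mirrors the NewGVN change near the end of
// this patch): passing UseInstrInfo = false yields a query whose IIQ answers
// every flag and metadata question conservatively.
//
//   SimplifyQuery SQ(DL, TLI, DT, AC, /*CXTI=*/nullptr,
//                    /*UseInstrInfo=*/false);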
SimplifyQuery getWithInstruction(Instruction *I) const {
SimplifyQuery Copy(*this);
Copy.CxtI = I;
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr);
+ OptimizationRemarkEmitter *ORE = nullptr,
+ bool UseInstrInfo = true);
/// Returns the known bits rather than passing by reference.
KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr);
+ OptimizationRemarkEmitter *ORE = nullptr,
+ bool UseInstrInfo = true);
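// A hypothetical call site for the new trailing flag: analyses that may move
// code past the point where its flags still hold can opt out of instruction
// info while keeping the other defaults.
//
//   KnownBits Known = computeKnownBits(V, DL, /*Depth=*/0, AC, CxtI, DT,
//                                      /*ORE=*/nullptr,
//                                      /*UseInstrInfo=*/false);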
/// Compute known bits from the range metadata.
/// \p KnownZero the set of bits that are known to be zero
const DataLayout &DL,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// Return true if the given value is known to have exactly one bit set when
/// defined. For vectors return true if every element is known to be a power
bool OrZero = false, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
bool isOnlyUsedInZeroEqualityComparison(const Instruction *CxtI);
bool isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// Return true if the two given values are negations of each other.
/// Currently can recognize Value pairs:
unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// Returns true if the given value is known to be positive (i.e. non-negative
/// and non-zero).
bool isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// Returns true if the given value is known to be negative (i.e. non-positive
/// and non-zero).
bool isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// Return true if the given values are known to be non-equal when defined.
/// Supports scalar integer types only.
bool isKnownNonEqual(const Value *V1, const Value *V2, const DataLayout &DL,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ AssumptionCache *AC = nullptr,
+ const Instruction *CxtI = nullptr,
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// Return true if 'V & Mask' is known to be zero. We use this predicate to
/// simplify operations downstream. Mask is known to be zero for bits that V
const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// Return the number of times the sign bit of the register is replicated into
/// the other bits. We know that at least 1 bit is always equal to the sign
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
- const DominatorTree *DT = nullptr);
+ const DominatorTree *DT = nullptr,
+ bool UseInstrInfo = true);
/// This function computes the integer multiple of Base that equals V. If
/// successful, it returns true and returns the multiple in Multiple. If
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
- const DominatorTree *DT);
+ const DominatorTree *DT,
+ bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
- const DominatorTree *DT);
+ const DominatorTree *DT,
+ bool UseInstrInfo = true);
OverflowResult computeOverflowForUnsignedAdd(const Value *LHS,
const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
- const DominatorTree *DT);
+ const DominatorTree *DT,
+ bool UseInstrInfo = true);
OverflowResult computeOverflowForSignedAdd(const Value *LHS, const Value *RHS,
const DataLayout &DL,
AssumptionCache *AC = nullptr,
// (X / Y) * Y -> X if the division is exact.
Value *X = nullptr;
- if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
- match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0))))) // Y * (X / Y)
+ if (Q.IIQ.UseInstrInfo &&
+ (match(Op0,
+ m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
+ match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
return X;
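// For example (illustrative IR):
//   %d = sdiv exact i32 %x, %y
//   %m = mul i32 %d, %y        ; simplifies to %x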
// i1 mul -> and.
if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
auto *Mul = cast<OverflowingBinaryOperator>(Op0);
// If the Mul does not overflow, then we are good to go.
- if ((IsSigned && Mul->hasNoSignedWrap()) ||
- (!IsSigned && Mul->hasNoUnsignedWrap()))
+ if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
+ (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)))
return X;
// If X has the form X = A / Y, then X * Y cannot overflow.
if ((IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
return Op0;
// (X << Y) % X -> 0
- if ((Opcode == Instruction::SRem &&
- match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
- (Opcode == Instruction::URem &&
- match(Op0, m_NUWShl(m_Specific(Op1), m_Value()))))
+ if (Q.IIQ.UseInstrInfo &&
+ ((Opcode == Instruction::SRem &&
+ match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
+ (Opcode == Instruction::URem &&
+ match(Op0, m_NUWShl(m_Specific(Op1), m_Value())))))
return Constant::getNullValue(Op0->getType());
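// For example (illustrative IR): with %s = shl nuw i32 %x, %y, the value
// %r = urem i32 %s, %x simplifies to 0, since nuw guarantees %s is an exact
// multiple of %x.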
// If the operation is with the result of a select instruction, check whether
// (X >> A) << A -> X
Value *X;
- if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
+ if (Q.IIQ.UseInstrInfo &&
+ match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
return X;
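// For example (illustrative IR):
//   %s = lshr exact i32 %x, %a
//   %r = shl i32 %s, %a        ; simplifies to %x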
// shl nuw i8 C, %x -> C iff C has sign bit set.
// (X << A) >> A -> X
Value *X;
- if (match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
+ if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
return X;
// Arithmetic shifting an all-sign-bit value is a no-op.
return nullptr;
}
-static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
+static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
+ const InstrInfoQuery &IIQ) {
// (icmp (add V, C0), C1) & (icmp V, C0)
ICmpInst::Predicate Pred0, Pred1;
const APInt *C0, *C1;
if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
return nullptr;
- auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
+ auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
if (AddInst->getOperand(1) != Op1->getOperand(1))
return nullptr;
Type *ITy = Op0->getType();
- bool isNSW = AddInst->hasNoSignedWrap();
- bool isNUW = AddInst->hasNoUnsignedWrap();
+ bool isNSW = IIQ.hasNoSignedWrap(AddInst);
+ bool isNUW = IIQ.hasNoUnsignedWrap(AddInst);
const APInt Delta = *C1 - *C0;
if (C0->isStrictlyPositive()) {
return nullptr;
}
-static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
+static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
+ const InstrInfoQuery &IIQ) {
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
return X;
if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true))
if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true))
return X;
- if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1))
+ if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, IIQ))
return X;
- if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0))
+ if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, IIQ))
return X;
return nullptr;
}
-static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
+static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
+ const InstrInfoQuery &IIQ) {
// (icmp (add V, C0), C1) | (icmp V, C0)
ICmpInst::Predicate Pred0, Pred1;
const APInt *C0, *C1;
return nullptr;
Type *ITy = Op0->getType();
- bool isNSW = AddInst->hasNoSignedWrap();
- bool isNUW = AddInst->hasNoUnsignedWrap();
+ bool isNSW = IIQ.hasNoSignedWrap(AddInst);
+ bool isNUW = IIQ.hasNoUnsignedWrap(AddInst);
const APInt Delta = *C1 - *C0;
if (C0->isStrictlyPositive()) {
return nullptr;
}
-static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
+static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
+ const InstrInfoQuery &IIQ) {
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
return X;
if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false))
if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false))
return X;
- if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1))
+ if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, IIQ))
return X;
- if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0))
+ if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, IIQ))
return X;
return nullptr;
return nullptr;
}
-static Value *simplifyAndOrOfCmps(const TargetLibraryInfo *TLI,
+static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q,
Value *Op0, Value *Op1, bool IsAnd) {
// Look through casts of the 'and' operands to find compares.
auto *Cast0 = dyn_cast<CastInst>(Op0);
auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
if (ICmp0 && ICmp1)
- V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1) :
- simplifyOrOfICmps(ICmp0, ICmp1);
+ V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q.IIQ)
+ : simplifyOrOfICmps(ICmp0, ICmp1, Q.IIQ);
auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
if (FCmp0 && FCmp1)
- V = simplifyAndOrOfFCmps(TLI, FCmp0, FCmp1, IsAnd);
+ V = simplifyAndOrOfFCmps(Q.TLI, FCmp0, FCmp1, IsAnd);
if (!V)
return nullptr;
return Op1;
}
- if (Value *V = simplifyAndOrOfCmps(Q.TLI, Op0, Op1, true))
+ if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
return V;
// Try some generic simplifications for associative operations.
match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
return Op0;
- if (Value *V = simplifyAndOrOfCmps(Q.TLI, Op0, Op1, false))
+ if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
return V;
// Try some generic simplifications for associative operations.
computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, CmpInst::Predicate Pred,
AssumptionCache *AC, const Instruction *CxtI,
- Value *LHS, Value *RHS) {
+ const InstrInfoQuery &IIQ, Value *LHS, Value *RHS) {
// First, skip past any trivial no-ops.
LHS = LHS->stripPointerCasts();
RHS = RHS->stripPointerCasts();
// A non-null pointer is not equal to a null pointer.
- if (llvm::isKnownNonZero(LHS, DL) && isa<ConstantPointerNull>(RHS) &&
+ if (llvm::isKnownNonZero(LHS, DL, 0, nullptr, nullptr, nullptr,
+ IIQ.UseInstrInfo) &&
+ isa<ConstantPointerNull>(RHS) &&
(Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
return ConstantInt::get(GetCompareTy(LHS),
!CmpInst::isTrueWhenEqual(Pred));
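// For example, if %p is known non-null (say, a simple alloca), then an
// illustrative 'icmp eq i8* %p, null' folds to false and 'icmp ne' to true.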
return getTrue(ITy);
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULE:
- if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
+ if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
return getFalse(ITy);
break;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGT:
- if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
+ if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
return getTrue(ITy);
break;
case ICmpInst::ICMP_SLT: {
/// Many binary operators with a constant operand have an easy-to-compute
/// range of outputs. This can be used to fold a comparison to always true or
/// always false.
-static void setLimitsForBinOp(BinaryOperator &BO, APInt &Lower, APInt &Upper) {
+static void setLimitsForBinOp(BinaryOperator &BO, APInt &Lower, APInt &Upper,
+ const InstrInfoQuery &IIQ) {
unsigned Width = Lower.getBitWidth();
const APInt *C;
switch (BO.getOpcode()) {
case Instruction::Add:
if (match(BO.getOperand(1), m_APInt(C)) && !C->isNullValue()) {
// FIXME: If we have both nuw and nsw, we should reduce the range further.
- if (BO.hasNoUnsignedWrap()) {
+ if (IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
// 'add nuw x, C' produces [C, UINT_MAX].
Lower = *C;
- } else if (BO.hasNoSignedWrap()) {
+ } else if (IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(&BO))) {
if (C->isNegative()) {
// 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
Lower = APInt::getSignedMinValue(Width);
Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
} else if (match(BO.getOperand(0), m_APInt(C))) {
unsigned ShiftAmount = Width - 1;
- if (!C->isNullValue() && BO.isExact())
+ if (!C->isNullValue() && IIQ.isExact(&BO))
ShiftAmount = C->countTrailingZeros();
if (C->isNegative()) {
// 'ashr C, x' produces [C, C >> (Width-1)]
} else if (match(BO.getOperand(0), m_APInt(C))) {
// 'lshr C, x' produces [C >> (Width-1), C].
unsigned ShiftAmount = Width - 1;
- if (!C->isNullValue() && BO.isExact())
+ if (!C->isNullValue() && IIQ.isExact(&BO))
ShiftAmount = C->countTrailingZeros();
Lower = C->lshr(ShiftAmount);
Upper = *C + 1;
case Instruction::Shl:
if (match(BO.getOperand(0), m_APInt(C))) {
- if (BO.hasNoUnsignedWrap()) {
+ if (IIQ.hasNoUnsignedWrap(&BO)) {
// 'shl nuw C, x' produces [C, C << CLZ(C)]
Lower = *C;
Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
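// Worked example: 'shl nuw i8 3, %x' can only produce 3, 6, 12, 24, 48, 96,
// or 192, so Lower = 3 and Upper = (3 << 6) + 1 = 193, i.e. the inclusive
// range [3, 192].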
}
static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
- Value *RHS) {
+ Value *RHS, const InstrInfoQuery &IIQ) {
Type *ITy = GetCompareTy(RHS); // The return type.
Value *X;
APInt Lower = APInt(Width, 0);
APInt Upper = APInt(Width, 0);
if (auto *BO = dyn_cast<BinaryOperator>(LHS))
- setLimitsForBinOp(*BO, Lower, Upper);
+ setLimitsForBinOp(*BO, Lower, Upper, IIQ);
ConstantRange LHS_CR =
Lower != Upper ? ConstantRange(Lower, Upper) : ConstantRange(Width, true);
if (auto *I = dyn_cast<Instruction>(LHS))
- if (auto *Ranges = I->getMetadata(LLVMContext::MD_range))
+ if (auto *Ranges = IIQ.getMetadata(I, LLVMContext::MD_range))
LHS_CR = LHS_CR.intersectWith(getConstantRangeFromMetadata(*Ranges));
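// Illustrative !range metadata that would feed this intersection:
//   %v = load i32, i32* %p, !range !0
//   !0 = !{i32 0, i32 10}   ; %v is known to lie in [0, 10)
// When UseInstrInfo is false, getMetadata() returns null and the range
// metadata is ignored.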
if (!LHS_CR.isFullSet()) {
B = LBO->getOperand(1);
NoLHSWrapProblem =
ICmpInst::isEquality(Pred) ||
- (CmpInst::isUnsigned(Pred) && LBO->hasNoUnsignedWrap()) ||
- (CmpInst::isSigned(Pred) && LBO->hasNoSignedWrap());
+ (CmpInst::isUnsigned(Pred) &&
+ Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
+ (CmpInst::isSigned(Pred) &&
+ Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
}
if (RBO && RBO->getOpcode() == Instruction::Add) {
C = RBO->getOperand(0);
D = RBO->getOperand(1);
NoRHSWrapProblem =
ICmpInst::isEquality(Pred) ||
- (CmpInst::isUnsigned(Pred) && RBO->hasNoUnsignedWrap()) ||
- (CmpInst::isSigned(Pred) && RBO->hasNoSignedWrap());
+ (CmpInst::isUnsigned(Pred) &&
+ Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
+ (CmpInst::isSigned(Pred) &&
+ Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
}
// icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
// - The shift is nuw, we can't shift out the one bit.
// - CI2 is one
// - CI isn't zero
- if (LBO->hasNoSignedWrap() || LBO->hasNoUnsignedWrap() ||
+ if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
+ Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
CI2Val->isOneValue() || !CI->isZero()) {
if (Pred == ICmpInst::ICMP_EQ)
return ConstantInt::getFalse(RHS->getContext());
break;
case Instruction::UDiv:
case Instruction::LShr:
- if (ICmpInst::isSigned(Pred) || !LBO->isExact() || !RBO->isExact())
+ if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
+ !Q.IIQ.isExact(RBO))
break;
if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
RBO->getOperand(0), Q, MaxRecurse - 1))
return V;
break;
case Instruction::SDiv:
- if (!ICmpInst::isEquality(Pred) || !LBO->isExact() || !RBO->isExact())
+ if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
+ !Q.IIQ.isExact(RBO))
break;
if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
RBO->getOperand(0), Q, MaxRecurse - 1))
return V;
break;
case Instruction::AShr:
- if (!LBO->isExact() || !RBO->isExact())
+ if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
break;
if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
RBO->getOperand(0), Q, MaxRecurse - 1))
return V;
break;
case Instruction::Shl: {
- bool NUW = LBO->hasNoUnsignedWrap() && RBO->hasNoUnsignedWrap();
- bool NSW = LBO->hasNoSignedWrap() && RBO->hasNoSignedWrap();
+ bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
+ bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
if (!NUW && !NSW)
break;
if (!NSW && ICmpInst::isSigned(Pred))
if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
return V;
- if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS))
+ if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ))
return V;
// If both operands have range metadata, use the metadata
auto RHS_Instr = cast<Instruction>(RHS);
auto LHS_Instr = cast<Instruction>(LHS);
- if (RHS_Instr->getMetadata(LLVMContext::MD_range) &&
- LHS_Instr->getMetadata(LLVMContext::MD_range)) {
+ if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) &&
+ Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) {
auto RHS_CR = getConstantRangeFromMetadata(
*RHS_Instr->getMetadata(LLVMContext::MD_range));
auto LHS_CR = getConstantRangeFromMetadata(
// icmp eq|ne X, Y -> false|true if X != Y
if (ICmpInst::isEquality(Pred) &&
- isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT)) {
+ isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
}
// Simplify comparisons of related pointers using a powerful, recursive
// GEP-walk when we have target data available.
if (LHS->getType()->isPointerTy())
- if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI, LHS,
- RHS))
+ if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
+ Q.IIQ, LHS, RHS))
return C;
if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
Q.DL.getTypeSizeInBits(CRHS->getType()))
if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
- CLHS->getPointerOperand(),
+ Q.IIQ, CLHS->getPointerOperand(),
CRHS->getPointerOperand()))
return C;
//
// We can't replace %sel with %add unless we strip away the flags.
if (isa<OverflowingBinaryOperator>(B))
- if (B->hasNoSignedWrap() || B->hasNoUnsignedWrap())
- return nullptr;
- if (isa<PossiblyExactOperator>(B))
- if (B->isExact())
+ if (Q.IIQ.hasNoSignedWrap(B) || Q.IIQ.hasNoUnsignedWrap(B))
return nullptr;
+ if (isa<PossiblyExactOperator>(B) && Q.IIQ.isExact(B))
+ return nullptr;
if (MaxRecurse) {
if (B->getOperand(0) == Op)
I->getFastMathFlags(), Q);
break;
case Instruction::Add:
- Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
- cast<BinaryOperator>(I)->hasNoSignedWrap(),
- cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
+ Result =
+ SimplifyAddInst(I->getOperand(0), I->getOperand(1),
+ Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
+ Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
break;
case Instruction::FSub:
Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
I->getFastMathFlags(), Q);
break;
case Instruction::Sub:
- Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
- cast<BinaryOperator>(I)->hasNoSignedWrap(),
- cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
+ Result =
+ SimplifySubInst(I->getOperand(0), I->getOperand(1),
+ Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
+ Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
break;
case Instruction::FMul:
Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
I->getFastMathFlags(), Q);
break;
case Instruction::Shl:
- Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
- cast<BinaryOperator>(I)->hasNoSignedWrap(),
- cast<BinaryOperator>(I)->hasNoUnsignedWrap(), Q);
+ Result =
+ SimplifyShlInst(I->getOperand(0), I->getOperand(1),
+ Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
+ Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
break;
case Instruction::LShr:
Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
- cast<BinaryOperator>(I)->isExact(), Q);
+ Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
break;
case Instruction::AShr:
Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
- cast<BinaryOperator>(I)->isExact(), Q);
+ Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
break;
case Instruction::And:
Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), Q);
/// (all of which can call computeKnownBits), and so on.
std::array<const Value *, MaxDepth> Excluded;
+ /// Wrapper to query additional information for instructions; it returns
+ /// conservative results when it is not safe to use metadata or flags
+ /// like nsw during simplification.
+ InstrInfoQuery IIQ;
+
unsigned NumExcluded = 0;
Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT, OptimizationRemarkEmitter *ORE = nullptr)
- : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE) {}
+ const DominatorTree *DT, bool UseInstrInfo,
+ OptimizationRemarkEmitter *ORE = nullptr)
+ : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
Query(const Query &Q, const Value *NewExcl)
- : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE),
+ : DL(Q.DL), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT), ORE(Q.ORE), IIQ(Q.IIQ),
NumExcluded(Q.NumExcluded) {
Excluded = Q.Excluded;
Excluded[NumExcluded++] = NewExcl;
const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT,
- OptimizationRemarkEmitter *ORE) {
+ OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
::computeKnownBits(V, Known, Depth,
- Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
+ Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}
static KnownBits computeKnownBits(const Value *V, unsigned Depth,
unsigned Depth, AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT,
- OptimizationRemarkEmitter *ORE) {
- return ::computeKnownBits(V, Depth,
- Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
+ OptimizationRemarkEmitter *ORE,
+ bool UseInstrInfo) {
+ return ::computeKnownBits(
+ V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}
bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
- const DataLayout &DL,
- AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT) {
+ const DataLayout &DL, AssumptionCache *AC,
+ const Instruction *CxtI, const DominatorTree *DT,
+ bool UseInstrInfo) {
assert(LHS->getType() == RHS->getType() &&
"LHS and RHS should have the same type");
assert(LHS->getType()->isIntOrIntVectorTy() &&
IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
KnownBits LHSKnown(IT->getBitWidth());
KnownBits RHSKnown(IT->getBitWidth());
- computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT);
- computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT);
+ computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
+ computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
return (LHSKnown.Zero | RHSKnown.Zero).isAllOnesValue();
}
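// For example (illustrative IR): %a = and i32 %x, 1 and %b = shl i32 %y, 1
// share no set bits: bits 1..31 are known zero in %a and bit 0 is known zero
// in %b, so the union of the known-zero masks is all ones.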
const Query &Q);
bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
- bool OrZero,
- unsigned Depth, AssumptionCache *AC,
- const Instruction *CxtI,
- const DominatorTree *DT) {
- return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
- Query(DL, AC, safeCxtI(V, CxtI), DT));
+ bool OrZero, unsigned Depth,
+ AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT, bool UseInstrInfo) {
+ return ::isKnownToBeAPowerOfTwo(
+ V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}
static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);
bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT) {
- return ::isKnownNonZero(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
+ const DominatorTree *DT, bool UseInstrInfo) {
+ return ::isKnownNonZero(V, Depth,
+ Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}
bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
- unsigned Depth,
- AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT) {
- KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
+ unsigned Depth, AssumptionCache *AC,
+ const Instruction *CxtI, const DominatorTree *DT,
+ bool UseInstrInfo) {
+ KnownBits Known =
+ computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
return Known.isNonNegative();
}
bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT) {
+ const DominatorTree *DT, bool UseInstrInfo) {
if (auto *CI = dyn_cast<ConstantInt>(V))
return CI->getValue().isStrictlyPositive();
// TODO: We're doing two recursive queries here. We should factor this such
// that only a single query is needed.
- return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT) &&
- isKnownNonZero(V, DL, Depth, AC, CxtI, DT);
+ return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
+ isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}
bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT) {
- KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT);
+ const DominatorTree *DT, bool UseInstrInfo) {
+ KnownBits Known =
+ computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
return Known.isNegative();
}
static bool isKnownNonEqual(const Value *V1, const Value *V2, const Query &Q);
bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
- const DataLayout &DL,
- AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT) {
- return ::isKnownNonEqual(V1, V2, Query(DL, AC,
- safeCxtI(V1, safeCxtI(V2, CxtI)),
- DT));
+ const DataLayout &DL, AssumptionCache *AC,
+ const Instruction *CxtI, const DominatorTree *DT,
+ bool UseInstrInfo) {
+ return ::isKnownNonEqual(V1, V2,
+ Query(DL, AC, safeCxtI(V1, safeCxtI(V2, CxtI)), DT,
+ UseInstrInfo, /*ORE=*/nullptr));
}
static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
const Query &Q);
bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
- const DataLayout &DL,
- unsigned Depth, AssumptionCache *AC,
- const Instruction *CxtI, const DominatorTree *DT) {
- return ::MaskedValueIsZero(V, Mask, Depth,
- Query(DL, AC, safeCxtI(V, CxtI), DT));
+ const DataLayout &DL, unsigned Depth,
+ AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT, bool UseInstrInfo) {
+ return ::MaskedValueIsZero(
+ V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}
static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
unsigned Depth, AssumptionCache *AC,
const Instruction *CxtI,
- const DominatorTree *DT) {
- return ::ComputeNumSignBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
+ const DominatorTree *DT, bool UseInstrInfo) {
+ return ::ComputeNumSignBits(
+ V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}
static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
switch (I->getOpcode()) {
default: break;
case Instruction::Load:
- if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
+ if (MDNode *MD =
+ Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
computeKnownBitsFromRangeMetadata(*MD, Known);
break;
case Instruction::And: {
break;
}
case Instruction::Mul: {
- bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
+ bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, Known,
Known2, Depth, Q);
break;
// The RHS from matchSelectPattern is the negation part of the abs pattern.
// If the negate has an NSW flag we can assume the sign bit of the result
// will be 0 because that makes abs(INT_MIN) undefined.
- if (cast<Instruction>(RHS)->hasNoSignedWrap())
+ if (Q.IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
MaxHighZeros = 1;
}
}
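// Illustrative pattern: for %n = sub nsw i32 0, %x, the select
//   select (icmp sgt i32 %x, -1), i32 %x, i32 %n
// computes abs(%x); nsw on the sub makes abs(INT_MIN) poison, so the sign
// bit of the result can be assumed clear.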
case Instruction::Shl: {
// (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
- bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
+ bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
auto KZF = [NSW](const APInt &KnownZero, unsigned ShiftAmt) {
APInt KZResult = KnownZero << ShiftAmt;
KZResult.setLowBits(ShiftAmt); // Low bits known 0.
break;
}
case Instruction::Sub: {
- bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
+ bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
Known, Known2, Depth, Q);
break;
}
case Instruction::Add: {
- bool NSW = cast<OverflowingBinaryOperator>(I)->hasNoSignedWrap();
+ bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
Known, Known2, Depth, Q);
break;
Known3.countMinTrailingZeros()));
auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(LU);
- if (OverflowOp && OverflowOp->hasNoSignedWrap()) {
+ if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
// If initial value of recurrence is nonnegative, and we are adding
// a nonnegative number with nsw, the result can only be nonnegative
// or poison value regardless of the number of times we execute the
// If range metadata is attached to this call, set known bits from that,
// and then intersect with known bits based on other properties of the
// function.
- if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
+ if (MDNode *MD =
+ Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
computeKnownBitsFromRangeMetadata(*MD, Known);
if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
computeKnownBits(RV, Known2, Depth + 1, Q);
// either the original power-of-two, a larger power-of-two or zero.
if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
- if (OrZero || VOBO->hasNoUnsignedWrap() || VOBO->hasNoSignedWrap()) {
+ if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
+ Q.IIQ.hasNoSignedWrap(VOBO)) {
if (match(X, m_And(m_Specific(Y), m_Value())) ||
match(X, m_And(m_Value(), m_Specific(Y))))
if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
}
if (auto *I = dyn_cast<Instruction>(V)) {
- if (MDNode *Ranges = I->getMetadata(LLVMContext::MD_range)) {
+ if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
// If the possible ranges don't contain zero, then the value is
// definitely non-zero.
if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
// A Load tagged with nonnull metadata is never null.
if (const LoadInst *LI = dyn_cast<LoadInst>(V))
- if (LI->getMetadata(LLVMContext::MD_nonnull))
+ if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
return true;
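// Illustrative IR: '%p = load i8*, i8** %q, !nonnull !0' guarantees the
// loaded pointer is non-null; with UseInstrInfo == false the metadata is
// ignored and this shortcut is skipped.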
if (auto CS = ImmutableCallSite(V)) {
if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
// shl nuw can't remove any non-zero bits.
const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
- if (BO->hasNoUnsignedWrap())
+ if (Q.IIQ.hasNoUnsignedWrap(BO))
return isKnownNonZero(X, Depth, Q);
KnownBits Known(BitWidth);
const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
// If X and Y are non-zero then so is X * Y as long as the multiplication
// does not overflow.
- if ((BO->hasNoSignedWrap() || BO->hasNoUnsignedWrap()) &&
+ if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
isKnownNonZero(X, Depth, Q) && isKnownNonZero(Y, Depth, Q))
return true;
}
if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
if (!C->isZero() && !C->isNegative()) {
ConstantInt *X;
- if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
+ if (Q.IIQ.UseInstrInfo &&
+ (match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
!X->isNegative())
return true;
return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
}
-OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
- const Value *RHS,
- const DataLayout &DL,
- AssumptionCache *AC,
- const Instruction *CxtI,
- const DominatorTree *DT) {
+OverflowResult llvm::computeOverflowForUnsignedMul(
+ const Value *LHS, const Value *RHS, const DataLayout &DL,
+ AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
+ bool UseInstrInfo) {
// Multiplying n * m significant bits yields a result of n + m significant
// bits. If the total number of significant bits does not exceed the
// result bit width (minus 1), there is no overflow.
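// Worked example: with BitWidth = 8, if LHS has at least 4 leading zero bits
// (LHS <= 15) and RHS has at least 5 (RHS <= 7), the product is at most 105,
// which fits, since 4 + 3 = 7 significant bits <= BitWidth - 1.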
unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
KnownBits LHSKnown(BitWidth);
KnownBits RHSKnown(BitWidth);
- computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
- computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT);
+ computeKnownBits(LHS, LHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr,
+ UseInstrInfo);
+ computeKnownBits(RHS, RHSKnown, DL, /*Depth=*/0, AC, CxtI, DT, nullptr,
+ UseInstrInfo);
// Note that underestimating the number of zero bits gives a more
// conservative answer.
unsigned ZeroBits = LHSKnown.countMinLeadingZeros() +
return OverflowResult::MayOverflow;
}
-OverflowResult llvm::computeOverflowForSignedMul(const Value *LHS,
- const Value *RHS,
- const DataLayout &DL,
- AssumptionCache *AC,
- const Instruction *CxtI,
- const DominatorTree *DT) {
+OverflowResult
+llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
+ const DataLayout &DL, AssumptionCache *AC,
+ const Instruction *CxtI,
+ const DominatorTree *DT, bool UseInstrInfo) {
// Multiplying n * m significant bits yields a result of n + m significant
// bits. If the total number of significant bits does not exceed the
// result bit width (minus 1), there is no overflow.
// product is exactly the minimum negative number.
// E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
// For simplicity we just check if at least one side is not negative.
- KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
- KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
+ KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
+ nullptr, UseInstrInfo);
+ KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
+ nullptr, UseInstrInfo);
if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
return OverflowResult::NeverOverflows;
}
return OverflowResult::MayOverflow;
}
-OverflowResult llvm::computeOverflowForUnsignedAdd(const Value *LHS,
- const Value *RHS,
- const DataLayout &DL,
- AssumptionCache *AC,
- const Instruction *CxtI,
- const DominatorTree *DT) {
- KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT);
+OverflowResult llvm::computeOverflowForUnsignedAdd(
+ const Value *LHS, const Value *RHS, const DataLayout &DL,
+ AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
+ bool UseInstrInfo) {
+ KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
+ nullptr, UseInstrInfo);
if (LHSKnown.isNonNegative() || LHSKnown.isNegative()) {
- KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT);
+ KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
+ nullptr, UseInstrInfo);
if (LHSKnown.isNegative() && RHSKnown.isNegative()) {
// The sign bit is set in both cases: this MUST overflow.
TargetLibraryInfo *TLI, AliasAnalysis *AA, MemorySSA *MSSA,
const DataLayout &DL)
: F(F), DT(DT), TLI(TLI), AA(AA), MSSA(MSSA), DL(DL),
- PredInfo(make_unique<PredicateInfo>(F, *DT, *AC)), SQ(DL, TLI, DT, AC) {
- }
+ PredInfo(make_unique<PredicateInfo>(F, *DT, *AC)),
+ SQ(DL, TLI, DT, AC, /*CtxI=*/nullptr, /*UseInstrInfo=*/false) {}
bool runGVN();
; CHECK: cond.end.i:
; CHECK-NEXT: [[COND_I:%.*]] = phi i32 [ [[DIV_I]], [[COND_TRUE_I]] ], [ 0, [[FOR_BODY_I]] ]
; CHECK-NEXT: [[INC_I]] = add nuw nsw i32 [[F_08_I]], 1
-; CHECK-NEXT: [[CALL5:%.*]] = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str4, i64 0, i64 0), i32 0)
+; CHECK-NEXT: [[TEST:%.*]] = udiv i32 [[MUL_I]], [[INC_I]]
+; CHECK-NEXT: [[CALL5:%.*]] = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str4, i64 0, i64 0), i32 [[TEST]])
; CHECK-NEXT: [[EXITCOND_I:%.*]] = icmp eq i32 [[INC_I]], 4
; CHECK-NEXT: br i1 [[EXITCOND_I]], label [[FN1_EXIT:%.*]], label [[FOR_BODY_I]]
; CHECK: fn1.exit: