class Loop;
class LoopInfo;
class MDNode;
-class OptimizationRemarkEmitter;
class StringRef;
class TargetLibraryInfo;
class Value;
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr,
bool UseInstrInfo = true);
/// Determine which bits of V are known to be either zero or one and return
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr,
bool UseInstrInfo = true);
/// Returns the known bits rather than passing by reference.
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr,
bool UseInstrInfo = true);
/// Returns the known bits rather than passing by reference.
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr,
bool UseInstrInfo = true);
/// Compute known bits from the range metadata.
const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS,
unsigned Depth, const DataLayout &DL, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true);
+ bool UseInstrInfo = true);
/// Return true if LHS and RHS have no common bits set.
bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true);
+ bool UseInstrInfo = true);
KnownFPClass computeKnownFPClass(
const Value *V, const DataLayout &DL,
FPClassTest InterestedClasses = fcAllFlags, unsigned Depth = 0,
const TargetLibraryInfo *TLI = nullptr, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr, const DominatorTree *DT = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true);
+ bool UseInstrInfo = true);
/// Return true if we can prove that the specified FP value is never equal to
/// -0.0.
AssumptionCache *AC = nullptr,
const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr,
bool UseInstrInfo = true) {
KnownFPClass Known = computeKnownFPClass(V, DL, fcInf, Depth, TLI, AC, CtxI,
- DT, ORE, UseInstrInfo);
+ DT, UseInstrInfo);
return Known.isKnownNeverInfinity();
}
const Value *V, const DataLayout &DL, const TargetLibraryInfo *TLI,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CtxI = nullptr, const DominatorTree *DT = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
+ bool UseInstrInfo = true) {
KnownFPClass Known = computeKnownFPClass(V, DL, fcInf | fcNan, Depth, TLI, AC,
- CtxI, DT, ORE, UseInstrInfo);
+ CtxI, DT, UseInstrInfo);
return Known.isKnownNeverNaN() && Known.isKnownNeverInfinity();
}
AssumptionCache *AC = nullptr,
const Instruction *CtxI = nullptr,
const DominatorTree *DT = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr,
bool UseInstrInfo = true) {
KnownFPClass Known = computeKnownFPClass(V, DL, fcNan, Depth, TLI, AC, CtxI,
- DT, ORE, UseInstrInfo);
+ DT, UseInstrInfo);
return Known.isKnownNeverNaN();
}
const Instruction *CxtI;
const DominatorTree *DT;
- // Unlike the other analyses, this may be a nullptr because not all clients
- // provide it currently.
- OptimizationRemarkEmitter *ORE;
-
/// If true, it is safe to use metadata during simplification.
InstrInfoQuery IIQ;
Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT, bool UseInstrInfo,
- OptimizationRemarkEmitter *ORE = nullptr)
- : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
+ const DominatorTree *DT, bool UseInstrInfo)
+ : DL(DL), AC(AC), CxtI(CxtI), DT(DT), IIQ(UseInstrInfo) {}
};
} // end anonymous namespace
void llvm::computeKnownBits(const Value *V, KnownBits &Known,
const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT,
- OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
+ const DominatorTree *DT, bool UseInstrInfo) {
::computeKnownBits(V, Known, Depth,
- Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
+ Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}
void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
KnownBits &Known, const DataLayout &DL,
unsigned Depth, AssumptionCache *AC,
const Instruction *CxtI, const DominatorTree *DT,
- OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
+ bool UseInstrInfo) {
::computeKnownBits(V, DemandedElts, Known, Depth,
- Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
+ Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}
KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
unsigned Depth, AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT,
- OptimizationRemarkEmitter *ORE,
bool UseInstrInfo) {
return ::computeKnownBits(
- V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
+ V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}
KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
- const DominatorTree *DT,
- OptimizationRemarkEmitter *ORE,
- bool UseInstrInfo) {
+ const DominatorTree *DT, bool UseInstrInfo) {
return ::computeKnownBits(
V, DemandedElts, Depth,
- Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
+ Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}
bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
KnownBits LHSKnown(IT->getBitWidth());
KnownBits RHSKnown(IT->getBitWidth());
- computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
- computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
+ computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, UseInstrInfo);
+ computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, UseInstrInfo);
return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
}
unsigned Depth, AssumptionCache *AC,
const Instruction *CxtI, const DominatorTree *DT,
bool UseInstrInfo) {
- KnownBits Known =
- computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
+ KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
return Known.isNonNegative();
}
bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT, bool UseInstrInfo) {
- KnownBits Known =
- computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
+ KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
return Known.isNegative();
}
bool UseInstrInfo) {
return ::isKnownNonEqual(V1, V2, 0,
Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
- UseInstrInfo, /*ORE=*/nullptr));
+ UseInstrInfo));
}
static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
computeKnownBitsFromCmp(V, Cmp, Known, Depth, Q);
}
- // If assumptions conflict with each other or previous known bits, then we
- // have a logical fallacy. It's possible that the assumption is not reachable,
- // so this isn't a real bug. On the other hand, the program may have undefined
- // behavior, or we might have a bug in the compiler. We can't assert/crash, so
- // clear out the known bits, try to warn the user, and hope for the best.
- if (Known.Zero.intersects(Known.One)) {
+ // Conflicting assumption: Undefined behavior will occur on this execution
+ // path.
+ if (Known.hasConflict())
Known.resetAll();
-
- if (Q.ORE)
- Q.ORE->emit([&]() {
- auto *CxtI = const_cast<Instruction *>(Q.CxtI);
- return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
- CxtI)
- << "Detected conflicting code assumptions. Program may "
- "have undefined behavior, or compiler may have "
- "internal error.";
- });
- }
}
/// Compute known bits from a shift operator, including those with a
KnownBits llvm::analyzeKnownBitsFromAndXorOr(
const Operator *I, const KnownBits &KnownLHS, const KnownBits &KnownRHS,
unsigned Depth, const DataLayout &DL, AssumptionCache *AC,
- const Instruction *CxtI, const DominatorTree *DT,
- OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
+ const Instruction *CxtI, const DominatorTree *DT, bool UseInstrInfo) {
auto *FVTy = dyn_cast<FixedVectorType>(I->getType());
APInt DemandedElts =
FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
return getKnownBitsFromAndXorOr(
I, DemandedElts, KnownLHS, KnownRHS, Depth,
- Query(DL, AC, safeCxtI(I, CxtI), DT, UseInstrInfo, ORE));
+ Query(DL, AC, safeCxtI(I, CxtI), DT, UseInstrInfo));
}
ConstantRange llvm::getVScaleRange(const Function *F, unsigned BitWidth) {
const Value *V, const APInt &DemandedElts, const DataLayout &DL,
FPClassTest InterestedClasses, unsigned Depth, const TargetLibraryInfo *TLI,
AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
- OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
+ bool UseInstrInfo) {
KnownFPClass KnownClasses;
::computeKnownFPClass(V, DemandedElts, InterestedClasses, KnownClasses, Depth,
- Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE),
+ Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo),
TLI);
return KnownClasses;
}
FPClassTest InterestedClasses, unsigned Depth,
const TargetLibraryInfo *TLI, AssumptionCache *AC,
const Instruction *CxtI, const DominatorTree *DT,
- OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
+ bool UseInstrInfo) {
KnownFPClass Known;
::computeKnownFPClass(V, Known, InterestedClasses, Depth,
- Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE),
+ Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo),
TLI);
return Known;
}
static ConstantRange computeConstantRangeIncludingKnownBits(
const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
- OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
- KnownBits Known = computeKnownBits(
- V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
+ bool UseInstrInfo = true) {
+ KnownBits Known = computeKnownBits(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
ConstantRange CR2 = computeConstantRange(V, ForSigned, UseInstrInfo);
ConstantRange::PreferredRangeType RangeType =
AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
bool UseInstrInfo) {
KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
- nullptr, UseInstrInfo);
+ UseInstrInfo);
KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
- nullptr, UseInstrInfo);
+ UseInstrInfo);
ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
// E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
// For simplicity we just check if at least one side is not negative.
KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
- nullptr, UseInstrInfo);
+ UseInstrInfo);
KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
- nullptr, UseInstrInfo);
+ UseInstrInfo);
if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
return OverflowResult::NeverOverflows;
}
AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
bool UseInstrInfo) {
ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
- LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
- nullptr, UseInstrInfo);
+ LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT, UseInstrInfo);
ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
- RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
- nullptr, UseInstrInfo);
+ RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT, UseInstrInfo);
return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
}