return replaceInstUsesWith(I, V);
if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
- I.hasNoUnsignedWrap(), DL, TLI, DT, AC))
+ I.hasNoUnsignedWrap(), DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
// (A*B)+(A*C) -> A*(B+C) etc
return replaceInstUsesWith(I, V);
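// Illustrative sketch (not part of this patch): the distributive-law fold
// above can turn
//   %t0 = mul i32 %a, %b
//   %t1 = mul i32 %a, %c
//   %r  = add i32 %t0, %t1
// into
//   %s = add i32 %b, %c
//   %r = mul i32 %a, %s
// when the factored form simplifies.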
// A+B --> A|B iff A and B have no bits set in common.
- if (haveNoCommonBitsSet(LHS, RHS, DL, AC, &I, DT))
+ if (haveNoCommonBitsSet(LHS, RHS, DL, &AC, &I, &DT))
return BinaryOperator::CreateOr(LHS, RHS);
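// Illustrative sketch (not part of this patch): haveNoCommonBitsSet can
// prove disjointness through masks, e.g.
//   %a = and i8 %x, 15    ; low nibble only
//   %b = and i8 %y, -16   ; high nibble only
//   %r = add i8 %a, %b
// No bit position can carry, so the add is equivalent to
//   %r = or i8 %a, %b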
if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
return replaceInstUsesWith(I, V);
if (Value *V =
- SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), DL, TLI, DT, AC))
+ SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
if (isa<Constant>(RHS)) {
return replaceInstUsesWith(I, V);
if (Value *V = SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(),
- I.hasNoUnsignedWrap(), DL, TLI, DT, AC))
+ I.hasNoUnsignedWrap(), DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
// (A*B)-(A*C) -> A*(B-C) etc
return replaceInstUsesWith(I, V);
if (Value *V =
- SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), DL, TLI, DT, AC))
+ SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
// fsub nsz 0, X ==> fsub nsz -0.0, X
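// Illustrative note (not part of this patch): canonicalizing the zero to
// -0.0 means "fsub nsz -0.0, %x" can later be treated as a plain fneg of %x.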
if (Value *V = SimplifyVectorOp(I))
return replaceInstUsesWith(I, V);
- if (Value *V = SimplifyAndInst(Op0, Op1, DL, TLI, DT, AC))
+ if (Value *V = SimplifyAndInst(Op0, Op1, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
// (A|B)&(A|C) -> A|(B&C) etc
Value *Mask = nullptr;
Value *Masked = nullptr;
if (LAnd->getOperand(0) == RAnd->getOperand(0) &&
- isKnownToBeAPowerOfTwo(LAnd->getOperand(1), DL, false, 0, AC, CxtI,
- DT) &&
- isKnownToBeAPowerOfTwo(RAnd->getOperand(1), DL, false, 0, AC, CxtI,
- DT)) {
+ isKnownToBeAPowerOfTwo(LAnd->getOperand(1), DL, false, 0, &AC, CxtI,
+ &DT) &&
+ isKnownToBeAPowerOfTwo(RAnd->getOperand(1), DL, false, 0, &AC, CxtI,
+ &DT)) {
Mask = Builder->CreateOr(LAnd->getOperand(1), RAnd->getOperand(1));
Masked = Builder->CreateAnd(LAnd->getOperand(0), Mask);
} else if (LAnd->getOperand(1) == RAnd->getOperand(1) &&
- isKnownToBeAPowerOfTwo(LAnd->getOperand(0), DL, false, 0, AC,
- CxtI, DT) &&
- isKnownToBeAPowerOfTwo(RAnd->getOperand(0), DL, false, 0, AC,
- CxtI, DT)) {
+ isKnownToBeAPowerOfTwo(LAnd->getOperand(0), DL, false, 0, &AC,
+ CxtI, &DT) &&
+ isKnownToBeAPowerOfTwo(RAnd->getOperand(0), DL, false, 0, &AC,
+ CxtI, &DT)) {
Mask = Builder->CreateOr(LAnd->getOperand(0), RAnd->getOperand(0));
Masked = Builder->CreateAnd(LAnd->getOperand(1), Mask);
}
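// Illustrative sketch (not part of this patch): once both mask operands are
// proved powers of two via &AC/&DT, two single-bit tests against zero, e.g.
// on
//   %x & 4   and   %x & 8
// can be merged into a single test on %x & 12.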
if (Value *V = SimplifyVectorOp(I))
return replaceInstUsesWith(I, V);
- if (Value *V = SimplifyOrInst(Op0, Op1, DL, TLI, DT, AC))
+ if (Value *V = SimplifyOrInst(Op0, Op1, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
// (A&B)|(A&C) -> A&(B|C) etc
if (Value *V = SimplifyVectorOp(I))
return replaceInstUsesWith(I, V);
- if (Value *V = SimplifyXorInst(Op0, Op1, DL, TLI, DT, AC))
+ if (Value *V = SimplifyXorInst(Op0, Op1, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
// (A&B)^(A&C) -> A&(B^C) etc
}
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
- unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, AC, DT);
- unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, AC, DT);
+ unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, &AC, &DT);
+ unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, &AC, &DT);
unsigned MinAlign = std::min(DstAlign, SrcAlign);
unsigned CopyAlign = MI->getAlignment();
}
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
- unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, AC, DT);
+ unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
if (MI->getAlignment() < Alignment) {
MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
Alignment, false));
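// Illustrative note (not part of this patch): if the analyses prove the
// destination of a memset declared with align 1 is actually 16-byte aligned,
// its align operand is raised to 16, which later lowering can exploit.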
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
auto Args = CI.arg_operands();
if (Value *V = SimplifyCall(CI.getCalledValue(), Args.begin(), Args.end(), DL,
- TLI, DT, AC))
+ &TLI, &DT, &AC))
return replaceInstUsesWith(CI, V);
- if (isFreeCall(&CI, TLI))
+ if (isFreeCall(&CI, &TLI))
return visitFree(CI);
// If the caller function is nounwind, mark the call as nounwind, even if the
// callee isn't.
default: break;
case Intrinsic::objectsize: {
uint64_t Size;
- if (getObjectSize(II->getArgOperand(0), Size, DL, TLI)) {
+ if (getObjectSize(II->getArgOperand(0), Size, DL, &TLI)) {
APInt APSize(II->getType()->getIntegerBitWidth(), Size);
// Equality check to be sure that `Size` can fit in a value of type
// `II->getType()`
case Intrinsic::ppc_altivec_lvx:
case Intrinsic::ppc_altivec_lvxl:
// Turn PPC lvx -> load if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
- 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
+ &DT) >= 16) {
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr);
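// Illustrative sketch (not part of this patch; %p and %pc are hypothetical):
//   %v = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %p) ; %p 16-byte aligned
// becomes an ordinary aligned vector load through the bitcast pointer:
//   %v = load <4 x i32>, <4 x i32>* %pc, align 16
// The stvx/qvlf*/qvstf* cases below follow the same pattern.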
case Intrinsic::ppc_altivec_stvx:
case Intrinsic::ppc_altivec_stvxl:
// Turn stvx -> store if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
- 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
+ &DT) >= 16) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
}
case Intrinsic::ppc_qpx_qvlfs:
// Turn PPC QPX qvlfs -> load if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, AC, DT) >=
- 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
+ &DT) >= 16) {
Type *VTy = VectorType::get(Builder->getFloatTy(),
II->getType()->getVectorNumElements());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
break;
case Intrinsic::ppc_qpx_qvlfd:
// Turn PPC QPX qvlfd -> load if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, AC, DT) >=
- 32) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
+ &DT) >= 32) {
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr);
break;
case Intrinsic::ppc_qpx_qvstfs:
// Turn PPC QPX qvstfs -> store if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, AC, DT) >=
- 16) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
+ &DT) >= 16) {
Type *VTy = VectorType::get(Builder->getFloatTy(),
II->getArgOperand(0)->getType()->getVectorNumElements());
Value *TOp = Builder->CreateFPTrunc(II->getArgOperand(0), VTy);
break;
case Intrinsic::ppc_qpx_qvstfd:
// Turn PPC QPX qvstfd -> store if the pointer is known aligned.
- if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, AC, DT) >=
- 32) {
+ if (getOrEnforceKnownAlignment(II->getArgOperand(1), 32, DL, II, &AC,
+ &DT) >= 32) {
Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
case Intrinsic::arm_neon_vst2lane:
case Intrinsic::arm_neon_vst3lane:
case Intrinsic::arm_neon_vst4lane: {
- unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, AC, DT);
+ unsigned MemAlign =
+ getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
unsigned AlignArg = II->getNumArgOperands() - 1;
ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
RHS->getType()->isPointerTy() &&
cast<Constant>(RHS)->isNullValue()) {
LoadInst* LI = cast<LoadInst>(LHS);
- if (isValidAssumeForContext(II, LI, DT)) {
+ if (isValidAssumeForContext(II, LI, &DT)) {
MDNode *MD = MDNode::get(II->getContext(), None);
LI->setMetadata(LLVMContext::MD_nonnull, MD);
return eraseInstFromFunction(*II);
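// Illustrative sketch (not part of this patch):
//   %p = load i8*, i8** %q
//   %c = icmp ne i8* %p, null
//   call void @llvm.assume(i1 %c)
// The load is tagged with !nonnull metadata and the assume is erased.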
return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
// isKnownNonNull -> nonnull attribute
- if (isKnownNonNullAt(DerivedPtr, II, DT))
+ if (isKnownNonNullAt(DerivedPtr, II, &DT))
II->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
}
auto InstCombineRAUW = [this](Instruction *From, Value *With) {
replaceInstUsesWith(*From, With);
};
- LibCallSimplifier Simplifier(DL, TLI, InstCombineRAUW);
+ LibCallSimplifier Simplifier(DL, &TLI, InstCombineRAUW);
if (Value *With = Simplifier.optimizeCall(CI)) {
++NumSimplified;
return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
/// Improvements for call and invoke instructions.
Instruction *InstCombiner::visitCallSite(CallSite CS) {
- if (isAllocLikeFn(CS.getInstruction(), TLI))
+ if (isAllocLikeFn(CS.getInstruction(), &TLI))
return visitAllocSite(*CS.getInstruction());
bool Changed = false;
for (Value *V : CS.args()) {
if (V->getType()->isPointerTy() &&
!CS.paramHasAttr(ArgNo + 1, Attribute::NonNull) &&
- isKnownNonNullAt(V, CS.getInstruction(), DT))
+ isKnownNonNullAt(V, CS.getInstruction(), &DT))
Indices.push_back(ArgNo + 1);
ArgNo++;
}
if (Constant *C = dyn_cast<Constant>(V)) {
C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
// If we got a constantexpr back, try to simplify it with DL info.
- if (Constant *FoldedC = ConstantFoldConstant(C, DL, TLI))
+ if (Constant *FoldedC = ConstantFoldConstant(C, DL, &TLI))
C = FoldedC;
return C;
}
// Find out if the comparison would be true or false for the i'th element.
Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
- CompareRHS, DL, TLI);
+ CompareRHS, DL, &TLI);
// If the result is undef for this element, ignore it.
if (isa<UndefValue>(C)) {
// Extend range state machines to cover this element in case there is an
// undef in the middle of the range.
// Protect from self-referencing blocks
if (DI->getParent() == DB)
return false;
- // DominatorTree available?
- if (!DT)
- return false;
for (const User *U : DI->users()) {
auto *Usr = cast<Instruction>(U);
- if (Usr != UI && !DT->dominates(DB, Usr->getParent()))
+ if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
return false;
}
return true;
}
if (Value *V =
- SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL, TLI, DT, AC, &I))
+ SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL, &TLI, &DT, &AC, &I))
return replaceInstUsesWith(I, V);
// comparing -val or val with non-zero is the same as just comparing val
// (A & ~B) == 0 --> (A & B) != 0 and (A & ~B) != 0 --> (A & B) == 0,
// if A is a power of 2.
if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
match(Op1, m_Zero()) &&
- isKnownToBeAPowerOfTwo(A, DL, false, 0, AC, &I, DT) && I.isEquality())
+ isKnownToBeAPowerOfTwo(A, DL, false, 0, &AC, &I, &DT) && I.isEquality())
return new ICmpInst(I.getInversePredicate(),
Builder->CreateAnd(A, B),
Op1);
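// Illustrative sketch (not part of this patch): with %a known to be a
// power of two, the single set bit of %a lies in %b exactly when it does
// not lie in ~%b, so
//   (%a & ~%b) == 0   becomes   (%a & %b) != 0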
Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1,
- I.getFastMathFlags(), DL, TLI, DT, AC, &I))
+ I.getFastMathFlags(), DL, &TLI, &DT, &AC, &I))
return replaceInstUsesWith(I, V);
// Simplify 'fcmp pred X, X'
break;
CallInst *CI = cast<CallInst>(LHSI);
- Intrinsic::ID IID = getIntrinsicForCallSite(CI, TLI);
+ Intrinsic::ID IID = getIntrinsicForCallSite(CI, &TLI);
if (IID != Intrinsic::fabs)
break;
AliasAnalysis *AA;
// Required analyses.
- // FIXME: These can never be null and should be references.
- AssumptionCache *AC;
- TargetLibraryInfo *TLI;
- DominatorTree *DT;
+ AssumptionCache &AC;
+ TargetLibraryInfo &TLI;
+ DominatorTree &DT;
const DataLayout &DL;
// Optional analyses. When non-null, these can both be used to do better
// combining and will be updated to reflect any changes.
public:
InstCombiner(InstCombineWorklist &Worklist, BuilderTy *Builder,
bool MinimizeSize, bool ExpensiveCombines, AliasAnalysis *AA,
- AssumptionCache *AC, TargetLibraryInfo *TLI,
- DominatorTree *DT, const DataLayout &DL, LoopInfo *LI)
+ AssumptionCache &AC, TargetLibraryInfo &TLI,
+ DominatorTree &DT, const DataLayout &DL, LoopInfo *LI)
: Worklist(Worklist), Builder(Builder), MinimizeSize(MinimizeSize),
ExpensiveCombines(ExpensiveCombines), AA(AA), AC(AC), TLI(TLI), DT(DT),
DL(DL), LI(LI), MadeIRChange(false) {}
/// \returns true if the IR is changed.
bool run();
- AssumptionCache *getAssumptionCache() const { return AC; }
+ AssumptionCache &getAssumptionCache() const { return AC; }
const DataLayout &getDataLayout() const { return DL; }
- DominatorTree *getDominatorTree() const { return DT; }
+ DominatorTree &getDominatorTree() const { return DT; }
LoopInfo *getLoopInfo() const { return LI; }
- TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
+ TargetLibraryInfo &getTargetLibraryInfo() const { return TLI; }
// Visitation implementation - Implement instruction combining for different
// instruction types. The semantics are as follows:
void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
unsigned Depth, Instruction *CxtI) const {
- return llvm::computeKnownBits(V, KnownZero, KnownOne, DL, Depth, AC, CxtI,
- DT);
+ return llvm::computeKnownBits(V, KnownZero, KnownOne, DL, Depth, &AC, CxtI,
+ &DT);
}
bool MaskedValueIsZero(Value *V, const APInt &Mask, unsigned Depth = 0,
Instruction *CxtI = nullptr) const {
- return llvm::MaskedValueIsZero(V, Mask, DL, Depth, AC, CxtI, DT);
+ return llvm::MaskedValueIsZero(V, Mask, DL, Depth, &AC, CxtI, &DT);
}
unsigned ComputeNumSignBits(Value *Op, unsigned Depth = 0,
Instruction *CxtI = nullptr) const {
- return llvm::ComputeNumSignBits(Op, DL, Depth, AC, CxtI, DT);
+ return llvm::ComputeNumSignBits(Op, DL, Depth, &AC, CxtI, &DT);
}
void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
unsigned Depth = 0, Instruction *CxtI = nullptr) const {
- return llvm::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth, AC, CxtI,
- DT);
+ return llvm::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth, &AC, CxtI,
+ &DT);
}
OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
const Instruction *CxtI) {
- return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, AC, CxtI, DT);
+ return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, &AC, CxtI, &DT);
}
OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
const Instruction *CxtI) {
- return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, AC, CxtI, DT);
+ return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, &AC, CxtI, &DT);
}
private:
SmallVector<Instruction *, 4> ToDelete;
if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(&AI, ToDelete)) {
unsigned SourceAlign = getOrEnforceKnownAlignment(
- Copy->getSource(), AI.getAlignment(), DL, &AI, AC, DT);
+ Copy->getSource(), AI.getAlignment(), DL, &AI, &AC, &DT);
if (AI.getAlignment() <= SourceAlign) {
DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
// Attempt to improve the alignment.
unsigned KnownAlign = getOrEnforceKnownAlignment(
- Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, AC, DT);
+ Op, DL.getPrefTypeAlignment(LI.getType()), DL, &LI, &AC, &DT);
unsigned LoadAlign = LI.getAlignment();
unsigned EffectiveLoadAlign =
LoadAlign != 0 ? LoadAlign : DL.getABITypeAlignment(LI.getType());
// Attempt to improve the alignment.
unsigned KnownAlign = getOrEnforceKnownAlignment(
- Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, AC, DT);
+ Ptr, DL.getPrefTypeAlignment(Val->getType()), DL, &SI, &AC, &DT);
unsigned StoreAlign = SI.getAlignment();
unsigned EffectiveStoreAlign =
StoreAlign != 0 ? StoreAlign : DL.getABITypeAlignment(Val->getType());
BinaryOperator *I = dyn_cast<BinaryOperator>(V);
if (I && I->isLogicalShift() &&
isKnownToBeAPowerOfTwo(I->getOperand(0), IC.getDataLayout(), false, 0,
- IC.getAssumptionCache(), &CxtI,
- IC.getDominatorTree())) {
+ &IC.getAssumptionCache(), &CxtI,
+ &IC.getDominatorTree())) {
// We know that this is an exact/nuw shift and that the input is non-zero
// in this context as well.
if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
if (Value *V = SimplifyVectorOp(I))
return replaceInstUsesWith(I, V);
- if (Value *V = SimplifyMulInst(Op0, Op1, DL, TLI, DT, AC))
+ if (Value *V = SimplifyMulInst(Op0, Op1, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
if (Value *V = SimplifyUsingDistributiveLaws(I))
std::swap(Op0, Op1);
if (Value *V =
- SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), DL, TLI, DT, AC))
+ SimplifyFMulInst(Op0, Op1, I.getFastMathFlags(), DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
bool AllowReassociate = I.hasUnsafeAlgebra();
if (Value *V = SimplifyVectorOp(I))
return replaceInstUsesWith(I, V);
- if (Value *V = SimplifyUDivInst(Op0, Op1, DL, TLI, DT, AC))
+ if (Value *V = SimplifyUDivInst(Op0, Op1, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
// Handle the integer div common cases
if (Value *V = SimplifyVectorOp(I))
return replaceInstUsesWith(I, V);
- if (Value *V = SimplifySDivInst(Op0, Op1, DL, TLI, DT, AC))
+ if (Value *V = SimplifySDivInst(Op0, Op1, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
// Handle the integer div common cases
return BO;
}
- if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, AC, &I, DT)) {
+ if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, &AC, &I, &DT)) {
// X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
// Safe because the only negative value (1 << Y) can take on is
// INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
// the sign bit set.
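// Illustrative sketch (not part of this patch): with %x known non-negative
// and %p proved to be a power of two (e.g. 1 << %y),
//   %q = sdiv i32 %x, %p   becomes   %q = udiv i32 %x, %p
// which a later fold can turn into a logical right shift.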
return replaceInstUsesWith(I, V);
if (Value *V = SimplifyFDivInst(Op0, Op1, I.getFastMathFlags(),
- DL, TLI, DT, AC))
+ DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
if (isa<Constant>(Op0))
if (Value *V = SimplifyVectorOp(I))
return replaceInstUsesWith(I, V);
- if (Value *V = SimplifyURemInst(Op0, Op1, DL, TLI, DT, AC))
+ if (Value *V = SimplifyURemInst(Op0, Op1, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
if (Instruction *common = commonIRemTransforms(I))
I.getType());
// X urem Y -> X and Y-1, where Y is a power of 2.
- if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, AC, &I, DT)) {
+ if (isKnownToBeAPowerOfTwo(Op1, DL, /*OrZero*/ true, 0, &AC, &I, &DT)) {
Constant *N1 = Constant::getAllOnesValue(I.getType());
Value *Add = Builder->CreateAdd(Op1, N1);
return BinaryOperator::CreateAnd(Op0, Add);
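// Illustrative sketch (not part of this patch): when %y is proved a power
// of two,
//   %r = urem i32 %x, %y
// becomes
//   %m = add i32 %y, -1
//   %r = and i32 %x, %m
// i.e. a remainder by 2^k just keeps the low k bits.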
if (Value *V = SimplifyVectorOp(I))
return replaceInstUsesWith(I, V);
- if (Value *V = SimplifySRemInst(Op0, Op1, DL, TLI, DT, AC))
+ if (Value *V = SimplifySRemInst(Op0, Op1, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
// Handle the integer rem common cases
return replaceInstUsesWith(I, V);
if (Value *V = SimplifyFRemInst(Op0, Op1, I.getFastMathFlags(),
- DL, TLI, DT, AC))
+ DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
// Handle cases involving: rem X, (select Cond, Y, Z)
// PHINode simplification
//
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
- if (Value *V = SimplifyInstruction(&PN, DL, TLI, DT, AC))
+ if (Value *V = SimplifyInstruction(&PN, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(PN, V);
if (Instruction *Result = FoldPHIArgZextsIntoPHI(PN))
for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
Instruction *CtxI = PN.getIncomingBlock(i)->getTerminator();
Value *VA = PN.getIncomingValue(i);
- if (isKnownNonZero(VA, DL, 0, AC, CtxI, DT)) {
+ if (isKnownNonZero(VA, DL, 0, &AC, CtxI, &DT)) {
if (!NonZeroConst)
NonZeroConst = GetAnyNonZeroConstInt(PN);
PN.setIncomingValue(i, NonZeroConst);
Type *SelType = SI.getType();
if (Value *V =
- SimplifySelectInst(CondVal, TrueVal, FalseVal, DL, TLI, DT, AC))
+ SimplifySelectInst(CondVal, TrueVal, FalseVal, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(SI, V);
if (SelType->getScalarType()->isIntegerTy(1) &&
// If we got a constantexpr back, try to simplify it with DL info.
if (auto *C = dyn_cast<Constant>(V))
if (auto *FoldedC =
- ConstantFoldConstant(C, DL, IC.getTargetLibraryInfo()))
+ ConstantFoldConstant(C, DL, &IC.getTargetLibraryInfo()))
V = FoldedC;
return V;
}
if (Value *V =
SimplifyShlInst(I.getOperand(0), I.getOperand(1), I.hasNoSignedWrap(),
- I.hasNoUnsignedWrap(), DL, TLI, DT, AC))
+ I.hasNoUnsignedWrap(), DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
if (Instruction *V = commonShiftTransforms(I))
return replaceInstUsesWith(I, V);
if (Value *V = SimplifyLShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
- DL, TLI, DT, AC))
+ DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
if (Instruction *R = commonShiftTransforms(I))
return replaceInstUsesWith(I, V);
if (Value *V = SimplifyAShrInst(I.getOperand(0), I.getOperand(1), I.isExact(),
- DL, TLI, DT, AC))
+ DL, &TLI, &DT, &AC))
return replaceInstUsesWith(I, V);
if (Instruction *R = commonShiftTransforms(I))
Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
if (Value *V = SimplifyExtractElementInst(
- EI.getVectorOperand(), EI.getIndexOperand(), DL, TLI, DT, AC))
+ EI.getVectorOperand(), EI.getIndexOperand(), DL, &TLI, &DT, &AC))
return replaceInstUsesWith(EI, V);
// If vector val is constant with all elements the same, replace EI with
if (SI0->getCondition() == SI1->getCondition()) {
Value *SI = nullptr;
if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getFalseValue(),
- SI1->getFalseValue(), DL, TLI, DT, AC))
+ SI1->getFalseValue(), DL, &TLI, &DT, &AC))
SI = Builder->CreateSelect(SI0->getCondition(),
Builder->CreateBinOp(TopLevelOpcode,
SI0->getTrueValue(),
SI1->getTrueValue()),
V);
if (Value *V = SimplifyBinOp(TopLevelOpcode, SI0->getTrueValue(),
- SI1->getTrueValue(), DL, TLI, DT, AC))
+ SI1->getTrueValue(), DL, &TLI, &DT, &AC))
SI = Builder->CreateSelect(
SI0->getCondition(), V,
Builder->CreateBinOp(TopLevelOpcode, SI0->getFalseValue(),
// If the incoming non-constant value is in I's block, we will remove one
// instruction, but insert another equivalent one, leading to infinite
// instcombine.
- if (isPotentiallyReachable(I.getParent(), NonConstBB, DT, LI))
+ if (isPotentiallyReachable(I.getParent(), NonConstBB, &DT, LI))
return nullptr;
}
Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());
- if (Value *V = SimplifyGEPInst(GEP.getSourceElementType(), Ops, DL, TLI, DT, AC))
+ if (Value *V =
+ SimplifyGEPInst(GEP.getSourceElementType(), Ops, DL, &TLI, &DT, &AC))
return replaceInstUsesWith(GEP, V);
Value *PtrOp = GEP.getOperand(0);
if (!Offset) {
// If the bitcast is of an allocation, and the allocation will be
// converted to match the type of the cast, don't touch this.
- if (isa<AllocaInst>(Operand) || isAllocationFn(Operand, TLI)) {
+ if (isa<AllocaInst>(Operand) || isAllocationFn(Operand, &TLI)) {
// See if the bitcast simplifies, if so, don't nuke this GEP yet.
if (Instruction *I = visitBitCast(*BCI)) {
if (I != BCI) {
// If we have a malloc call which is only used in any amount of comparisons
// to null and free calls, delete the calls and replace the comparisons with
// true or false as appropriate.
SmallVector<WeakVH, 64> Users;
- if (isAllocSiteRemovable(&MI, Users, TLI)) {
+ if (isAllocSiteRemovable(&MI, Users, &TLI)) {
for (unsigned i = 0, e = Users.size(); i != e; ++i) {
// Lowering all @llvm.objectsize calls first because they may
// use a bitcast/GEP of the alloca we are removing.
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
if (II->getIntrinsicID() == Intrinsic::objectsize) {
uint64_t Size;
- if (!getObjectSize(II->getArgOperand(0), Size, DL, TLI)) {
+ if (!getObjectSize(II->getArgOperand(0), Size, DL, &TLI)) {
ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
Size = CI->isZero() ? -1ULL : 0;
}
return replaceInstUsesWith(EV, Agg);
if (Value *V =
- SimplifyExtractValueInst(Agg, EV.getIndices(), DL, TLI, DT, AC))
+ SimplifyExtractValueInst(Agg, EV.getIndices(), DL, &TLI, &DT, &AC))
return replaceInstUsesWith(EV, V);
if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) {
if (I == nullptr) continue; // skip null values.
// Check to see if we can DCE the instruction.
- if (isInstructionTriviallyDead(I, TLI)) {
+ if (isInstructionTriviallyDead(I, &TLI)) {
DEBUG(dbgs() << "IC: DCE: " << *I << '\n');
eraseInstFromFunction(*I);
++NumDeadInst;
// Instruction isn't dead, see if we can constant propagate it.
if (!I->use_empty() &&
(I->getNumOperands() == 0 || isa<Constant>(I->getOperand(0)))) {
- if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
+ if (Constant *C = ConstantFoldInstruction(I, DL, &TLI)) {
DEBUG(dbgs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');
// Add operands to the worklist.
replaceInstUsesWith(*I, C);
++NumConstProp;
- if (isInstructionTriviallyDead(I, TLI))
+ if (isInstructionTriviallyDead(I, &TLI))
eraseInstFromFunction(*I);
MadeIRChange = true;
continue;
// If the instruction was modified, it's possible that it is now dead.
// if so, remove it.
- if (isInstructionTriviallyDead(I, TLI)) {
+ if (isInstructionTriviallyDead(I, &TLI)) {
eraseInstFromFunction(*I);
} else {
Worklist.Add(I);
bool Changed = prepareICWorklistFromFunction(F, DL, &TLI, Worklist);
InstCombiner IC(Worklist, &Builder, F.optForMinSize(), ExpensiveCombines,
- AA, &AC, &TLI, &DT, DL, LI);
+ AA, AC, TLI, DT, DL, LI);
Changed |= IC.run();
if (!Changed)