// FIXME: When we can selectively preserve passes, preserve the domtree.
AU.addRequired<ProfileSummaryInfoWrapperPass>();
AU.addRequired<TargetLibraryInfoWrapperPass>();
+ AU.addRequired<TargetPassConfig>();
AU.addRequired<TargetTransformInfoWrapperPass>();
AU.addRequired<LoopInfoWrapperPass>();
}
InsertedInsts.clear();
PromotedInsts.clear();
- if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
- TM = &TPC->getTM<TargetMachine>();
- SubtargetInfo = TM->getSubtargetImpl(F);
- TLI = SubtargetInfo->getTargetLowering();
- TRI = SubtargetInfo->getRegisterInfo();
- }
+ TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
+ SubtargetInfo = TM->getSubtargetImpl(F);
+ TLI = SubtargetInfo->getTargetLowering();
+ TRI = SubtargetInfo->getRegisterInfo();
TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
/// This optimization identifies DIV instructions that can be
/// profitably bypassed and carried out with a shorter, faster divide.
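// Illustrative sketch (not from the original source; shapes assumed): with a
// bypass width of 32 registered for 64-bit division, the rewritten code
// guards a cheap narrow divide with a runtime check on the high bits:
//
//   if (((A | B) >> 32) == 0)                       // both operands fit in 32 bits
//     Quot = (uint64_t)((uint32_t)A / (uint32_t)B); // short, fast divide
//   else
//     Quot = A / B;                                 // full-width slow divide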
- if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI &&
- TLI->isSlowDivBypassed()) {
+ if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
const DenseMap<unsigned int, unsigned int> &BypassWidths =
- TLI->getBypassSlowDivWidths();
+ TLI->getBypassSlowDivWidths();
BasicBlock* BB = &*F.begin();
while (BB != nullptr) {
// bypassSlowDivision may create new BBs, but we don't want to reapply the
const TargetLowering *TLI,
const DataLayout *DL,
bool &ModifiedDT) {
- if (!TLI || !DL)
+ if (!DL)
return false;
// If a zero input is undefined, it doesn't make sense to despeculate that.
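// Illustrative sketch (assumed IR, not from this file): a zero-defined count,
//   %z = call i64 @llvm.cttz.i64(i64 %x, i1 false)
// may be despeculated by branching around the count when %x is zero, whereas
//   %z = call i64 @llvm.cttz.i64(i64 %x, i1 true)
// already leaves the zero case undefined and needs no guard.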
// Lower inline assembly if we can.
// If we found an inline asm expression, and if the target knows how to
// lower it to normal LLVM code, do so now.
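// Illustrative sketch (assumption: an x86-like target whose ExpandInlineAsm
// recognizes byte-swap assembly): a call such as
//   %r = call i32 asm "bswap $0", "=r,0"(i32 %x)
// may be replaced by an equivalent @llvm.bswap.i32 call, which later passes
// can optimize like any other intrinsic.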
- if (TLI && isa<InlineAsm>(CI->getCalledValue())) {
+ if (isa<InlineAsm>(CI->getCalledValue())) {
if (TLI->ExpandInlineAsm(CI)) {
// Avoid invalidating the iterator.
CurInstIterator = BB->begin();
// Align the pointer arguments to this call if the target thinks it's a good
// idea
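// Illustrative sketch (assumption: a target such as ARM asking for 4-byte
// aligned memcpy operands): for
//   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dst, i8* @src_buf, i32 64, i1 false)
// the alignment of a byte-aligned global @src_buf feeding the call can be
// raised to 4 so the expanded copy may use word-sized accesses.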
unsigned MinSize, PrefAlign;
- if (TLI && TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
+ if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
for (auto &Arg : CI->arg_operands()) {
// We want to align both objects whose address is used directly and
// objects whose address is used in casts and GEPs, though it only makes
}
}
- if (TLI) {
- SmallVector<Value*, 2> PtrOps;
- Type *AccessTy;
- if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
- while (!PtrOps.empty()) {
- Value *PtrVal = PtrOps.pop_back_val();
- unsigned AS = PtrVal->getType()->getPointerAddressSpace();
- if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
- return true;
- }
- }
+ SmallVector<Value *, 2> PtrOps;
+ Type *AccessTy;
+ if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
+ while (!PtrOps.empty()) {
+ Value *PtrVal = PtrOps.pop_back_val();
+ unsigned AS = PtrVal->getType()->getPointerAddressSpace();
+ if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
+ return true;
+ }
}
// From here on out we're working with named functions.
/// ret i32 %tmp2
/// @endcode
bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB, bool &ModifiedDT) {
- if (!TLI)
- return false;
-
ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
if (!RetI)
return false;
if (SunkAddr->getType() != Addr->getType())
SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
} else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
- TM && SubtargetInfo->addrSinkUsingGEPs())) {
+ SubtargetInfo->addrSinkUsingGEPs())) {
// By default, we use the GEP-based method when AA is used later. This
// prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
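// Illustrative sketch (made-up values): the integer-based form
//   %c = ptrtoint i8* %base to i64
//   %d = add i64 %c, 16
//   %sunkaddr = inttoptr i64 %d to i32*
// hides the provenance of %base from alias analysis, while the GEP-based form
//   %sunkaddr = getelementptr i8, i8* %base, i64 16
// keeps it visible.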
LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
// this check inside the for loop is to catch the case where an extension
// is directly fed by a load because in that case the extension can be moved
// up without any promotion on its operands.
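// Illustrative sketch (made-up IR): in
//   %ld  = load i32, i32* %p
//   %ext = zext i32 %ld to i64
// the extension can be folded into an extending load as is, so no operand
// promotion is required first.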
- if (!TLI || !TLI->enableExtLdPromotion() || DisableExtLdPromotion)
+ if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
return false;
// Get the action to perform the promotion.
/// \p Inst[in/out] the extension may be modified during the process if some
/// promotions apply.
bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
- // ExtLoad formation and address type promotion infrastructure requires TLI to
- // be effective.
- if (!TLI)
- return false;
-
bool AllowPromotionWithoutCommonHeader = false;
/// See if it is an interesting sext operation for the address type
/// promotion before trying to promote it, e.g., the ones with the right
return false;
// Only do this xform if truncating is free.
- if (TLI && !TLI->isTruncateFree(I->getType(), Src->getType()))
+ if (!TLI->isTruncateFree(I->getType(), Src->getType()))
return false;
// Only safe to perform the optimization if the source is also defined in
/// turn it into a branch.
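/// A rough sketch of the shape involved (illustrative only):
/// @code
///   %res = select i1 %cond, i32 %a, i32 %b
/// @endcode
/// becomes a conditional branch over the two values, merged back together
/// with a phi in the continuation block.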
bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
// If branch conversion isn't desirable, exit early.
- if (DisableSelectToBranch ||
- OptSize || llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get()) ||
- !TLI)
+ if (DisableSelectToBranch || OptSize ||
+ llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get()))
return false;
// Find all consecutive select instructions that share the same condition.
BasicBlock *DefBB = SVI->getParent();
// Only do this xform if variable vector shifts are particularly expensive.
- if (!TLI || !TLI->isVectorShiftByScalarCheap(SVI->getType()))
+ if (!TLI->isVectorShiftByScalarCheap(SVI->getType()))
return false;
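// Illustrative sketch (made-up IR): a splatted shift amount defined in
// another block,
//   %ins   = insertelement <4 x i32> undef, i32 %amt, i32 0
//   %splat = shufflevector <4 x i32> %ins, <4 x i32> undef, <4 x i32> zeroinitializer
//   ...
//   %sh = shl <4 x i32> %vec, %splat
// is duplicated next to the shl so instruction selection can use a
// shift-by-scalar form instead of a general vector shift.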
// We only expect better codegen by sinking a shuffle if we can recognise a
// If the operands of I can be folded into a target instruction together with
// I, duplicate and sink them.
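// Illustrative sketch (assumption: a target such as AArch64 reporting the
// extended operands of a multiply as sinkable): in
//   %ea = zext <8 x i8> %a to <8 x i16>   ; defined in another block
//   %eb = zext <8 x i8> %b to <8 x i16>   ; defined in another block
//   ...
//   %m  = mul <8 x i16> %ea, %eb
// the extensions are duplicated into the multiply's block so instruction
// selection can form a widening multiply.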
SmallVector<Use *, 4> OpsToSink;
- if (!TLI || !TLI->shouldSinkOperands(I, OpsToSink))
+ if (!TLI->shouldSinkOperands(I, OpsToSink))
return false;
// OpsToSink can contain multiple uses in a use chain (e.g.
}
bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
- if (!TLI || !DL)
+ if (!DL)
return false;
Value *Cond = SI->getCondition();
/// has this feature and this is profitable.
bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
unsigned CombineCost = std::numeric_limits<unsigned>::max();
- if (DisableStoreExtract || !TLI ||
+ if (DisableStoreExtract ||
(!StressStoreExtract &&
!TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
Inst->getOperand(1), CombineCost)))
if (isa<Constant>(CI->getOperand(0)))
return false;
- if (TLI && OptimizeNoopCopyExpression(CI, *TLI, *DL))
+ if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
return true;
if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
/// Sink a zext or sext into its user blocks if the target type doesn't
/// fit in one register
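// Illustrative sketch (made-up IR; a 32-bit target assumed): a cast such as
//   %w = zext i32 %x to i64
// whose result type must be expanded into a register pair is recreated in
// each user block instead of being kept live across blocks in an illegal
// type.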
- if (TLI &&
- TLI->getTypeAction(CI->getContext(),
+ if (TLI->getTypeAction(CI->getContext(),
TLI->getValueType(*DL, CI->getType())) ==
- TargetLowering::TypeExpandInteger) {
+ TargetLowering::TypeExpandInteger) {
return SinkCast(CI);
} else {
bool MadeChange = optimizeExt(I);
}
if (auto *Cmp = dyn_cast<CmpInst>(I))
- if (TLI && optimizeCmp(Cmp, ModifiedDT))
+ if (optimizeCmp(Cmp, ModifiedDT))
return true;
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
- if (TLI) {
- bool Modified = optimizeLoadExt(LI);
- unsigned AS = LI->getPointerAddressSpace();
- Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
- return Modified;
- }
- return false;
+ bool Modified = optimizeLoadExt(LI);
+ unsigned AS = LI->getPointerAddressSpace();
+ Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
+ return Modified;
}
if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
- if (TLI && splitMergedValStore(*SI, *DL, *TLI))
+ if (splitMergedValStore(*SI, *DL, *TLI))
return true;
SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
- if (TLI) {
- unsigned AS = SI->getPointerAddressSpace();
- return optimizeMemoryInst(I, SI->getOperand(1),
- SI->getOperand(0)->getType(), AS);
- }
- return false;
+ unsigned AS = SI->getPointerAddressSpace();
+ return optimizeMemoryInst(I, SI->getOperand(1),
+ SI->getOperand(0)->getType(), AS);
}
if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);
- if (BinOp && (BinOp->getOpcode() == Instruction::And) &&
- EnableAndCmpSinking && TLI)
+ if (BinOp && (BinOp->getOpcode() == Instruction::And) && EnableAndCmpSinking)
return sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts);
// TODO: Move this into the switch on opcode - it handles shifts already.
if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
BinOp->getOpcode() == Instruction::LShr)) {
ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
- if (TLI && CI && TLI->hasExtractBitsInsn())
+ if (CI && TLI->hasExtractBitsInsn())
if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
return true;
}
}
bool MadeBitReverse = true;
- while (TLI && MadeBitReverse) {
+ while (MadeBitReverse) {
MadeBitReverse = false;
for (auto &I : reverse(BB)) {
if (makeBitReverse(I, *DL, *TLI)) {
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
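/// A rough sketch of the shape involved (illustrative only):
/// @code
///   %c = and i1 %a, %b
///   br i1 %c, label %then, label %else
/// @endcode
/// is split into one conditional branch on %a and a second one on %b.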
bool CodeGenPrepare::splitBranchCondition(Function &F, bool &ModifiedDT) {
- if (!TM || !TM->Options.EnableFastISel || !TLI || TLI->isJumpExpensive())
+ if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
return false;
bool MadeChange = false;