From 85bd3978ae4e1c16fa8fa4bc27d0393d1d2265ea Mon Sep 17 00:00:00 2001
From: Evandro Menezes
Date: Thu, 4 Apr 2019 22:40:06 +0000
Subject: [PATCH] [IR] Refactor attribute methods in Function class (NFC)

Rename the functions that query the optimization kind attributes:
Function::optForNone() becomes hasOptNone(), Function::optForSize() becomes
hasOptSize(), Function::optForMinSize() becomes hasMinSize(), and
ARMSubtarget::optForMinSize() becomes hasMinSize().

Differential revision: https://reviews.llvm.org/D60287

llvm-svn: 357731
---
 clang/lib/CodeGen/CGCall.cpp | 2 +-
 llvm/include/llvm/CodeGen/TargetLowering.h | 2 +-
 llvm/include/llvm/IR/Function.h | 8 ++++----
 llvm/lib/Analysis/GlobalsModRef.cpp | 4 ++--
 llvm/lib/Analysis/InlineCost.cpp | 12 ++++++------
 llvm/lib/Analysis/LoopPass.cpp | 2 +-
 llvm/lib/Analysis/RegionPass.cpp | 2 +-
 llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 2 +-
 llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp | 2 +-
 llvm/lib/CodeGen/AtomicExpandPass.cpp | 4 ++--
 llvm/lib/CodeGen/BranchFolding.cpp | 4 ++--
 llvm/lib/CodeGen/CodeGenPrepare.cpp | 4 ++--
 llvm/lib/CodeGen/ExpandMemCmp.cpp | 4 ++--
 llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp | 2 +-
 llvm/lib/CodeGen/GlobalMerge.cpp | 2 +-
 llvm/lib/CodeGen/MachineBlockPlacement.cpp | 8 ++++----
 llvm/lib/CodeGen/MachineCombiner.cpp | 2 +-
 llvm/lib/CodeGen/MachineFunction.cpp | 2 +-
 llvm/lib/CodeGen/SafeStack.cpp | 2 +-
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 8 ++++----
 llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 2 +-
 llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 6 +++---
 .../CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 6 +++---
 llvm/lib/CodeGen/TailDuplicator.cpp | 2 +-
 llvm/lib/IR/Pass.cpp | 4 ++--
 .../Target/AArch64/AArch64CompressJumpTables.cpp | 2 +-
 .../Target/AArch64/AArch64ConditionalCompares.cpp | 2 +-
 llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp | 2 +-
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 2 +-
 llvm/lib/Target/AArch64/AArch64ISelLowering.h | 2 +-
 llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 2 +-
 llvm/lib/Target/AArch64/AArch64InstrInfo.td | 6 +++---
 llvm/lib/Target/ARM/ARMAsmPrinter.cpp | 6 +++---
 llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp | 6 +++---
 llvm/lib/Target/ARM/ARMISelLowering.cpp | 8 ++++----
 llvm/lib/Target/ARM/ARMInstrInfo.td | 2 +-
 llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp | 2 +-
 llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp | 2 +-
 llvm/lib/Target/ARM/ARMSubtarget.h | 2 +-
 llvm/lib/Target/ARM/ARMTargetMachine.cpp | 4 ++--
 llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp | 2 +-
 llvm/lib/Target/ARM/ARMTargetTransformInfo.h | 2 +-
 llvm/lib/Target/ARM/Thumb2SizeReduction.cpp | 4 ++--
 llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp | 6 +++---
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 2 +-
 llvm/lib/Target/X86/X86FixupBWInsts.cpp | 2 +-
 llvm/lib/Target/X86/X86FixupLEAs.cpp | 2 +-
 llvm/lib/Target/X86/X86FrameLowering.cpp | 2 +-
 llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 4 ++--
 llvm/lib/Target/X86/X86ISelLowering.cpp | 22 +++++++++++-----------
 llvm/lib/Target/X86/X86ISelLowering.h | 2 +-
 llvm/lib/Target/X86/X86InstrInfo.cpp | 10 +++++-----
 llvm/lib/Target/X86/X86InstrInfo.td | 10 +++++-----
 llvm/lib/Target/X86/X86OptimizeLEAs.cpp | 2 +-
 llvm/lib/Target/X86/X86PadShortFunction.cpp | 2 +-
 llvm/lib/Target/X86/X86SelectionDAGInfo.cpp | 2 +-
 llvm/lib/Transforms/IPO/FunctionAttrs.cpp | 4 ++--
 llvm/lib/Transforms/IPO/HotColdSplitting.cpp | 4 ++--
 llvm/lib/Transforms/IPO/InferFunctionAttrs.cpp | 2 +-
 llvm/lib/Transforms/IPO/Inliner.cpp | 2 +-
 .../InstCombine/InstructionCombining.cpp | 2 +-
 .../Instrumentation/IndirectCallPromotion.cpp | 2 +-
 llvm/lib/Transforms/Scalar/ConstantHoisting.cpp | 2 +-
 llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 2 +-
llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp | 2 +- llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp | 2 +- llvm/lib/Transforms/Scalar/LoopUnswitch.cpp | 2 +- .../lib/Transforms/Scalar/WarnMissedTransforms.cpp | 2 +- llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp | 2 +- llvm/lib/Transforms/Vectorize/LoopVectorize.cpp | 4 ++-- 70 files changed, 129 insertions(+), 129 deletions(-) diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp index 7dd3128..aa52e1e 100644 --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -1809,7 +1809,7 @@ void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone, void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) { llvm::AttrBuilder FuncAttrs; - ConstructDefaultFnAttrList(F.getName(), F.optForNone(), + ConstructDefaultFnAttrList(F.getName(), F.hasOptNone(), /* AttrOnCallsite = */ false, FuncAttrs); F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs); } diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h index cb1ae57..a9fa9f7 100644 --- a/llvm/include/llvm/CodeGen/TargetLowering.h +++ b/llvm/include/llvm/CodeGen/TargetLowering.h @@ -953,7 +953,7 @@ public: /// getEstimatedNumberOfCaseClusters() in BasicTTIImpl. virtual bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases, uint64_t Range) const { - const bool OptForSize = SI->getParent()->getParent()->optForSize(); + const bool OptForSize = SI->getParent()->getParent()->hasOptSize(); const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize); const unsigned MaxJumpTableSize = OptForSize ? UINT_MAX : getMaximumJumpTableSize(); diff --git a/llvm/include/llvm/IR/Function.h b/llvm/include/llvm/IR/Function.h index 4830796..efa1428 100644 --- a/llvm/include/llvm/IR/Function.h +++ b/llvm/include/llvm/IR/Function.h @@ -591,14 +591,14 @@ public: } /// Do not optimize this function (-O0). - bool optForNone() const { return hasFnAttribute(Attribute::OptimizeNone); } + bool hasOptNone() const { return hasFnAttribute(Attribute::OptimizeNone); } /// Optimize this function for minimum size (-Oz). - bool optForMinSize() const { return hasFnAttribute(Attribute::MinSize); } + bool hasMinSize() const { return hasFnAttribute(Attribute::MinSize); } /// Optimize this function for size (-Os) or minimum size (-Oz). - bool optForSize() const { - return hasFnAttribute(Attribute::OptimizeForSize) || optForMinSize(); + bool hasOptSize() const { + return hasFnAttribute(Attribute::OptimizeForSize) || hasMinSize(); } /// copyAttributesFrom - copy all additional attributes (those not needed to diff --git a/llvm/lib/Analysis/GlobalsModRef.cpp b/llvm/lib/Analysis/GlobalsModRef.cpp index 2ded6be..2c967c8 100644 --- a/llvm/lib/Analysis/GlobalsModRef.cpp +++ b/llvm/lib/Analysis/GlobalsModRef.cpp @@ -513,7 +513,7 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) { break; } - if (F->isDeclaration() || F->optForNone()) { + if (F->isDeclaration() || F->hasOptNone()) { // Try to get mod/ref behaviour from function attributes. if (F->doesNotAccessMemory()) { // Can't do better than that! @@ -566,7 +566,7 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) { // Don't prove any properties based on the implementation of an optnone // function. Function attributes were already used as a best approximation // above. 
- if (Node->getFunction()->optForNone()) + if (Node->getFunction()->hasOptNone()) continue; for (Instruction &I : instructions(Node->getFunction())) { diff --git a/llvm/lib/Analysis/InlineCost.cpp b/llvm/lib/Analysis/InlineCost.cpp index d1fbfaf..d8b87f9 100644 --- a/llvm/lib/Analysis/InlineCost.cpp +++ b/llvm/lib/Analysis/InlineCost.cpp @@ -897,7 +897,7 @@ void CallAnalyzer::updateThreshold(CallSite CS, Function &Callee) { // Use the OptMinSizeThreshold or OptSizeThreshold knob if they are available // and reduce the threshold if the caller has the necessary attribute. - if (Caller->optForMinSize()) { + if (Caller->hasMinSize()) { Threshold = MinIfValid(Threshold, Params.OptMinSizeThreshold); // For minsize, we want to disable the single BB bonus and the vector // bonuses, but not the last-call-to-static bonus. Inlining the last call to @@ -905,12 +905,12 @@ void CallAnalyzer::updateThreshold(CallSite CS, Function &Callee) { // call/return instructions. SingleBBBonusPercent = 0; VectorBonusPercent = 0; - } else if (Caller->optForSize()) + } else if (Caller->hasOptSize()) Threshold = MinIfValid(Threshold, Params.OptSizeThreshold); // Adjust the threshold based on inlinehint attribute and profile based // hotness information if the caller does not have MinSize attribute. - if (!Caller->optForMinSize()) { + if (!Caller->hasMinSize()) { if (Callee.hasFnAttribute(Attribute::InlineHint)) Threshold = MaxIfValid(Threshold, Params.HintThreshold); @@ -923,7 +923,7 @@ void CallAnalyzer::updateThreshold(CallSite CS, Function &Callee) { // BlockFrequencyInfo is available. BlockFrequencyInfo *CallerBFI = GetBFI ? &((*GetBFI)(*Caller)) : nullptr; auto HotCallSiteThreshold = getHotCallSiteThreshold(CS, CallerBFI); - if (!Caller->optForSize() && HotCallSiteThreshold) { + if (!Caller->hasOptSize() && HotCallSiteThreshold) { LLVM_DEBUG(dbgs() << "Hot callsite.\n"); // FIXME: This should update the threshold only if it exceeds the // current threshold, but AutoFDO + ThinLTO currently relies on this @@ -1899,7 +1899,7 @@ InlineResult CallAnalyzer::analyzeCall(CallSite CS) { // size, we penalise any call sites that perform loops. We do this after all // other costs here, so will likely only be dealing with relatively small // functions (and hence DT and LI will hopefully be cheap). - if (Caller->optForMinSize()) { + if (Caller->hasMinSize()) { DominatorTree DT(F); LoopInfo LI(DT); int NumLoops = 0; @@ -2036,7 +2036,7 @@ InlineCost llvm::getInlineCost( return llvm::InlineCost::getNever("conflicting attributes"); // Don't inline this call if the caller has the optnone attribute. - if (Caller->optForNone()) + if (Caller->hasOptNone()) return llvm::InlineCost::getNever("optnone attribute"); // Don't inline a function that treats null pointer as valid into a caller diff --git a/llvm/lib/Analysis/LoopPass.cpp b/llvm/lib/Analysis/LoopPass.cpp index 0fd9849..c57ec2a 100644 --- a/llvm/lib/Analysis/LoopPass.cpp +++ b/llvm/lib/Analysis/LoopPass.cpp @@ -396,7 +396,7 @@ bool LoopPass::skipLoop(const Loop *L) const { if (Gate.isEnabled() && !Gate.shouldRunPass(this, getDescription(*L))) return true; // Check for the OptimizeNone attribute. - if (F->optForNone()) { + if (F->hasOptNone()) { // FIXME: Report this to dbgs() only once per function. 
LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName() << "' in function " << F->getName() << "\n"); diff --git a/llvm/lib/Analysis/RegionPass.cpp b/llvm/lib/Analysis/RegionPass.cpp index ea117ea..6c0d17b 100644 --- a/llvm/lib/Analysis/RegionPass.cpp +++ b/llvm/lib/Analysis/RegionPass.cpp @@ -288,7 +288,7 @@ bool RegionPass::skipRegion(Region &R) const { if (Gate.isEnabled() && !Gate.shouldRunPass(this, getDescription(R))) return true; - if (F.optForNone()) { + if (F.hasOptNone()) { // Report this only once per function. if (R.getEntry() == &F.getEntryBlock()) LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName() diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp index cbdd463..7887ce1 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp @@ -2865,7 +2865,7 @@ void AsmPrinter::setupCodePaddingContext(const MachineBasicBlock &MBB, MCCodePaddingContext &Context) const { assert(MF != nullptr && "Machine function must be valid"); Context.IsPaddingActive = !MF->hasInlineAsm() && - !MF->getFunction().optForSize() && + !MF->getFunction().hasOptSize() && TM.getOptLevel() != CodeGenOpt::None; Context.IsBasicBlockReachableViaFallthrough = std::find(MBB.pred_begin(), MBB.pred_end(), MBB.getPrevNode()) != diff --git a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp index 6693fe4..a57f0f5 100644 --- a/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp @@ -1342,7 +1342,7 @@ void CodeViewDebug::beginFunctionImpl(const MachineFunction *MF) { FPO |= FrameProcedureOptions(uint32_t(CurFn->EncodedLocalFramePtrReg) << 14U); FPO |= FrameProcedureOptions(uint32_t(CurFn->EncodedParamFramePtrReg) << 16U); if (Asm->TM.getOptLevel() != CodeGenOpt::None && - !GV.optForSize() && !GV.optForNone()) + !GV.hasOptSize() && !GV.hasOptNone()) FPO |= FrameProcedureOptions::OptimizedForSpeed; // FIXME: Set GuardCfg when it is implemented. CurFn->FrameProcOpts = FPO; diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp index 7a8013a..18f76d9 100644 --- a/llvm/lib/CodeGen/AtomicExpandPass.cpp +++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp @@ -1111,11 +1111,11 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) { bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic && SuccessOrder != AtomicOrdering::Monotonic && SuccessOrder != AtomicOrdering::Acquire && - !F->optForMinSize(); + !F->hasMinSize(); // There's no overhead for sinking the release barrier in a weak cmpxchg, so // do it even on minsize. - bool UseUnconditionalReleaseBarrier = F->optForMinSize() && !CI->isWeak(); + bool UseUnconditionalReleaseBarrier = F->hasMinSize() && !CI->isWeak(); // Given: cmpxchg some_op iN* %addr, iN %desired, iN %new success_ord fail_ord // diff --git a/llvm/lib/CodeGen/BranchFolding.cpp b/llvm/lib/CodeGen/BranchFolding.cpp index 95b0dde..93fd6f9 100644 --- a/llvm/lib/CodeGen/BranchFolding.cpp +++ b/llvm/lib/CodeGen/BranchFolding.cpp @@ -721,7 +721,7 @@ ProfitableToMerge(MachineBasicBlock *MBB1, MachineBasicBlock *MBB2, // branch instruction, which is likely to be smaller than the 2 // instructions that would be deleted in the merge. 
MachineFunction *MF = MBB1->getParent(); - return EffectiveTailLen >= 2 && MF->getFunction().optForSize() && + return EffectiveTailLen >= 2 && MF->getFunction().hasOptSize() && (I1 == MBB1->begin() || I2 == MBB2->begin()); } @@ -1574,7 +1574,7 @@ ReoptimizeBlock: } if (!IsEmptyBlock(MBB) && MBB->pred_size() == 1 && - MF.getFunction().optForSize()) { + MF.getFunction().hasOptSize()) { // Changing "Jcc foo; foo: jmp bar;" into "Jcc bar;" might change the branch // direction, thereby defeating careful block placement and regressing // performance. Therefore, only consider this for optsize functions. diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp index 20a9030..2d7d0be 100644 --- a/llvm/lib/CodeGen/CodeGenPrepare.cpp +++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp @@ -426,7 +426,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) { LI = &getAnalysis().getLoopInfo(); BPI.reset(new BranchProbabilityInfo(F, *LI)); BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI)); - OptSize = F.optForSize(); + OptSize = F.hasOptSize(); ProfileSummaryInfo *PSI = &getAnalysis().getPSI(); @@ -4454,7 +4454,7 @@ static bool FindAllMemoryUses( if (!MightBeFoldableInst(I)) return true; - const bool OptSize = I->getFunction()->optForSize(); + const bool OptSize = I->getFunction()->hasOptSize(); // Loop over all the uses, recursively processing them. for (Use &U : I->uses()) { diff --git a/llvm/lib/CodeGen/ExpandMemCmp.cpp b/llvm/lib/CodeGen/ExpandMemCmp.cpp index 9a05427..6c80c17 100644 --- a/llvm/lib/CodeGen/ExpandMemCmp.cpp +++ b/llvm/lib/CodeGen/ExpandMemCmp.cpp @@ -721,7 +721,7 @@ static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI, NumMemCmpCalls++; // Early exit from expansion if -Oz. - if (CI->getFunction()->optForMinSize()) + if (CI->getFunction()->hasMinSize()) return false; // Early exit from expansion if size is not a constant. @@ -742,7 +742,7 @@ static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI, if (!Options) return false; const unsigned MaxNumLoads = - TLI->getMaxExpandSizeMemcmp(CI->getFunction()->optForSize()); + TLI->getMaxExpandSizeMemcmp(CI->getFunction()->hasOptSize()); unsigned NumLoadsPerBlock = MemCmpEqZeroNumLoadsPerBlock.getNumOccurrences() ? MemCmpEqZeroNumLoadsPerBlock diff --git a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp index 250a1db..6bc4cfa 100644 --- a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp +++ b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp @@ -657,7 +657,7 @@ bool RegBankSelect::runOnMachineFunction(MachineFunction &MF) { LLVM_DEBUG(dbgs() << "Assign register banks for: " << MF.getName() << '\n'); const Function &F = MF.getFunction(); Mode SaveOptMode = OptMode; - if (F.optForNone()) + if (F.hasOptNone()) OptMode = Mode::Fast; init(MF); diff --git a/llvm/lib/CodeGen/GlobalMerge.cpp b/llvm/lib/CodeGen/GlobalMerge.cpp index 8b25d48..d4cc1da 100644 --- a/llvm/lib/CodeGen/GlobalMerge.cpp +++ b/llvm/lib/CodeGen/GlobalMerge.cpp @@ -330,7 +330,7 @@ bool GlobalMerge::doMerge(SmallVectorImpl &Globals, Function *ParentFn = I->getParent()->getParent(); // If we're only optimizing for size, ignore non-minsize functions. 
- if (OnlyOptimizeForSize && !ParentFn->optForMinSize()) + if (OnlyOptimizeForSize && !ParentFn->hasMinSize()) continue; size_t UGSIdx = GlobalUsesByFunction[ParentFn]; diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp index 265aeab..b1806ad 100644 --- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp +++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp @@ -1813,7 +1813,7 @@ MachineBlockPlacement::findBestLoopTop(const MachineLoop &L, // i.e. when the layout predecessor does not fallthrough to the loop header. // In practice this never happens though: there always seems to be a preheader // that can fallthrough and that is also placed before the header. - if (F->getFunction().optForSize()) + if (F->getFunction().hasOptSize()) return L.getHeader(); // Check that the header hasn't been fused with a preheader block due to @@ -2561,8 +2561,8 @@ void MachineBlockPlacement::alignBlocks() { // exclusively on the loop info here so that we can align backedges in // unnatural CFGs and backedges that were introduced purely because of the // loop rotations done during this layout pass. - if (F->getFunction().optForMinSize() || - (F->getFunction().optForSize() && !TLI->alignLoopsWithOptSize())) + if (F->getFunction().hasMinSize() || + (F->getFunction().hasOptSize() && !TLI->alignLoopsWithOptSize())) return; BlockChain &FunctionChain = *BlockToChain[&F->front()]; if (FunctionChain.begin() == FunctionChain.end()) @@ -2837,7 +2837,7 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &MF) { if (allowTailDupPlacement()) { MPDT = &getAnalysis(); - if (MF.getFunction().optForSize()) + if (MF.getFunction().hasOptSize()) TailDupSize = 1; bool PreRegAlloc = false; TailDup.initMF(MF, PreRegAlloc, MBPI, /* LayoutMode */ true, TailDupSize); diff --git a/llvm/lib/CodeGen/MachineCombiner.cpp b/llvm/lib/CodeGen/MachineCombiner.cpp index f35f113..2184847 100644 --- a/llvm/lib/CodeGen/MachineCombiner.cpp +++ b/llvm/lib/CodeGen/MachineCombiner.cpp @@ -637,7 +637,7 @@ bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) { MLI = &getAnalysis(); Traces = &getAnalysis(); MinInstr = nullptr; - OptSize = MF.getFunction().optForSize(); + OptSize = MF.getFunction().hasOptSize(); LLVM_DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n'); if (!TII->useMachineCombiner()) { diff --git a/llvm/lib/CodeGen/MachineFunction.cpp b/llvm/lib/CodeGen/MachineFunction.cpp index 84a2076..f58ade8 100644 --- a/llvm/lib/CodeGen/MachineFunction.cpp +++ b/llvm/lib/CodeGen/MachineFunction.cpp @@ -174,7 +174,7 @@ void MachineFunction::init() { Alignment = STI->getTargetLowering()->getMinFunctionAlignment(); // FIXME: Shouldn't use pref alignment if explicit alignment is set on F. - // FIXME: Use Function::optForSize(). + // FIXME: Use Function::hasOptSize(). 
if (!F.hasFnAttribute(Attribute::OptimizeForSize)) Alignment = std::max(Alignment, STI->getTargetLowering()->getPrefFunctionAlignment()); diff --git a/llvm/lib/CodeGen/SafeStack.cpp b/llvm/lib/CodeGen/SafeStack.cpp index fc110c6..6de72bb 100644 --- a/llvm/lib/CodeGen/SafeStack.cpp +++ b/llvm/lib/CodeGen/SafeStack.cpp @@ -728,7 +728,7 @@ void SafeStack::TryInlinePointerAddress() { if (!isa(UnsafeStackPtr)) return; - if(F.optForNone()) + if(F.hasOptNone()) return; CallSite CS(UnsafeStackPtr); diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index cad2a5c..6306ad5 100644 --- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -196,7 +196,7 @@ namespace { DAGCombiner(SelectionDAG &D, AliasAnalysis *AA, CodeGenOpt::Level OL) : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes), OptLevel(OL), AA(AA) { - ForCodeSize = DAG.getMachineFunction().getFunction().optForSize(); + ForCodeSize = DAG.getMachineFunction().getFunction().hasOptSize(); MaximumLegalStoreInBits = 0; for (MVT VT : MVT::all_valuetypes()) @@ -12188,7 +12188,7 @@ SDValue DAGCombiner::visitFPOW(SDNode *N) { // Assume that libcalls are the smallest code. // TODO: This restriction should probably be lifted for vectors. - if (DAG.getMachineFunction().getFunction().optForSize()) + if (DAG.getMachineFunction().getFunction().hasOptSize()) return SDValue(); // pow(X, 0.25) --> sqrt(sqrt(X)) @@ -19213,7 +19213,7 @@ SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, SDValue DAGCombiner::BuildSDIV(SDNode *N) { // when optimising for minimum size, we don't want to expand a div to a mul // and a shift. - if (DAG.getMachineFunction().getFunction().optForMinSize()) + if (DAG.getMachineFunction().getFunction().hasMinSize()) return SDValue(); SmallVector Built; @@ -19254,7 +19254,7 @@ SDValue DAGCombiner::BuildSDIVPow2(SDNode *N) { SDValue DAGCombiner::BuildUDIV(SDNode *N) { // when optimising for minimum size, we don't want to expand a div to a mul // and a shift. - if (DAG.getMachineFunction().getFunction().optForMinSize()) + if (DAG.getMachineFunction().getFunction().hasMinSize()) return SDValue(); SmallVector Built; diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index cd01a95..3c362177 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -3092,7 +3092,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) { // Check to see if this FP immediate is already legal. // If this is a legal constant, turn it into a TargetConstantFP node. if (!TLI.isFPImmLegal(CFP->getValueAPF(), Node->getValueType(0), - DAG.getMachineFunction().getFunction().optForSize())) + DAG.getMachineFunction().getFunction().hasOptSize())) Results.push_back(ExpandConstantFP(CFP, true)); break; } diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 0ee308b..87ace6f 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -1418,7 +1418,7 @@ SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT, assert((TargetFlags == 0 || isTarget) && "Cannot set target flags on target-independent globals"); if (Alignment == 0) - Alignment = MF->getFunction().optForSize() + Alignment = MF->getFunction().hasOptSize() ? 
getDataLayout().getABITypeAlignment(C->getType()) : getDataLayout().getPrefTypeAlignment(C->getType()); unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; @@ -5657,8 +5657,8 @@ static bool shouldLowerMemFuncForSize(const MachineFunction &MF) { // On Darwin, -Os means optimize for size without hurting performance, so // only really optimize for size when -Oz (MinSize) is used. if (MF.getTarget().getTargetTriple().isOSDarwin()) - return MF.getFunction().optForMinSize(); - return MF.getFunction().optForSize(); + return MF.getFunction().hasMinSize(); + return MF.getFunction().hasOptSize(); } static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl, diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 05dca5f..2bf68ac 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -5220,7 +5220,7 @@ static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS, return DAG.getConstantFP(1.0, DL, LHS.getValueType()); const Function &F = DAG.getMachineFunction().getFunction(); - if (!F.optForSize() || + if (!F.hasOptSize() || // If optimizing for size, don't insert too many multiplies. // This inserts up to 5 multiplies. countPopulation(Val) + Log2_32(Val) < 7) { @@ -10617,7 +10617,7 @@ MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster( // Don't perform if there is only one cluster or optimizing for size. if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 || TM.getOptLevel() == CodeGenOpt::None || - SwitchMBB->getParent()->getFunction().optForMinSize()) + SwitchMBB->getParent()->getFunction().hasMinSize()) return SwitchMBB; BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100); @@ -10740,7 +10740,7 @@ void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) { unsigned NumClusters = W.LastCluster - W.FirstCluster + 1; if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None && - !DefaultMBB->getParent()->getFunction().optForMinSize()) { + !DefaultMBB->getParent()->getFunction().hasMinSize()) { // For optimized builds, lower large range as a balanced binary tree. 
splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB); continue; diff --git a/llvm/lib/CodeGen/TailDuplicator.cpp b/llvm/lib/CodeGen/TailDuplicator.cpp index d3cd6c6..15cbff4 100644 --- a/llvm/lib/CodeGen/TailDuplicator.cpp +++ b/llvm/lib/CodeGen/TailDuplicator.cpp @@ -557,7 +557,7 @@ bool TailDuplicator::shouldTailDuplicate(bool IsSimple, unsigned MaxDuplicateCount; if (TailDupSize == 0 && TailDuplicateSize.getNumOccurrences() == 0 && - MF->getFunction().optForSize()) + MF->getFunction().hasOptSize()) MaxDuplicateCount = 1; else if (TailDupSize == 0) MaxDuplicateCount = TailDuplicateSize; diff --git a/llvm/lib/IR/Pass.cpp b/llvm/lib/IR/Pass.cpp index f781558..699a7e1 100644 --- a/llvm/lib/IR/Pass.cpp +++ b/llvm/lib/IR/Pass.cpp @@ -168,7 +168,7 @@ bool FunctionPass::skipFunction(const Function &F) const { if (Gate.isEnabled() && !Gate.shouldRunPass(this, getDescription(F))) return true; - if (F.optForNone()) { + if (F.hasOptNone()) { LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName() << "' on function " << F.getName() << "\n"); return true; @@ -207,7 +207,7 @@ bool BasicBlockPass::skipBasicBlock(const BasicBlock &BB) const { OptPassGate &Gate = F->getContext().getOptPassGate(); if (Gate.isEnabled() && !Gate.shouldRunPass(this, getDescription(BB))) return true; - if (F->optForNone()) { + if (F->hasOptNone()) { // Report this only once per function. if (&BB == &F->getEntryBlock()) LLVM_DEBUG(dbgs() << "Skipping pass '" << getPassName() diff --git a/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp b/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp index bcb6e4a..bbb25c8 100644 --- a/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp +++ b/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp @@ -140,7 +140,7 @@ bool AArch64CompressJumpTables::runOnMachineFunction(MachineFunction &MFIn) { const auto &ST = MF->getSubtarget(); TII = ST.getInstrInfo(); - if (ST.force32BitJumpTables() && !MF->getFunction().optForMinSize()) + if (ST.force32BitJumpTables() && !MF->getFunction().hasMinSize()) return false; scanFunction(); diff --git a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp index b136222..2cfbcc5 100644 --- a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp +++ b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp @@ -940,7 +940,7 @@ bool AArch64ConditionalCompares::runOnMachineFunction(MachineFunction &MF) { MBPI = &getAnalysis(); Traces = &getAnalysis(); MinInstr = nullptr; - MinSize = MF.getFunction().optForMinSize(); + MinSize = MF.getFunction().hasMinSize(); bool Changed = false; CmpConv.runOnMachineFunction(MF, MBPI); diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 0380891..8c794b9 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -52,7 +52,7 @@ public: } bool runOnMachineFunction(MachineFunction &MF) override { - ForCodeSize = MF.getFunction().optForSize(); + ForCodeSize = MF.getFunction().hasOptSize(); Subtarget = &MF.getSubtarget(); return SelectionDAGISel::runOnMachineFunction(MF); } diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index ff6d30b..84a66a2 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -10382,7 +10382,7 @@ static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, return SDValue(); // Don't 
split at -Oz. - if (DAG.getMachineFunction().getFunction().optForMinSize()) + if (DAG.getMachineFunction().getFunction().hasMinSize()) return SDValue(); // Don't split v2i64 vectors. Memcpy lowering produces those and splitting diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h index 4898912..c4ef6b3 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -474,7 +474,7 @@ public: } bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override { - if (DAG.getMachineFunction().getFunction().optForMinSize()) + if (DAG.getMachineFunction().getFunction().hasMinSize()) return false; return true; } diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp index 79b6034..12f2576 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -5486,7 +5486,7 @@ MachineBasicBlock::iterator AArch64InstrInfo::insertOutlinedCall( bool AArch64InstrInfo::shouldOutlineFromFunctionByDefault( MachineFunction &MF) const { - return MF.getFunction().optForMinSize(); + return MF.getFunction().hasMinSize(); } #define GET_INSTRINFO_HELPERS diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td index ee496fa..c549703 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -407,10 +407,10 @@ def AArch64umaxv : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>; // the Function object through the Subtarget and objections were raised // to that (see post-commit review comments for r301750). let RecomputePerFunction = 1 in { - def ForCodeSize : Predicate<"MF->getFunction().optForSize()">; - def NotForCodeSize : Predicate<"!MF->getFunction().optForSize()">; + def ForCodeSize : Predicate<"MF->getFunction().hasOptSize()">; + def NotForCodeSize : Predicate<"!MF->getFunction().hasOptSize()">; // Avoid generating STRQro if it is slow, unless we're optimizing for code size. - def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction().optForSize()">; + def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction().hasOptSize()">; def UseBTI : Predicate<[{ MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>; def NotUseBTI : Predicate<[{ !MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>; diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp index 300bc86..954c107 100644 --- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp @@ -119,13 +119,13 @@ bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) { // Calculate this function's optimization goal. 
unsigned OptimizationGoal; - if (F.optForNone()) + if (F.hasOptNone()) // For best debugging illusion, speed and small size sacrificed OptimizationGoal = 6; - else if (F.optForMinSize()) + else if (F.hasMinSize()) // Aggressively for small size, speed and debug illusion sacrificed OptimizationGoal = 4; - else if (F.optForSize()) + else if (F.hasOptSize()) // For small size, but speed and debugging illusion preserved OptimizationGoal = 3; else if (TM.getOptLevel() == CodeGenOpt::Aggressive) diff --git a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp index 427bdd9..490bf5f 100644 --- a/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -1899,7 +1899,7 @@ isProfitableToIfCvt(MachineBasicBlock &MBB, // If we are optimizing for size, see if the branch in the predecessor can be // lowered to cbn?z by the constant island lowering pass, and return false if // so. This results in a shorter instruction sequence. - if (MBB.getParent()->getFunction().optForSize()) { + if (MBB.getParent()->getFunction().hasOptSize()) { MachineBasicBlock *Pred = *MBB.pred_begin(); if (!Pred->empty()) { MachineInstr *LastMI = &*Pred->rbegin(); @@ -2267,7 +2267,7 @@ bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget, unsigned NumBytes) { // This optimisation potentially adds lots of load and store // micro-operations, it's only really a great benefit to code-size. - if (!Subtarget.optForMinSize()) + if (!Subtarget.hasMinSize()) return false; // If only one register is pushed/popped, LLVM can use an LDR/STR @@ -4163,7 +4163,7 @@ int ARMBaseInstrInfo::getOperandLatencyImpl( // instructions). if (Latency > 0 && Subtarget.isThumb2()) { const MachineFunction *MF = DefMI.getParent()->getParent(); - // FIXME: Use Function::optForSize(). + // FIXME: Use Function::hasOptSize(). if (MF->getFunction().hasFnAttribute(Attribute::OptimizeForSize)) --Latency; } diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index bf44f07..cbf4796 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -2074,7 +2074,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, auto *GV = cast(Callee)->getGlobal(); auto *BB = CLI.CS.getParent(); bool PreferIndirect = - Subtarget->isThumb() && Subtarget->optForMinSize() && + Subtarget->isThumb() && Subtarget->hasMinSize() && count_if(GV->users(), [&BB](const User *U) { return isa(U) && cast(U)->getParent() == BB; }) > 2; @@ -2146,7 +2146,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, CallOpc = ARMISD::CALL_NOLINK; else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() && // Emit regular call when code size is the priority - !Subtarget->optForMinSize()) + !Subtarget->hasMinSize()) // "mov lr, pc; b _foo" to avoid confusing the RSP CallOpc = ARMISD::CALL_NOLINK; else @@ -7818,7 +7818,7 @@ ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, return SDValue(); const auto &ST = static_cast(DAG.getSubtarget()); - const bool MinSize = ST.optForMinSize(); + const bool MinSize = ST.hasMinSize(); const bool HasDivide = ST.isThumb() ? 
ST.hasDivideInThumbMode() : ST.hasDivideInARMMode(); @@ -14826,7 +14826,7 @@ bool ARMTargetLowering::isCheapToSpeculateCtlz() const { } bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const { - return !Subtarget->optForMinSize(); + return !Subtarget->hasMinSize(); } Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr, diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td index b2f0fef..f55e73a 100644 --- a/llvm/lib/Target/ARM/ARMInstrInfo.td +++ b/llvm/lib/Target/ARM/ARMInstrInfo.td @@ -361,7 +361,7 @@ let RecomputePerFunction = 1 in { def UseFPVMLx: Predicate<"((Subtarget->useFPVMLx() &&" " TM.Options.AllowFPOpFusion != FPOpFusion::Fast) ||" - "Subtarget->optForMinSize())">; + "Subtarget->hasMinSize())">; } def UseMulOps : Predicate<"Subtarget->useMulOps()">; diff --git a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index 21aa3e0..90a1ce2 100644 --- a/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -1294,7 +1294,7 @@ bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineInstr *MI) { // can still change to a writeback form as that will save us 2 bytes // of code size. It can create WAW hazards though, so only do it if // we're minimizing code size. - if (!STI->optForMinSize() || !BaseKill) + if (!STI->hasMinSize() || !BaseKill) return false; bool HighRegsUsed = false; diff --git a/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp index 332e4e7..cade06e 100644 --- a/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp +++ b/llvm/lib/Target/ARM/ARMSelectionDAGInfo.cpp @@ -170,7 +170,7 @@ SDValue ARMSelectionDAGInfo::EmitTargetCodeForMemcpy( // Code size optimisation: do not inline memcpy if expansion results in // more instructions than the libary call. - if (NumMEMCPYs > 1 && Subtarget.optForMinSize()) { + if (NumMEMCPYs > 1 && Subtarget.hasMinSize()) { return SDValue(); } diff --git a/llvm/lib/Target/ARM/ARMSubtarget.h b/llvm/lib/Target/ARM/ARMSubtarget.h index 131cd63..9500a9f 100644 --- a/llvm/lib/Target/ARM/ARMSubtarget.h +++ b/llvm/lib/Target/ARM/ARMSubtarget.h @@ -715,7 +715,7 @@ public: bool disablePostRAScheduler() const { return DisablePostRAScheduler; } bool useSoftFloat() const { return UseSoftFloat; } bool isThumb() const { return InThumbMode; } - bool optForMinSize() const { return OptMinSize; } + bool hasMinSize() const { return OptMinSize; } bool isThumb1Only() const { return InThumbMode && !HasThumb2; } bool isThumb2() const { return InThumbMode && HasThumb2; } bool hasThumb2() const { return HasThumb2; } diff --git a/llvm/lib/Target/ARM/ARMTargetMachine.cpp b/llvm/lib/Target/ARM/ARMTargetMachine.cpp index d013827..d2663ac 100644 --- a/llvm/lib/Target/ARM/ARMTargetMachine.cpp +++ b/llvm/lib/Target/ARM/ARMTargetMachine.cpp @@ -270,7 +270,7 @@ ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const { // Use the optminsize to identify the subtarget, but don't use it in the // feature string. std::string Key = CPU + FS; - if (F.optForMinSize()) + if (F.hasMinSize()) Key += "+minsize"; auto &I = SubtargetMap[Key]; @@ -280,7 +280,7 @@ ARMBaseTargetMachine::getSubtargetImpl(const Function &F) const { // function that reside in TargetOptions. 
resetTargetOptions(F); I = llvm::make_unique(TargetTriple, CPU, FS, *this, isLittle, - F.optForMinSize()); + F.hasMinSize()); if (!I->isThumb() && !I->hasARMOps()) F.getContext().emitError("Function '" + F.getName() + "' uses ARM " diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp index 1d1d92c..fe95c05 100644 --- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp +++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp @@ -602,7 +602,7 @@ void ARMTTIImpl::getUnrollingPreferences(Loop *L, ScalarEvolution &SE, // Disable loop unrolling for Oz and Os. UP.OptSizeThreshold = 0; UP.PartialOptSizeThreshold = 0; - if (L->getHeader()->getParent()->optForSize()) + if (L->getHeader()->getParent()->hasOptSize()) return; // Only enable on Thumb-2 targets. diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h index 9084264..ba69353 100644 --- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.h +++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.h @@ -94,7 +94,7 @@ public: bool enableInterleavedAccessVectorization() { return true; } bool shouldFavorBackedgeIndex(const Loop *L) const { - if (L->getHeader()->getParent()->optForSize()) + if (L->getHeader()->getParent()->hasOptSize()) return false; return ST->isMClass() && ST->isThumb2() && L->getNumBlocks() == 1; } diff --git a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp index be9c1eb..37a85fa 100644 --- a/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp +++ b/llvm/lib/Target/ARM/Thumb2SizeReduction.cpp @@ -1127,8 +1127,8 @@ bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) { TII = static_cast(STI->getInstrInfo()); // Optimizing / minimizing size? Minimizing size implies optimizing for size. - OptimizeSize = MF.getFunction().optForSize(); - MinimizeSize = STI->optForMinSize(); + OptimizeSize = MF.getFunction().hasOptSize(); + MinimizeSize = STI->hasMinSize(); BlockInfo.clear(); BlockInfo.resize(MF.getNumBlockIDs()); diff --git a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp index 537da2a..3368ee4 100644 --- a/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonFrameLowering.cpp @@ -374,17 +374,17 @@ static bool isRestoreCall(unsigned Opc) { } static inline bool isOptNone(const MachineFunction &MF) { - return MF.getFunction().optForNone() || + return MF.getFunction().hasOptNone() || MF.getTarget().getOptLevel() == CodeGenOpt::None; } static inline bool isOptSize(const MachineFunction &MF) { const Function &F = MF.getFunction(); - return F.optForSize() && !F.optForMinSize(); + return F.hasOptSize() && !F.hasMinSize(); } static inline bool isMinSize(const MachineFunction &MF) { - return MF.getFunction().optForMinSize(); + return MF.getFunction().hasMinSize(); } /// Implements shrink-wrapping of the stack frame. By default, stack frame diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index 9effe3d..d5a5cc3 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -14707,7 +14707,7 @@ SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const { return SDValue(); // An imul is usually smaller than the alternative sequence for legal type. 
- if (DAG.getMachineFunction().getFunction().optForMinSize() && + if (DAG.getMachineFunction().getFunction().hasMinSize() && isOperationLegal(ISD::MUL, N->getValueType(0))) return SDValue(); diff --git a/llvm/lib/Target/X86/X86FixupBWInsts.cpp b/llvm/lib/Target/X86/X86FixupBWInsts.cpp index be4479f..a0282ff 100644 --- a/llvm/lib/Target/X86/X86FixupBWInsts.cpp +++ b/llvm/lib/Target/X86/X86FixupBWInsts.cpp @@ -150,7 +150,7 @@ bool FixupBWInstPass::runOnMachineFunction(MachineFunction &MF) { this->MF = &MF; TII = MF.getSubtarget().getInstrInfo(); - OptForSize = MF.getFunction().optForSize(); + OptForSize = MF.getFunction().hasOptSize(); MLI = &getAnalysis(); LiveRegs.init(TII->getRegisterInfo()); diff --git a/llvm/lib/Target/X86/X86FixupLEAs.cpp b/llvm/lib/Target/X86/X86FixupLEAs.cpp index 9a22731..311957a 100644 --- a/llvm/lib/Target/X86/X86FixupLEAs.cpp +++ b/llvm/lib/Target/X86/X86FixupLEAs.cpp @@ -200,7 +200,7 @@ bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) { bool IsSlowLEA = ST.slowLEA(); bool IsSlow3OpsLEA = ST.slow3OpsLEA(); - OptIncDec = !ST.slowIncDec() || Func.getFunction().optForSize(); + OptIncDec = !ST.slowIncDec() || Func.getFunction().hasOptSize(); OptLEA = ST.LEAusesAG() || IsSlowLEA || IsSlow3OpsLEA; if (!OptLEA && !OptIncDec) diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp index ebe7392..6d8bcd7 100644 --- a/llvm/lib/Target/X86/X86FrameLowering.cpp +++ b/llvm/lib/Target/X86/X86FrameLowering.cpp @@ -2810,7 +2810,7 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, StackAdjustment += mergeSPUpdates(MBB, InsertPos, false); if (StackAdjustment) { - if (!(F.optForMinSize() && + if (!(F.hasMinSize() && adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment))) BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment, /*InEpilogue=*/false); diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp index 77c3aa7..3a9ef41 100644 --- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -183,8 +183,8 @@ namespace { "indirect-tls-seg-refs"); // OptFor[Min]Size are used in pattern predicates that isel is matching. - OptForSize = MF.getFunction().optForSize(); - OptForMinSize = MF.getFunction().optForMinSize(); + OptForSize = MF.getFunction().hasOptSize(); + OptForMinSize = MF.getFunction().hasMinSize(); assert((!OptForMinSize || OptForSize) && "OptForMinSize implies OptForSize"); diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index d599a61..71ea61b 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -7759,7 +7759,7 @@ static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp, // TODO: If multiple splats are generated to load the same constant, // it may be detrimental to overall size. There needs to be a way to detect // that condition to know if this is truly a size win. - bool OptForSize = DAG.getMachineFunction().getFunction().optForSize(); + bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize(); // Handle broadcasting a single constant scalar from the constant pool // into a vector. @@ -10666,7 +10666,7 @@ static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1, case MVT::v32i16: case MVT::v64i8: { // Attempt to lower to a bitmask if we can. Only if not optimizing for size. 
- bool OptForSize = DAG.getMachineFunction().getFunction().optForSize(); + bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize(); if (!OptForSize) { if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG)) @@ -16982,7 +16982,7 @@ SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, // Bits [3:0] of the constant are the zero mask. The DAG Combiner may // combine either bitwise AND or insert of float 0.0 to set these bits. - bool MinSize = DAG.getMachineFunction().getFunction().optForMinSize(); + bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize(); if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) { // If this is an insertion of 32-bits into the low 32-bits of // a vector, we prefer to generate a blend with immediate rather @@ -17636,7 +17636,7 @@ static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget, "Unexpected funnel shift type!"); // Expand slow SHLD/SHRD cases if we are not optimizing for size. - bool OptForSize = DAG.getMachineFunction().getFunction().optForSize(); + bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize(); if (!OptForSize && Subtarget.isSHLDSlow()) return SDValue(); @@ -18895,7 +18895,7 @@ static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) { /// implementation, and likely shuffle complexity of the alternate sequence. static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG, const X86Subtarget &Subtarget) { - bool IsOptimizingSize = DAG.getMachineFunction().getFunction().optForSize(); + bool IsOptimizingSize = DAG.getMachineFunction().getFunction().hasOptSize(); bool HasFastHOps = Subtarget.hasFastHorizontalOps(); return !IsSingleSource || IsOptimizingSize || HasFastHOps; } @@ -19376,7 +19376,7 @@ SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, !cast(Op0)->getAPIntValue().isSignedIntN(8)) || (isa(Op1) && !cast(Op1)->getAPIntValue().isSignedIntN(8))) && - !DAG.getMachineFunction().getFunction().optForMinSize() && + !DAG.getMachineFunction().getFunction().hasMinSize() && !Subtarget.isAtom()) { unsigned ExtendOp = isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND; @@ -19550,7 +19550,7 @@ static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, } else { // Use BT if the immediate can't be encoded in a TEST instruction or we // are optimizing for size and the immedaite won't fit in a byte. - bool OptForSize = DAG.getMachineFunction().getFunction().optForSize(); + bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize(); if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) && isPowerOf2_64(AndRHSVal)) { Src = AndLHS; @@ -35932,7 +35932,7 @@ static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG, // pmulld is supported since SSE41. It is better to use pmulld // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than // the expansion. - bool OptForMinSize = DAG.getMachineFunction().getFunction().optForMinSize(); + bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize(); if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow())) return SDValue(); @@ -36240,7 +36240,7 @@ static SDValue combineMul(SDNode *N, SelectionDAG &DAG, if (!MulConstantOptimization) return SDValue(); // An imul is usually smaller than the alternative sequence. 
- if (DAG.getMachineFunction().getFunction().optForMinSize()) + if (DAG.getMachineFunction().getFunction().hasMinSize()) return SDValue(); if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) @@ -37659,7 +37659,7 @@ static SDValue combineOr(SDNode *N, SelectionDAG &DAG, return SDValue(); // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) - bool OptForSize = DAG.getMachineFunction().getFunction().optForSize(); + bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize(); unsigned Bits = VT.getScalarSizeInBits(); // SHLD/SHRD instructions have lower register pressure, but on some @@ -39938,7 +39938,7 @@ static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG, // If we have to respect NaN inputs, this takes at least 3 instructions. // Favor a library call when operating on a scalar and minimizing code size. - if (!VT.isVector() && DAG.getMachineFunction().getFunction().optForMinSize()) + if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize()) return SDValue(); EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h index 7042a29..c481de0 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -829,7 +829,7 @@ namespace llvm { } bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override { - if (DAG.getMachineFunction().getFunction().optForMinSize()) + if (DAG.getMachineFunction().getFunction().hasMinSize()) return false; return true; } diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp index 4aa365c..3ed88d9 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.cpp +++ b/llvm/lib/Target/X86/X86InstrInfo.cpp @@ -1453,7 +1453,7 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, case X86::VBLENDPDrri: case X86::VBLENDPSrri: // If we're optimizing for size, try to use MOVSD/MOVSS. - if (MI.getParent()->getParent()->getFunction().optForSize()) { + if (MI.getParent()->getParent()->getFunction().hasOptSize()) { unsigned Mask, Opc; switch (MI.getOpcode()) { default: llvm_unreachable("Unreachable!"); @@ -4820,14 +4820,14 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl( // For CPUs that favor the register form of a call or push, // do not fold loads into calls or pushes, unless optimizing for size // aggressively. - if (isSlowTwoMemOps && !MF.getFunction().optForMinSize() && + if (isSlowTwoMemOps && !MF.getFunction().hasMinSize() && (MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r || MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r || MI.getOpcode() == X86::PUSH64r)) return nullptr; // Avoid partial and undef register update stalls unless optimizing for size. - if (!MF.getFunction().optForSize() && + if (!MF.getFunction().hasOptSize() && (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || shouldPreventUndefRegUpdateMemFold(MF, MI))) return nullptr; @@ -4995,7 +4995,7 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, return nullptr; // Avoid partial and undef register update stalls unless optimizing for size. 
- if (!MF.getFunction().optForSize() && + if (!MF.getFunction().hasOptSize() && (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || shouldPreventUndefRegUpdateMemFold(MF, MI))) return nullptr; @@ -5195,7 +5195,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl( if (NoFusing) return nullptr; // Avoid partial and undef register update stalls unless optimizing for size. - if (!MF.getFunction().optForSize() && + if (!MF.getFunction().hasOptSize() && (hasPartialRegUpdate(MI.getOpcode(), Subtarget, /*ForLoadFold*/true) || shouldPreventUndefRegUpdateMemFold(MF, MI))) return nullptr; diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td index e07553f..f5ff8d2 100644 --- a/llvm/lib/Target/X86/X86InstrInfo.td +++ b/llvm/lib/Target/X86/X86InstrInfo.td @@ -925,12 +925,12 @@ def IsNotPIC : Predicate<"!TM.isPositionIndependent()">; // the Function object through the Subtarget and objections were raised // to that (see post-commit review comments for r301750). let RecomputePerFunction = 1 in { - def OptForSize : Predicate<"MF->getFunction().optForSize()">; - def OptForMinSize : Predicate<"MF->getFunction().optForMinSize()">; - def OptForSpeed : Predicate<"!MF->getFunction().optForSize()">; + def OptForSize : Predicate<"MF->getFunction().hasOptSize()">; + def OptForMinSize : Predicate<"MF->getFunction().hasMinSize()">; + def OptForSpeed : Predicate<"!MF->getFunction().hasOptSize()">; def UseIncDec : Predicate<"!Subtarget->slowIncDec() || " - "MF->getFunction().optForSize()">; - def NoSSE41_Or_OptForSize : Predicate<"MF->getFunction().optForSize() || " + "MF->getFunction().hasOptSize()">; + def NoSSE41_Or_OptForSize : Predicate<"MF->getFunction().hasOptSize() || " "!Subtarget->hasSSE41()">; } diff --git a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp index 6cec0b80..d415197 100644 --- a/llvm/lib/Target/X86/X86OptimizeLEAs.cpp +++ b/llvm/lib/Target/X86/X86OptimizeLEAs.cpp @@ -700,7 +700,7 @@ bool OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) { // Remove redundant address calculations. Do it only for -Os/-Oz since only // a code size gain is expected from this part of the pass. - if (MF.getFunction().optForSize()) + if (MF.getFunction().hasOptSize()) Changed |= removeRedundantAddrCalc(LEAs); } diff --git a/llvm/lib/Target/X86/X86PadShortFunction.cpp b/llvm/lib/Target/X86/X86PadShortFunction.cpp index 99df3d6..fb8b0af 100644 --- a/llvm/lib/Target/X86/X86PadShortFunction.cpp +++ b/llvm/lib/Target/X86/X86PadShortFunction.cpp @@ -97,7 +97,7 @@ bool PadShortFunc::runOnMachineFunction(MachineFunction &MF) { if (skipFunction(MF.getFunction())) return false; - if (MF.getFunction().optForSize()) + if (MF.getFunction().hasOptSize()) return false; if (!MF.getSubtarget().padShortFunctions()) diff --git a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp index fea4d84..fb18525 100644 --- a/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp +++ b/llvm/lib/Target/X86/X86SelectionDAGInfo.cpp @@ -248,7 +248,7 @@ SDValue X86SelectionDAGInfo::EmitTargetCodeForMemcpy( Repeats.AVT = Subtarget.is64Bit() ? MVT::i64 : MVT::i32; if (Repeats.BytesLeft() > 0 && - DAG.getMachineFunction().getFunction().optForMinSize()) { + DAG.getMachineFunction().getFunction().hasMinSize()) { // When aggressively optimizing for size, avoid generating the code to // handle BytesLeft. 
Repeats.AVT = MVT::i8; diff --git a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp index d396882..28982d6 100644 --- a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp +++ b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp @@ -1366,7 +1366,7 @@ PreservedAnalyses PostOrderFunctionAttrsPass::run(LazyCallGraph::SCC &C, bool HasUnknownCall = false; for (LazyCallGraph::Node &N : C) { Function &F = N.getFunction(); - if (F.optForNone() || F.hasFnAttribute(Attribute::Naked)) { + if (F.hasOptNone() || F.hasFnAttribute(Attribute::Naked)) { // Treat any function we're trying not to optimize as if it were an // indirect call and omit it from the node set used below. HasUnknownCall = true; @@ -1439,7 +1439,7 @@ static bool runImpl(CallGraphSCC &SCC, AARGetterT AARGetter) { bool ExternalNode = false; for (CallGraphNode *I : SCC) { Function *F = I->getFunction(); - if (!F || F->optForNone() || F->hasFnAttribute(Attribute::Naked)) { + if (!F || F->hasOptNone() || F->hasFnAttribute(Attribute::Naked)) { // External node or function we're trying not to optimize - we both avoid // transform them and avoid leveraging information they provide. ExternalNode = true; diff --git a/llvm/lib/Transforms/IPO/HotColdSplitting.cpp b/llvm/lib/Transforms/IPO/HotColdSplitting.cpp index c4348c3..ab1a9a7 100644 --- a/llvm/lib/Transforms/IPO/HotColdSplitting.cpp +++ b/llvm/lib/Transforms/IPO/HotColdSplitting.cpp @@ -149,7 +149,7 @@ static bool mayExtractBlock(const BasicBlock &BB) { /// module has profile data), set entry count to 0 to ensure treated as cold. /// Return true if the function is changed. static bool markFunctionCold(Function &F, bool UpdateEntryCount = false) { - assert(!F.optForNone() && "Can't mark this cold"); + assert(!F.hasOptNone() && "Can't mark this cold"); bool Changed = false; if (!F.hasFnAttribute(Attribute::Cold)) { F.addFnAttr(Attribute::Cold); @@ -673,7 +673,7 @@ bool HotColdSplitting::run(Module &M) { continue; // Do not modify `optnone` functions. - if (F.optForNone()) + if (F.hasOptNone()) continue; // Detect inherently cold functions and mark them as such. diff --git a/llvm/lib/Transforms/IPO/InferFunctionAttrs.cpp b/llvm/lib/Transforms/IPO/InferFunctionAttrs.cpp index dfe375d..7f5511e 100644 --- a/llvm/lib/Transforms/IPO/InferFunctionAttrs.cpp +++ b/llvm/lib/Transforms/IPO/InferFunctionAttrs.cpp @@ -25,7 +25,7 @@ static bool inferAllPrototypeAttributes(Module &M, for (Function &F : M.functions()) // We only infer things using the prototype and the name; we don't need // definitions. 
-    if (F.isDeclaration() && !F.optForNone())
+    if (F.isDeclaration() && !F.hasOptNone())
       Changed |= inferLibFuncAttributes(F, TLI);

   return Changed;
diff --git a/llvm/lib/Transforms/IPO/Inliner.cpp b/llvm/lib/Transforms/IPO/Inliner.cpp
index b647b27..4047c0a 100644
--- a/llvm/lib/Transforms/IPO/Inliner.cpp
+++ b/llvm/lib/Transforms/IPO/Inliner.cpp
@@ -973,7 +973,7 @@ PreservedAnalyses InlinerPass::run(LazyCallGraph::SCC &InitialC,
     LazyCallGraph::Node &N = *CG.lookup(F);
     if (CG.lookupSCC(N) != C)
       continue;
-    if (F.optForNone()) {
+    if (F.hasOptNone()) {
       setInlineRemark(Calls[i].first, "optnone attribute");
       continue;
     }
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 230a429..6ee94a9 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -3508,7 +3508,7 @@ static bool combineInstructionsOverFunction(

     MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist);

-    InstCombiner IC(Worklist, Builder, F.optForMinSize(), ExpensiveCombines, AA,
+    InstCombiner IC(Worklist, Builder, F.hasMinSize(), ExpensiveCombines, AA,
                     AC, TLI, DT, ORE, DL, LI);
     IC.MaxArraySizeForCombine = MaxArraySize;
diff --git a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
index c9e3a15..7c9e2ad 100644
--- a/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
+++ b/llvm/lib/Transforms/Instrumentation/IndirectCallPromotion.cpp
@@ -393,7 +393,7 @@ static bool promoteIndirectCalls(Module &M, ProfileSummaryInfo *PSI,
   }
   bool Changed = false;
   for (auto &F : M) {
-    if (F.isDeclaration() || F.optForNone())
+    if (F.isDeclaration() || F.hasOptNone())
       continue;

     std::unique_ptr<OptimizationRemarkEmitter> OwnedORE;
diff --git a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
index 0764778..f1cc5e4 100644
--- a/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstantHoisting.cpp
@@ -548,7 +548,7 @@ ConstantHoistingPass::maximizeConstantsInRange(ConstCandVecType::iterator S,
                                                ConstCandVecType::iterator &MaxCostItr) {
   unsigned NumUses = 0;

-  if(!Entry->getParent()->optForSize() || std::distance(S,E) > 100) {
+  if(!Entry->getParent()->hasOptSize() || std::distance(S,E) > 100) {
     for (auto ConstCand = S; ConstCand != E; ++ConstCand) {
       NumUses += ConstCand->Uses.size();
       if (ConstCand->CumulativeCost > MaxCostItr->CumulativeCost)
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 5f9f54c..151fe09 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -284,7 +284,7 @@ bool LoopIdiomRecognize::runOnLoop(Loop *L) {

   // Determine if code size heuristics need to be applied.
   ApplyCodeSizeHeuristics =
-      L->getHeader()->getParent()->optForSize() && UseLIRCodeSizeHeurs;
+      L->getHeader()->getParent()->hasOptSize() && UseLIRCodeSizeHeurs;

   HasMemset = TLI->has(LibFunc_memset);
   HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
index 60d27d3..7526610 100644
--- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
@@ -529,7 +529,7 @@ public:
     }

     if (!Checks.empty() || !LAI.getPSE().getUnionPredicate().isAlwaysTrue()) {
-      if (L->getHeader()->getParent()->optForSize()) {
+      if (L->getHeader()->getParent()->hasOptSize()) {
         LLVM_DEBUG(
             dbgs() << "Versioning is needed but not allowed when optimizing "
                       "for size.\n");
diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 55d4590..ef2831d 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -198,7 +198,7 @@ TargetTransformInfo::UnrollingPreferences llvm::gatherUnrollingPreferences(
   TTI.getUnrollingPreferences(L, SE, UP);

   // Apply size attributes
-  if (L->getHeader()->getParent()->optForSize()) {
+  if (L->getHeader()->getParent()->hasOptSize()) {
     UP.Threshold = UP.OptSizeThreshold;
     UP.PartialThreshold = UP.PartialOptSizeThreshold;
   }
diff --git a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
index 51d113e..87ca5f0 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -657,7 +657,7 @@ bool LoopUnswitch::processCurrentLoop() {
   }

   // Do not do non-trivial unswitch while optimizing for size.
-  // FIXME: Use Function::optForSize().
+  // FIXME: Use Function::hasOptSize().
   if (OptimizeForSize ||
       loopHeader->getParent()->hasFnAttribute(Attribute::OptimizeForSize))
     return false;
diff --git a/llvm/lib/Transforms/Scalar/WarnMissedTransforms.cpp b/llvm/lib/Transforms/Scalar/WarnMissedTransforms.cpp
index bcb877a..707adf4 100644
--- a/llvm/lib/Transforms/Scalar/WarnMissedTransforms.cpp
+++ b/llvm/lib/Transforms/Scalar/WarnMissedTransforms.cpp
@@ -92,7 +92,7 @@ PreservedAnalyses
 WarnMissedTransformationsPass::run(Function &F, FunctionAnalysisManager &AM) {
   // Do not warn about not applied transformations if optimizations are
   // disabled.
-  if (F.optForNone())
+  if (F.hasOptNone())
     return PreservedAnalyses::all();

   auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 0af79f4..f35328ca 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -2375,7 +2375,7 @@ Value *LibCallSimplifier::optimizeFPuts(CallInst *CI, IRBuilder<> &B) {

   // Don't rewrite fputs to fwrite when optimising for size because fwrite
   // requires more arguments and thus extra MOVs are required.
-  if (CI->getFunction()->optForSize())
+  if (CI->getFunction()->hasOptSize())
     return nullptr;

   // Check if has any use
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 1ff32d9..7f16d96 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -7162,7 +7162,7 @@ static bool processLoopInVPlanNativePath(
   // Check the function attributes to find out if this function should be
   // optimized for size.
   bool OptForSize =
-      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
+      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->hasOptSize();

   // Plan how to best vectorize, return the best VF and its cost.
   const VectorizationFactor VF = LVP.planInVPlanNativePath(OptForSize, UserVF);
@@ -7245,7 +7245,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
   // Check the function attributes to find out if this function should be
   // optimized for size.
   bool OptForSize =
-      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->optForSize();
+      Hints.getForce() != LoopVectorizeHints::FK_Enabled && F->hasOptSize();

   // Entrance to the VPlan-native vectorization path. Outer loops are processed
   // here. They may require CFG and instruction level transformations before
--
2.7.4
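For reference, a minimal sketch of how a size-gated transform would typically query the renamed accessors after this patch. The names maybeOptimizeForSize and applySizeSavingRewrite are hypothetical, introduced only to keep the sketch self-contained; only the Function::hasOptNone()/hasOptSize()/hasMinSize() calls come from this change.

#include "llvm/IR/Function.h"

using namespace llvm;

// Hypothetical transform; stands in for whatever size-saving rewrite a real
// pass would perform on the function.
static bool applySizeSavingRewrite(Function &F, bool Aggressive) {
  (void)F;
  (void)Aggressive;
  return false;
}

static bool maybeOptimizeForSize(Function &F) {
  // optnone: do not touch the function at all.
  if (F.hasOptNone())
    return false;

  // hasOptSize() is true for both -Os and -Oz; hasMinSize() only for -Oz.
  if (!F.hasOptSize())
    return false;

  return applySizeSavingRewrite(F, /*Aggressive=*/F.hasMinSize());
}

Under -Oz both hasOptSize() and hasMinSize() return true, so the aggressive path is taken; under -Os only hasOptSize() does, and optnone functions are skipped entirely.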