From be2f67c4d87deb513ba200ab3f4dba385faf709a Mon Sep 17 00:00:00 2001 From: Amir Ayupov Date: Mon, 6 Feb 2023 17:38:20 -0800 Subject: [PATCH] [BOLT][NFC] Replace anonymous namespace functions with static Follow the LLVM Coding Standards guideline on using anonymous namespaces (https://llvm.org/docs/CodingStandards.html#anonymous-namespaces) and use the `static` modifier for function definitions. Reviewed By: #bolt, maksfb Differential Revision: https://reviews.llvm.org/D143124 --- bolt/lib/Core/BinaryFunction.cpp | 32 +++++++++--------------- bolt/lib/Core/DebugData.cpp | 11 +++----- bolt/lib/Core/Relocation.cpp | 43 ++++++++++++++++---------------- bolt/lib/Passes/Aligner.cpp | 9 +++---- bolt/lib/Passes/AllocCombiner.cpp | 16 +++++------- bolt/lib/Passes/BinaryPasses.cpp | 10 ++------ bolt/lib/Passes/CallGraph.cpp | 10 +++----- bolt/lib/Passes/IdenticalCodeFolding.cpp | 32 +++++++++++------------- bolt/lib/Passes/Instrumentation.cpp | 9 +++---- bolt/lib/Passes/LongJmp.cpp | 11 +++----- 10 files changed, 73 insertions(+), 110 deletions(-) diff --git a/bolt/lib/Core/BinaryFunction.cpp b/bolt/lib/Core/BinaryFunction.cpp index 1de8131..d1a0467 100644 --- a/bolt/lib/Core/BinaryFunction.cpp +++ b/bolt/lib/Core/BinaryFunction.cpp @@ -166,9 +166,7 @@ namespace bolt { constexpr unsigned BinaryFunction::MinAlign; -namespace { - -template <typename R> bool emptyRange(const R &Range) { +template <typename R> static bool emptyRange(const R &Range) { return Range.begin() == Range.end(); } @@ -177,7 +175,7 @@ template <typename R> bool emptyRange(const R &Range) { /// to point to this information, which is represented by a /// DebugLineTableRowRef. The returned pointer is null if no debug line /// information for this instruction was found. -SMLoc findDebugLineInformationForInstructionAt( +static SMLoc findDebugLineInformationForInstructionAt( uint64_t Address, DWARFUnit *Unit, const DWARFDebugLine::LineTable *LineTable) { // We use the pointer in SMLoc to store an instance of DebugLineTableRowRef, @@ -206,15 +204,16 @@ SMLoc findDebugLineInformationForInstructionAt( return SMLoc::getFromPointer(Ptr); } -std::string buildSectionName(StringRef Prefix, StringRef Name, - const BinaryContext &BC) { +static std::string buildSectionName(StringRef Prefix, StringRef Name, + const BinaryContext &BC) { if (BC.isELF()) return (Prefix + Name).str(); static NameShortener NS; return (Prefix + Twine(NS.getID(Name))).str(); } -raw_ostream &operator<<(raw_ostream &OS, const BinaryFunction::State State) { +static raw_ostream &operator<<(raw_ostream &OS, + const BinaryFunction::State State) { switch (State) { case BinaryFunction::State::Empty: OS << "empty"; break; case BinaryFunction::State::Disassembled: OS << "disassembled"; break; @@ -227,8 +226,6 @@ raw_ostream &operator<<(raw_ostream &OS, const BinaryFunction::State State) { return OS; } -} // namespace - std::string BinaryFunction::buildCodeSectionName(StringRef Name, const BinaryContext &BC) { return buildSectionName(BC.isELF() ? ".local.text."
: ".l.text.", Name, BC); @@ -669,9 +666,8 @@ void BinaryFunction::printRelocations(raw_ostream &OS, uint64_t Offset, } } -namespace { -std::string mutateDWARFExpressionTargetReg(const MCCFIInstruction &Instr, - MCPhysReg NewReg) { +static std::string mutateDWARFExpressionTargetReg(const MCCFIInstruction &Instr, + MCPhysReg NewReg) { StringRef ExprBytes = Instr.getValues(); assert(ExprBytes.size() > 1 && "DWARF expression CFI is too short"); uint8_t Opcode = ExprBytes[0]; @@ -694,7 +690,6 @@ std::string mutateDWARFExpressionTargetReg(const MCCFIInstruction &Instr, .concat(ExprBytes.drop_front(1 + Size)) .str(); } -} // namespace void BinaryFunction::mutateCFIRegisterFor(const MCInst &Instr, MCPhysReg NewReg) { @@ -2999,14 +2994,13 @@ void BinaryFunction::duplicateConstantIslands() { } } -namespace { - #ifndef MAX_PATH #define MAX_PATH 255 #endif -std::string constructFilename(std::string Filename, std::string Annotation, - std::string Suffix) { +static std::string constructFilename(std::string Filename, + std::string Annotation, + std::string Suffix) { std::replace(Filename.begin(), Filename.end(), '/', '-'); if (!Annotation.empty()) Annotation.insert(0, "-"); @@ -3023,7 +3017,7 @@ std::string constructFilename(std::string Filename, std::string Annotation, return Filename; } -std::string formatEscapes(const std::string &Str) { +static std::string formatEscapes(const std::string &Str) { std::string Result; for (unsigned I = 0; I < Str.size(); ++I) { char C = Str[I]; @@ -3041,8 +3035,6 @@ std::string formatEscapes(const std::string &Str) { return Result; } -} // namespace - void BinaryFunction::dumpGraph(raw_ostream &OS) const { OS << "digraph \"" << getPrintName() << "\" {\n" << "node [fontname=courier, shape=box, style=filled, colorscheme=brbg9]\n"; diff --git a/bolt/lib/Core/DebugData.cpp b/bolt/lib/Core/DebugData.cpp index a1250177..41289fb 100644 --- a/bolt/lib/Core/DebugData.cpp +++ b/bolt/lib/Core/DebugData.cpp @@ -90,8 +90,6 @@ std::optional findAttributeInfo(const DWARFDie DIE, const DebugLineTableRowRef DebugLineTableRowRef::NULL_ROW{0, 0}; -namespace { - LLVM_ATTRIBUTE_UNUSED static void printLE64(const std::string &S) { for (uint32_t I = 0, Size = S.size(); I < Size; ++I) { @@ -106,9 +104,10 @@ static void printLE64(const std::string &S) { // the form (begin address, range size), otherwise (begin address, end address). // Terminates the list by writing a pair of two zeroes. // Returns the number of written bytes. 
-uint64_t writeAddressRanges(raw_svector_ostream &Stream, - const DebugAddressRangesVector &AddressRanges, - const bool WriteRelativeRanges = false) { +static uint64_t +writeAddressRanges(raw_svector_ostream &Stream, + const DebugAddressRangesVector &AddressRanges, + const bool WriteRelativeRanges = false) { for (const DebugAddressRange &Range : AddressRanges) { support::endian::write(Stream, Range.LowPC, support::little); support::endian::write( @@ -121,8 +120,6 @@ uint64_t writeAddressRanges(raw_svector_ostream &Stream, return AddressRanges.size() * 16 + 16; } -} // namespace - DebugRangesSectionWriter::DebugRangesSectionWriter() { RangesBuffer = std::make_unique<DebugBufferVector>(); RangesStream = std::make_unique<raw_svector_ostream>(*RangesBuffer); diff --git a/bolt/lib/Core/Relocation.cpp b/bolt/lib/Core/Relocation.cpp index ee90be5..e8a889f 100644 --- a/bolt/lib/Core/Relocation.cpp +++ b/bolt/lib/Core/Relocation.cpp @@ -22,9 +22,7 @@ using namespace bolt; Triple::ArchType Relocation::Arch; -namespace { - -bool isSupportedX86(uint64_t Type) { +static bool isSupportedX86(uint64_t Type) { switch (Type) { default: return false; @@ -46,7 +44,7 @@ bool isSupportedX86(uint64_t Type) { } } -bool isSupportedAArch64(uint64_t Type) { +static bool isSupportedAArch64(uint64_t Type) { switch (Type) { default: return false; @@ -91,7 +89,7 @@ bool isSupportedAArch64(uint64_t Type) { } } -size_t getSizeForTypeX86(uint64_t Type) { +static size_t getSizeForTypeX86(uint64_t Type) { switch (Type) { default: errs() << object::getELFRelocationTypeName(ELF::EM_X86_64, Type) << '\n'; @@ -117,7 +115,7 @@ size_t getSizeForTypeX86(uint64_t Type) { } } -size_t getSizeForTypeAArch64(uint64_t Type) { +static size_t getSizeForTypeAArch64(uint64_t Type) { switch (Type) { default: errs() << object::getELFRelocationTypeName(ELF::EM_AARCH64, Type) << '\n'; @@ -165,17 +163,19 @@ size_t getSizeForTypeAArch64(uint64_t Type) { } } -bool skipRelocationTypeX86(uint64_t Type) { return Type == ELF::R_X86_64_NONE; } +static bool skipRelocationTypeX86(uint64_t Type) { + return Type == ELF::R_X86_64_NONE; +} -bool skipRelocationTypeAArch64(uint64_t Type) { +static bool skipRelocationTypeAArch64(uint64_t Type) { return Type == ELF::R_AARCH64_NONE || Type == ELF::R_AARCH64_LD_PREL_LO19; } -bool skipRelocationProcessX86(uint64_t &Type, uint64_t Contents) { +static bool skipRelocationProcessX86(uint64_t &Type, uint64_t Contents) { return false; } -bool skipRelocationProcessAArch64(uint64_t &Type, uint64_t Contents) { +static bool skipRelocationProcessAArch64(uint64_t &Type, uint64_t Contents) { auto IsMov = [](uint64_t Contents) -> bool { // The bits 28-23 are 0b100101 return (Contents & 0x1f800000) == 0x12800000; @@ -262,7 +262,7 @@ bool skipRelocationProcessAArch64(uint64_t &Type, uint64_t Contents) { return false; } -uint64_t adjustValueX86(uint64_t Type, uint64_t Value, uint64_t PC) { +static uint64_t adjustValueX86(uint64_t Type, uint64_t Value, uint64_t PC) { switch (Type) { default: llvm_unreachable("not supported relocation"); @@ -275,7 +275,7 @@ uint64_t adjustValueX86(uint64_t Type, uint64_t Value, uint64_t PC) { return Value; } -uint64_t adjustValueAArch64(uint64_t Type, uint64_t Value, uint64_t PC) { +static uint64_t adjustValueAArch64(uint64_t Type, uint64_t Value, uint64_t PC) { switch (Type) { default: llvm_unreachable("not supported relocation"); @@ -290,7 +290,7 @@ uint64_t adjustValueAArch64(uint64_t Type, uint64_t Value, uint64_t PC) { return Value; } -uint64_t extractValueX86(uint64_t Type, uint64_t Contents, uint64_t PC) { +static uint64_t
extractValueX86(uint64_t Type, uint64_t Contents, uint64_t PC) { if (Type == ELF::R_X86_64_32S) return SignExtend64<32>(Contents); if (Relocation::isPCRelative(Type)) @@ -298,7 +298,8 @@ uint64_t extractValueX86(uint64_t Type, uint64_t Contents, uint64_t PC) { return Contents; } -uint64_t extractValueAArch64(uint64_t Type, uint64_t Contents, uint64_t PC) { +static uint64_t extractValueAArch64(uint64_t Type, uint64_t Contents, + uint64_t PC) { switch (Type) { default: errs() << object::getELFRelocationTypeName(ELF::EM_AARCH64, Type) << '\n'; @@ -405,7 +406,7 @@ uint64_t extractValueAArch64(uint64_t Type, uint64_t Contents, uint64_t PC) { } } -bool isGOTX86(uint64_t Type) { +static bool isGOTX86(uint64_t Type) { switch (Type) { default: return false; @@ -425,7 +426,7 @@ bool isGOTX86(uint64_t Type) { } } -bool isGOTAArch64(uint64_t Type) { +static bool isGOTAArch64(uint64_t Type) { switch (Type) { default: return false; @@ -442,7 +443,7 @@ bool isGOTAArch64(uint64_t Type) { } } -bool isTLSX86(uint64_t Type) { +static bool isTLSX86(uint64_t Type) { switch (Type) { default: return false; @@ -453,7 +454,7 @@ bool isTLSX86(uint64_t Type) { } } -bool isTLSAArch64(uint64_t Type) { +static bool isTLSAArch64(uint64_t Type) { switch (Type) { default: return false; @@ -470,7 +471,7 @@ bool isTLSAArch64(uint64_t Type) { } } -bool isPCRelativeX86(uint64_t Type) { +static bool isPCRelativeX86(uint64_t Type) { switch (Type) { default: llvm_unreachable("Unknown relocation type"); @@ -495,7 +496,7 @@ bool isPCRelativeX86(uint64_t Type) { } } -bool isPCRelativeAArch64(uint64_t Type) { +static bool isPCRelativeAArch64(uint64_t Type) { switch (Type) { default: llvm_unreachable("Unknown relocation type"); @@ -541,8 +542,6 @@ bool isPCRelativeAArch64(uint64_t Type) { } } -} // end anonymous namespace - bool Relocation::isSupported(uint64_t Type) { if (Arch == Triple::aarch64) return isSupportedAArch64(Type); diff --git a/bolt/lib/Passes/Aligner.cpp b/bolt/lib/Passes/Aligner.cpp index e55db5d..ef419bb 100644 --- a/bolt/lib/Passes/Aligner.cpp +++ b/bolt/lib/Passes/Aligner.cpp @@ -64,11 +64,9 @@ cl::opt namespace llvm { namespace bolt { -namespace { - // Align function to the specified byte-boundary (typically, 64) offsetting // the fuction by not more than the corresponding value -void alignMaxBytes(BinaryFunction &Function) { +static void alignMaxBytes(BinaryFunction &Function) { Function.setAlignment(opts::AlignFunctions); Function.setMaxAlignmentBytes(opts::AlignFunctionsMaxBytes); Function.setMaxColdAlignmentBytes(opts::AlignFunctionsMaxBytes); @@ -78,7 +76,8 @@ void alignMaxBytes(BinaryFunction &Function) { // the fuction by not more than the minimum over // -- the size of the function // -- the specified number of bytes -void alignCompact(BinaryFunction &Function, const MCCodeEmitter *Emitter) { +static void alignCompact(BinaryFunction &Function, + const MCCodeEmitter *Emitter) { const BinaryContext &BC = Function.getBinaryContext(); size_t HotSize = 0; size_t ColdSize = 0; @@ -101,8 +100,6 @@ void alignCompact(BinaryFunction &Function, const MCCodeEmitter *Emitter) { std::min(size_t(opts::AlignFunctionsMaxBytes), ColdSize)); } -} // end anonymous namespace - void AlignerPass::alignBlocks(BinaryFunction &Function, const MCCodeEmitter *Emitter) { if (!Function.hasValidProfile() || !Function.isSimple()) diff --git a/bolt/lib/Passes/AllocCombiner.cpp b/bolt/lib/Passes/AllocCombiner.cpp index dd7c14f..6d3f2a5 100644 --- a/bolt/lib/Passes/AllocCombiner.cpp +++ b/bolt/lib/Passes/AllocCombiner.cpp @@ -25,16 +25,14 
@@ extern cl::opt<FrameOptimizationType> FrameOptimization; namespace llvm { namespace bolt { -namespace { - -bool getStackAdjustmentSize(const BinaryContext &BC, const MCInst &Inst, - int64_t &Adjustment) { +static bool getStackAdjustmentSize(const BinaryContext &BC, const MCInst &Inst, + int64_t &Adjustment) { return BC.MIB->evaluateStackOffsetExpr( Inst, Adjustment, std::make_pair(BC.MIB->getStackPointer(), 0LL), std::make_pair(0, 0LL)); } -bool isIndifferentToSP(const MCInst &Inst, const BinaryContext &BC) { +static bool isIndifferentToSP(const MCInst &Inst, const BinaryContext &BC) { if (BC.MIB->isCFI(Inst)) return true; @@ -50,12 +48,12 @@ bool isIndifferentToSP(const MCInst &Inst, const BinaryContext &BC) { return true; } -bool shouldProcess(const BinaryFunction &Function) { +static bool shouldProcess(const BinaryFunction &Function) { return Function.isSimple() && Function.hasCFG() && !Function.isIgnored(); } -void runForAllWeCare(std::map<uint64_t, BinaryFunction> &BFs, - std::function<void(BinaryFunction &Function)> Task) { +static void runForAllWeCare(std::map<uint64_t, BinaryFunction> &BFs, + std::function<void(BinaryFunction &Function)> Task) { for (auto &It : BFs) { BinaryFunction &Function = It.second; if (shouldProcess(Function)) @@ -63,8 +61,6 @@ void runForAllWeCare(std::map<uint64_t, BinaryFunction> &BFs, } } -} // end anonymous namespace - void AllocCombinerPass::combineAdjustments(BinaryFunction &BF) { BinaryContext &BC = BF.getBinaryContext(); for (BinaryBasicBlock &BB : BF) { diff --git a/bolt/lib/Passes/BinaryPasses.cpp b/bolt/lib/Passes/BinaryPasses.cpp index e50379b..377176e 100644 --- a/bolt/lib/Passes/BinaryPasses.cpp +++ b/bolt/lib/Passes/BinaryPasses.cpp @@ -26,9 +26,7 @@ using namespace llvm; using namespace bolt; -namespace { - -const char *dynoStatsOptName(const bolt::DynoStats::Category C) { +static const char *dynoStatsOptName(const bolt::DynoStats::Category C) { assert(C > bolt::DynoStats::FIRST_DYNO_STAT && C < DynoStats::LAST_DYNO_STAT && "Unexpected dyno stat category."); @@ -40,7 +38,6 @@ const char *dynoStatsOptName(const bolt::DynoStats::Category C) { return OptNames[C].c_str(); } -} namespace opts { @@ -629,8 +626,6 @@ void LowerAnnotations::runOnFunctions(BinaryContext &BC) { BC.MIB->setOffset(*Item.first, Item.second); } -namespace { - // This peephole fixes jump instructions that jump to another basic // block with a single jump instruction, e.g. // @@ -644,7 +639,7 @@ namespace { // B0: ... // jmp B2 (or jcc B2) // -uint64_t fixDoubleJumps(BinaryFunction &Function, bool MarkInvalid) { +static uint64_t fixDoubleJumps(BinaryFunction &Function, bool MarkInvalid) { uint64_t NumDoubleJumps = 0; MCContext *Ctx = Function.getBinaryContext().Ctx.get(); @@ -742,7 +737,6 @@ uint64_t fixDoubleJumps(BinaryFunction &Function, bool MarkInvalid) { return NumDoubleJumps; } -} // namespace bool SimplifyConditionalTailCalls::shouldRewriteBranch( const BinaryBasicBlock *PredBB, const MCInst &CondBranch, diff --git a/bolt/lib/Passes/CallGraph.cpp b/bolt/lib/Passes/CallGraph.cpp index 7fa6562..ee2ec26 100644 --- a/bolt/lib/Passes/CallGraph.cpp +++ b/bolt/lib/Passes/CallGraph.cpp @@ -22,9 +22,7 @@ # undef USE_SSECRC #endif -namespace { - -LLVM_ATTRIBUTE_UNUSED inline size_t hash_int64_fallback(int64_t k) { +static LLVM_ATTRIBUTE_UNUSED inline size_t hash_int64_fallback(int64_t k) { uint64_t key = (unsigned long long)k; // "64 bit Mix Functions", from Thomas Wang's "Integer Hash Function."
// http://www.concentric.net/~ttwang/tech/inthash.htm @@ -37,7 +35,7 @@ LLVM_ATTRIBUTE_UNUSED inline size_t hash_int64_fallback(int64_t k) { return static_cast<size_t>(static_cast<uint32_t>(key)); } -LLVM_ATTRIBUTE_UNUSED inline size_t hash_int64(int64_t k) { +static LLVM_ATTRIBUTE_UNUSED inline size_t hash_int64(int64_t k) { #if defined(USE_SSECRC) && defined(__SSE4_2__) size_t h = 0; __asm("crc32q %1, %0\n" : "+r"(h) : "rm"(k)); @@ -47,7 +45,7 @@ LLVM_ATTRIBUTE_UNUSED inline size_t hash_int64(int64_t k) { return h; #else return hash_int64_fallback(k); #endif } -inline size_t hash_int64_pair(int64_t k1, int64_t k2) { +static inline size_t hash_int64_pair(int64_t k1, int64_t k2) { #if defined(USE_SSECRC) && defined(__SSE4_2__) // crc32 is commutative, so we need to perturb k1 so that (k1, k2) hashes // differently from (k2, k1). @@ -59,8 +57,6 @@ inline size_t hash_int64_pair(int64_t k1, int64_t k2) { #endif } -} - namespace llvm { namespace bolt { diff --git a/bolt/lib/Passes/IdenticalCodeFolding.cpp b/bolt/lib/Passes/IdenticalCodeFolding.cpp index fb71db0..9695eb6 100644 --- a/bolt/lib/Passes/IdenticalCodeFolding.cpp +++ b/bolt/lib/Passes/IdenticalCodeFolding.cpp @@ -43,14 +43,12 @@ TimeICF("time-icf", cl::cat(BoltOptCategory)); } // namespace opts -namespace { -using JumpTable = bolt::JumpTable; - /// Compare two jump tables in 2 functions. The function relies on consistent /// ordering of basic blocks in both binary functions (e.g. DFS). -bool equalJumpTables(const JumpTable &JumpTableA, const JumpTable &JumpTableB, - const BinaryFunction &FunctionA, - const BinaryFunction &FunctionB) { +static bool equalJumpTables(const JumpTable &JumpTableA, + const JumpTable &JumpTableB, + const BinaryFunction &FunctionA, + const BinaryFunction &FunctionB) { if (JumpTableA.EntrySize != JumpTableB.EntrySize) return false; @@ -92,9 +90,10 @@ bool equalJumpTables(const JumpTable &JumpTableA, const JumpTable &JumpTableB, /// given instruction of the given function. The functions should have /// identical CFG. template <class Compare> -bool isInstrEquivalentWith(const MCInst &InstA, const BinaryBasicBlock &BBA, - const MCInst &InstB, const BinaryBasicBlock &BBB, - Compare Comp) { +static bool isInstrEquivalentWith(const MCInst &InstA, + const BinaryBasicBlock &BBA, + const MCInst &InstB, + const BinaryBasicBlock &BBB, Compare Comp) { if (InstA.getOpcode() != InstB.getOpcode()) return false; @@ -148,8 +147,8 @@ bool isInstrEquivalentWith(const MCInst &InstA, const BinaryBasicBlock &BBA, /// If \p CongruentSymbols is set to true, then symbolic operands that reference /// potentially identical but different functions are ignored during the /// comparison. -bool isIdenticalWith(const BinaryFunction &A, const BinaryFunction &B, - bool CongruentSymbols) { +static bool isIdenticalWith(const BinaryFunction &A, const BinaryFunction &B, + bool CongruentSymbols) { assert(A.hasCFG() && B.hasCFG() && "both functions should have CFG"); // Compare the two functions, one basic block at a time. @@ -338,7 +337,7 @@ typedef std::unordered_map<BinaryFunction *, std::vector<BinaryFunction *>, KeyHash, KeyEqual> IdenticalBucketsMap; -std::string hashInteger(uint64_t Value) { +static std::string hashInteger(uint64_t Value) { std::string HashString; if (Value == 0) HashString.push_back(0); @@ -352,7 +351,7 @@ std::string hashInteger(uint64_t Value) { return HashString; } -std::string hashSymbol(BinaryContext &BC, const MCSymbol &Symbol) { +static std::string hashSymbol(BinaryContext &BC, const MCSymbol &Symbol) { std::string HashString; // Ignore function references.
@@ -370,7 +369,7 @@ std::string hashSymbol(BinaryContext &BC, const MCSymbol &Symbol) { return HashString.append(hashInteger(*ErrorOrValue)); } -std::string hashExpr(BinaryContext &BC, const MCExpr &Expr) { +static std::string hashExpr(BinaryContext &BC, const MCExpr &Expr) { switch (Expr.getKind()) { case MCExpr::Constant: return hashInteger(cast<MCConstantExpr>(Expr).getValue()); @@ -394,7 +393,7 @@ std::string hashExpr(BinaryContext &BC, const MCExpr &Expr) { llvm_unreachable("invalid expression kind"); } -std::string hashInstOperand(BinaryContext &BC, const MCOperand &Operand) { +static std::string hashInstOperand(BinaryContext &BC, + const MCOperand &Operand) { if (Operand.isImm()) return hashInteger(Operand.getImm()); if (Operand.isReg()) @@ -405,8 +405,6 @@ std::string hashInstOperand(BinaryContext &BC, const MCOperand &Operand) { return std::string(); } -} // namespace - namespace llvm { namespace bolt { diff --git a/bolt/lib/Passes/Instrumentation.cpp b/bolt/lib/Passes/Instrumentation.cpp index 2e63576..c36a178 100644 --- a/bolt/lib/Passes/Instrumentation.cpp +++ b/bolt/lib/Passes/Instrumentation.cpp @@ -182,19 +182,16 @@ Instrumentation::createInstrumentationSnippet(BinaryContext &BC, bool IsLeaf) { return CounterInstrs; } -namespace { - // Helper instruction sequence insertion function -BinaryBasicBlock::iterator insertInstructions(InstructionListType &Instrs, - BinaryBasicBlock &BB, - BinaryBasicBlock::iterator Iter) { +static BinaryBasicBlock::iterator +insertInstructions(InstructionListType &Instrs, BinaryBasicBlock &BB, + BinaryBasicBlock::iterator Iter) { for (MCInst &NewInst : Instrs) { Iter = BB.insertInstruction(Iter, NewInst); ++Iter; } return Iter; } -} // namespace void Instrumentation::instrumentLeafNode(BinaryBasicBlock &BB, BinaryBasicBlock::iterator Iter, diff --git a/bolt/lib/Passes/LongJmp.cpp b/bolt/lib/Passes/LongJmp.cpp index 51308a4..31bb800 100644 --- a/bolt/lib/Passes/LongJmp.cpp +++ b/bolt/lib/Passes/LongJmp.cpp @@ -31,10 +31,9 @@ static cl::opt<bool> GroupStubs("group-stubs", namespace llvm { namespace bolt { -namespace { constexpr unsigned ColdFragAlign = 16; -void relaxStubToShortJmp(BinaryBasicBlock &StubBB, const MCSymbol *Tgt) { +static void relaxStubToShortJmp(BinaryBasicBlock &StubBB, const MCSymbol *Tgt) { const BinaryContext &BC = StubBB.getFunction()->getBinaryContext(); InstructionListType Seq; BC.MIB->createShortJmp(Seq, Tgt, BC.Ctx.get()); @@ -42,7 +41,7 @@ void relaxStubToShortJmp(BinaryBasicBlock &StubBB, const MCSymbol *Tgt) { StubBB.addInstructions(Seq.begin(), Seq.end()); } -void relaxStubToLongJmp(BinaryBasicBlock &StubBB, const MCSymbol *Tgt) { +static void relaxStubToLongJmp(BinaryBasicBlock &StubBB, const MCSymbol *Tgt) { const BinaryContext &BC = StubBB.getFunction()->getBinaryContext(); InstructionListType Seq; BC.MIB->createLongJmp(Seq, Tgt, BC.Ctx.get()); @@ -50,7 +49,7 @@ void relaxStubToLongJmp(BinaryBasicBlock &StubBB, const MCSymbol *Tgt) { StubBB.addInstructions(Seq.begin(), Seq.end()); } -BinaryBasicBlock *getBBAtHotColdSplitPoint(BinaryFunction &Func) { +static BinaryBasicBlock *getBBAtHotColdSplitPoint(BinaryFunction &Func) { if (!Func.isSplit() || Func.empty()) return nullptr; @@ -65,13 +64,11 @@ BinaryBasicBlock *getBBAtHotColdSplitPoint(BinaryFunction &Func) { llvm_unreachable("No hot-colt split point found"); } -bool shouldInsertStub(const BinaryContext &BC, const MCInst &Inst) { +static bool shouldInsertStub(const BinaryContext &BC, const MCInst &Inst) { return (BC.MIB->isBranch(Inst) || BC.MIB->isCall(Inst)) &&
!BC.MIB->isIndirectBranch(Inst) && !BC.MIB->isIndirectCall(Inst); } -} // end anonymous namespace - std::pair<std::unique_ptr<BinaryBasicBlock>, MCSymbol *> LongJmpPass::createNewStub(BinaryBasicBlock &SourceBB, const MCSymbol *TgtSym, bool TgtIsFunc, uint64_t AtAddress) { -- 2.7.4
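For reference, every hunk in this patch applies the same mechanical transformation, sketched below with a hypothetical helper (internalHelper is an illustrative name only, not a function from the patch):

// Before: internal linkage expressed with an anonymous namespace.
namespace {
int internalHelper(int X) { return X + 1; }
} // namespace

// After: same internal linkage, spelled with `static`, as the
// LLVM Coding Standards recommend for function definitions.
static int internalHelper(int X) { return X + 1; }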