constexpr unsigned BinaryFunction::MinAlign;
-namespace {
-
-template <typename R> bool emptyRange(const R &Range) {
+template <typename R> static bool emptyRange(const R &Range) {
return Range.begin() == Range.end();
}
/// to point to this information, which is represented by a
/// DebugLineTableRowRef. The returned pointer is null if no debug line
/// information for this instruction was found.
-SMLoc findDebugLineInformationForInstructionAt(
+static SMLoc findDebugLineInformationForInstructionAt(
uint64_t Address, DWARFUnit *Unit,
const DWARFDebugLine::LineTable *LineTable) {
// We use the pointer in SMLoc to store an instance of DebugLineTableRowRef,
return SMLoc::getFromPointer(Ptr);
}
-std::string buildSectionName(StringRef Prefix, StringRef Name,
- const BinaryContext &BC) {
+static std::string buildSectionName(StringRef Prefix, StringRef Name,
+ const BinaryContext &BC) {
if (BC.isELF())
return (Prefix + Name).str();
static NameShortener NS;
return (Prefix + Twine(NS.getID(Name))).str();
}
-raw_ostream &operator<<(raw_ostream &OS, const BinaryFunction::State State) {
+static raw_ostream &operator<<(raw_ostream &OS,
+ const BinaryFunction::State State) {
switch (State) {
case BinaryFunction::State::Empty: OS << "empty"; break;
case BinaryFunction::State::Disassembled: OS << "disassembled"; break;
return OS;
}
-} // namespace
-
std::string BinaryFunction::buildCodeSectionName(StringRef Name,
const BinaryContext &BC) {
return buildSectionName(BC.isELF() ? ".local.text." : ".l.text.", Name, BC);
}
}
-namespace {
-std::string mutateDWARFExpressionTargetReg(const MCCFIInstruction &Instr,
- MCPhysReg NewReg) {
+static std::string mutateDWARFExpressionTargetReg(const MCCFIInstruction &Instr,
+ MCPhysReg NewReg) {
StringRef ExprBytes = Instr.getValues();
assert(ExprBytes.size() > 1 && "DWARF expression CFI is too short");
uint8_t Opcode = ExprBytes[0];
.concat(ExprBytes.drop_front(1 + Size))
.str();
}
-} // namespace
void BinaryFunction::mutateCFIRegisterFor(const MCInst &Instr,
MCPhysReg NewReg) {
}
}
-namespace {
-
#ifndef MAX_PATH
#define MAX_PATH 255
#endif
-std::string constructFilename(std::string Filename, std::string Annotation,
- std::string Suffix) {
+static std::string constructFilename(std::string Filename,
+ std::string Annotation,
+ std::string Suffix) {
std::replace(Filename.begin(), Filename.end(), '/', '-');
if (!Annotation.empty())
Annotation.insert(0, "-");
return Filename;
}
-std::string formatEscapes(const std::string &Str) {
+static std::string formatEscapes(const std::string &Str) {
std::string Result;
for (unsigned I = 0; I < Str.size(); ++I) {
char C = Str[I];
return Result;
}
-} // namespace
-
void BinaryFunction::dumpGraph(raw_ostream &OS) const {
OS << "digraph \"" << getPrintName() << "\" {\n"
<< "node [fontname=courier, shape=box, style=filled, colorscheme=brbg9]\n";
const DebugLineTableRowRef DebugLineTableRowRef::NULL_ROW{0, 0};
-namespace {
-
LLVM_ATTRIBUTE_UNUSED
static void printLE64(const std::string &S) {
for (uint32_t I = 0, Size = S.size(); I < Size; ++I) {
// the form (begin address, range size), otherwise (begin address, end address).
// Terminates the list by writing a pair of two zeroes.
// Returns the number of written bytes.
-uint64_t writeAddressRanges(raw_svector_ostream &Stream,
- const DebugAddressRangesVector &AddressRanges,
- const bool WriteRelativeRanges = false) {
+static uint64_t
+writeAddressRanges(raw_svector_ostream &Stream,
+ const DebugAddressRangesVector &AddressRanges,
+ const bool WriteRelativeRanges = false) {
for (const DebugAddressRange &Range : AddressRanges) {
support::endian::write(Stream, Range.LowPC, support::little);
support::endian::write(
return AddressRanges.size() * 16 + 16;
}
-} // namespace
-
DebugRangesSectionWriter::DebugRangesSectionWriter() {
RangesBuffer = std::make_unique<DebugBufferVector>();
RangesStream = std::make_unique<raw_svector_ostream>(*RangesBuffer);
Triple::ArchType Relocation::Arch;
-namespace {
-
-bool isSupportedX86(uint64_t Type) {
+static bool isSupportedX86(uint64_t Type) {
switch (Type) {
default:
return false;
}
}
-bool isSupportedAArch64(uint64_t Type) {
+static bool isSupportedAArch64(uint64_t Type) {
switch (Type) {
default:
return false;
}
}
-size_t getSizeForTypeX86(uint64_t Type) {
+static size_t getSizeForTypeX86(uint64_t Type) {
switch (Type) {
default:
errs() << object::getELFRelocationTypeName(ELF::EM_X86_64, Type) << '\n';
}
}
-size_t getSizeForTypeAArch64(uint64_t Type) {
+static size_t getSizeForTypeAArch64(uint64_t Type) {
switch (Type) {
default:
errs() << object::getELFRelocationTypeName(ELF::EM_AARCH64, Type) << '\n';
}
}
-bool skipRelocationTypeX86(uint64_t Type) { return Type == ELF::R_X86_64_NONE; }
+static bool skipRelocationTypeX86(uint64_t Type) {
+ return Type == ELF::R_X86_64_NONE;
+}
-bool skipRelocationTypeAArch64(uint64_t Type) {
+static bool skipRelocationTypeAArch64(uint64_t Type) {
return Type == ELF::R_AARCH64_NONE || Type == ELF::R_AARCH64_LD_PREL_LO19;
}
-bool skipRelocationProcessX86(uint64_t &Type, uint64_t Contents) {
+static bool skipRelocationProcessX86(uint64_t &Type, uint64_t Contents) {
return false;
}
-bool skipRelocationProcessAArch64(uint64_t &Type, uint64_t Contents) {
+static bool skipRelocationProcessAArch64(uint64_t &Type, uint64_t Contents) {
auto IsMov = [](uint64_t Contents) -> bool {
// The bits 28-23 are 0b100101
return (Contents & 0x1f800000) == 0x12800000;
return false;
}
-uint64_t adjustValueX86(uint64_t Type, uint64_t Value, uint64_t PC) {
+static uint64_t adjustValueX86(uint64_t Type, uint64_t Value, uint64_t PC) {
switch (Type) {
default:
llvm_unreachable("not supported relocation");
return Value;
}
-uint64_t adjustValueAArch64(uint64_t Type, uint64_t Value, uint64_t PC) {
+static uint64_t adjustValueAArch64(uint64_t Type, uint64_t Value, uint64_t PC) {
switch (Type) {
default:
llvm_unreachable("not supported relocation");
return Value;
}
-uint64_t extractValueX86(uint64_t Type, uint64_t Contents, uint64_t PC) {
+static uint64_t extractValueX86(uint64_t Type, uint64_t Contents, uint64_t PC) {
if (Type == ELF::R_X86_64_32S)
return SignExtend64<32>(Contents);
if (Relocation::isPCRelative(Type))
return Contents;
}
-uint64_t extractValueAArch64(uint64_t Type, uint64_t Contents, uint64_t PC) {
+static uint64_t extractValueAArch64(uint64_t Type, uint64_t Contents,
+ uint64_t PC) {
switch (Type) {
default:
errs() << object::getELFRelocationTypeName(ELF::EM_AARCH64, Type) << '\n';
}
}
-bool isGOTX86(uint64_t Type) {
+static bool isGOTX86(uint64_t Type) {
switch (Type) {
default:
return false;
}
}
-bool isGOTAArch64(uint64_t Type) {
+static bool isGOTAArch64(uint64_t Type) {
switch (Type) {
default:
return false;
}
}
-bool isTLSX86(uint64_t Type) {
+static bool isTLSX86(uint64_t Type) {
switch (Type) {
default:
return false;
}
}
-bool isTLSAArch64(uint64_t Type) {
+static bool isTLSAArch64(uint64_t Type) {
switch (Type) {
default:
return false;
}
}
-bool isPCRelativeX86(uint64_t Type) {
+static bool isPCRelativeX86(uint64_t Type) {
switch (Type) {
default:
llvm_unreachable("Unknown relocation type");
}
}
-bool isPCRelativeAArch64(uint64_t Type) {
+static bool isPCRelativeAArch64(uint64_t Type) {
switch (Type) {
default:
llvm_unreachable("Unknown relocation type");
}
}
-} // end anonymous namespace
-
bool Relocation::isSupported(uint64_t Type) {
if (Arch == Triple::aarch64)
return isSupportedAArch64(Type);
namespace llvm {
namespace bolt {
-namespace {
-
// Align function to the specified byte-boundary (typically, 64) offsetting
// the function by not more than the corresponding value
-void alignMaxBytes(BinaryFunction &Function) {
+static void alignMaxBytes(BinaryFunction &Function) {
Function.setAlignment(opts::AlignFunctions);
Function.setMaxAlignmentBytes(opts::AlignFunctionsMaxBytes);
Function.setMaxColdAlignmentBytes(opts::AlignFunctionsMaxBytes);
// the function by not more than the minimum over
// -- the size of the function
// -- the specified number of bytes
-void alignCompact(BinaryFunction &Function, const MCCodeEmitter *Emitter) {
+static void alignCompact(BinaryFunction &Function,
+ const MCCodeEmitter *Emitter) {
const BinaryContext &BC = Function.getBinaryContext();
size_t HotSize = 0;
size_t ColdSize = 0;
std::min(size_t(opts::AlignFunctionsMaxBytes), ColdSize));
}
-} // end anonymous namespace
-
void AlignerPass::alignBlocks(BinaryFunction &Function,
const MCCodeEmitter *Emitter) {
if (!Function.hasValidProfile() || !Function.isSimple())
namespace llvm {
namespace bolt {
-namespace {
-
-bool getStackAdjustmentSize(const BinaryContext &BC, const MCInst &Inst,
- int64_t &Adjustment) {
+static bool getStackAdjustmentSize(const BinaryContext &BC, const MCInst &Inst,
+ int64_t &Adjustment) {
return BC.MIB->evaluateStackOffsetExpr(
Inst, Adjustment, std::make_pair(BC.MIB->getStackPointer(), 0LL),
std::make_pair(0, 0LL));
}
-bool isIndifferentToSP(const MCInst &Inst, const BinaryContext &BC) {
+static bool isIndifferentToSP(const MCInst &Inst, const BinaryContext &BC) {
if (BC.MIB->isCFI(Inst))
return true;
return true;
}
-bool shouldProcess(const BinaryFunction &Function) {
+static bool shouldProcess(const BinaryFunction &Function) {
return Function.isSimple() && Function.hasCFG() && !Function.isIgnored();
}
-void runForAllWeCare(std::map<uint64_t, BinaryFunction> &BFs,
- std::function<void(BinaryFunction &)> Task) {
+static void runForAllWeCare(std::map<uint64_t, BinaryFunction> &BFs,
+ std::function<void(BinaryFunction &)> Task) {
for (auto &It : BFs) {
BinaryFunction &Function = It.second;
if (shouldProcess(Function))
}
}
-} // end anonymous namespace
-
void AllocCombinerPass::combineAdjustments(BinaryFunction &BF) {
BinaryContext &BC = BF.getBinaryContext();
for (BinaryBasicBlock &BB : BF) {
using namespace llvm;
using namespace bolt;
-namespace {
-
-const char *dynoStatsOptName(const bolt::DynoStats::Category C) {
+static const char *dynoStatsOptName(const bolt::DynoStats::Category C) {
assert(C > bolt::DynoStats::FIRST_DYNO_STAT &&
C < DynoStats::LAST_DYNO_STAT && "Unexpected dyno stat category.");
return OptNames[C].c_str();
}
-}
namespace opts {
BC.MIB->setOffset(*Item.first, Item.second);
}
-namespace {
-
// This peephole fixes jump instructions that jump to another basic
// block with a single jump instruction, e.g.
//
// B0: ...
// jmp B2 (or jcc B2)
//
-uint64_t fixDoubleJumps(BinaryFunction &Function, bool MarkInvalid) {
+static uint64_t fixDoubleJumps(BinaryFunction &Function, bool MarkInvalid) {
uint64_t NumDoubleJumps = 0;
MCContext *Ctx = Function.getBinaryContext().Ctx.get();
return NumDoubleJumps;
}
-} // namespace
bool SimplifyConditionalTailCalls::shouldRewriteBranch(
const BinaryBasicBlock *PredBB, const MCInst &CondBranch,
# undef USE_SSECRC
#endif
-namespace {
-
-LLVM_ATTRIBUTE_UNUSED inline size_t hash_int64_fallback(int64_t k) {
+static LLVM_ATTRIBUTE_UNUSED inline size_t hash_int64_fallback(int64_t k) {
uint64_t key = (unsigned long long)k;
// "64 bit Mix Functions", from Thomas Wang's "Integer Hash Function."
// http://www.concentric.net/~ttwang/tech/inthash.htm
return static_cast<size_t>(static_cast<uint32_t>(key));
}
-LLVM_ATTRIBUTE_UNUSED inline size_t hash_int64(int64_t k) {
+static LLVM_ATTRIBUTE_UNUSED inline size_t hash_int64(int64_t k) {
#if defined(USE_SSECRC) && defined(__SSE4_2__)
size_t h = 0;
__asm("crc32q %1, %0\n" : "+r"(h) : "rm"(k));
#endif
}
-inline size_t hash_int64_pair(int64_t k1, int64_t k2) {
+static inline size_t hash_int64_pair(int64_t k1, int64_t k2) {
#if defined(USE_SSECRC) && defined(__SSE4_2__)
// crc32 is commutative, so we need to perturb k1 so that (k1, k2) hashes
// differently from (k2, k1).
#endif
}
-}
-
namespace llvm {
namespace bolt {
cl::cat(BoltOptCategory));
} // namespace opts
-namespace {
-using JumpTable = bolt::JumpTable;
-
/// Compare two jump tables in 2 functions. The function relies on consistent
/// ordering of basic blocks in both binary functions (e.g. DFS).
-bool equalJumpTables(const JumpTable &JumpTableA, const JumpTable &JumpTableB,
- const BinaryFunction &FunctionA,
- const BinaryFunction &FunctionB) {
+static bool equalJumpTables(const JumpTable &JumpTableA,
+ const JumpTable &JumpTableB,
+ const BinaryFunction &FunctionA,
+ const BinaryFunction &FunctionB) {
if (JumpTableA.EntrySize != JumpTableB.EntrySize)
return false;
/// given instruction of the given function. The functions should have
/// identical CFG.
template <class Compare>
-bool isInstrEquivalentWith(const MCInst &InstA, const BinaryBasicBlock &BBA,
- const MCInst &InstB, const BinaryBasicBlock &BBB,
- Compare Comp) {
+static bool isInstrEquivalentWith(const MCInst &InstA,
+ const BinaryBasicBlock &BBA,
+ const MCInst &InstB,
+ const BinaryBasicBlock &BBB, Compare Comp) {
if (InstA.getOpcode() != InstB.getOpcode())
return false;
/// If \p CongruentSymbols is set to true, then symbolic operands that reference
/// potentially identical but different functions are ignored during the
/// comparison.
-bool isIdenticalWith(const BinaryFunction &A, const BinaryFunction &B,
- bool CongruentSymbols) {
+static bool isIdenticalWith(const BinaryFunction &A, const BinaryFunction &B,
+ bool CongruentSymbols) {
assert(A.hasCFG() && B.hasCFG() && "both functions should have CFG");
// Compare the two functions, one basic block at a time.
KeyHash, KeyEqual>
IdenticalBucketsMap;
-std::string hashInteger(uint64_t Value) {
+static std::string hashInteger(uint64_t Value) {
std::string HashString;
if (Value == 0)
HashString.push_back(0);
return HashString;
}
-std::string hashSymbol(BinaryContext &BC, const MCSymbol &Symbol) {
+static std::string hashSymbol(BinaryContext &BC, const MCSymbol &Symbol) {
std::string HashString;
// Ignore function references.
return HashString.append(hashInteger(*ErrorOrValue));
}
-std::string hashExpr(BinaryContext &BC, const MCExpr &Expr) {
+static std::string hashExpr(BinaryContext &BC, const MCExpr &Expr) {
switch (Expr.getKind()) {
case MCExpr::Constant:
return hashInteger(cast<MCConstantExpr>(Expr).getValue());
llvm_unreachable("invalid expression kind");
}
-std::string hashInstOperand(BinaryContext &BC, const MCOperand &Operand) {
+static std::string hashInstOperand(BinaryContext &BC,
+ const MCOperand &Operand) {
if (Operand.isImm())
return hashInteger(Operand.getImm());
if (Operand.isReg())
return std::string();
}
-} // namespace
-
namespace llvm {
namespace bolt {
return CounterInstrs;
}
-namespace {
-
// Helper instruction sequence insertion function
-BinaryBasicBlock::iterator insertInstructions(InstructionListType &Instrs,
- BinaryBasicBlock &BB,
- BinaryBasicBlock::iterator Iter) {
+static BinaryBasicBlock::iterator
+insertInstructions(InstructionListType &Instrs, BinaryBasicBlock &BB,
+ BinaryBasicBlock::iterator Iter) {
for (MCInst &NewInst : Instrs) {
Iter = BB.insertInstruction(Iter, NewInst);
++Iter;
}
return Iter;
}
-} // namespace
void Instrumentation::instrumentLeafNode(BinaryBasicBlock &BB,
BinaryBasicBlock::iterator Iter,
namespace llvm {
namespace bolt {
-namespace {
constexpr unsigned ColdFragAlign = 16;
-void relaxStubToShortJmp(BinaryBasicBlock &StubBB, const MCSymbol *Tgt) {
+static void relaxStubToShortJmp(BinaryBasicBlock &StubBB, const MCSymbol *Tgt) {
const BinaryContext &BC = StubBB.getFunction()->getBinaryContext();
InstructionListType Seq;
BC.MIB->createShortJmp(Seq, Tgt, BC.Ctx.get());
StubBB.addInstructions(Seq.begin(), Seq.end());
}
-void relaxStubToLongJmp(BinaryBasicBlock &StubBB, const MCSymbol *Tgt) {
+static void relaxStubToLongJmp(BinaryBasicBlock &StubBB, const MCSymbol *Tgt) {
const BinaryContext &BC = StubBB.getFunction()->getBinaryContext();
InstructionListType Seq;
BC.MIB->createLongJmp(Seq, Tgt, BC.Ctx.get());
StubBB.addInstructions(Seq.begin(), Seq.end());
}
-BinaryBasicBlock *getBBAtHotColdSplitPoint(BinaryFunction &Func) {
+static BinaryBasicBlock *getBBAtHotColdSplitPoint(BinaryFunction &Func) {
if (!Func.isSplit() || Func.empty())
return nullptr;
llvm_unreachable("No hot-colt split point found");
}
-bool shouldInsertStub(const BinaryContext &BC, const MCInst &Inst) {
+static bool shouldInsertStub(const BinaryContext &BC, const MCInst &Inst) {
return (BC.MIB->isBranch(Inst) || BC.MIB->isCall(Inst)) &&
!BC.MIB->isIndirectBranch(Inst) && !BC.MIB->isIndirectCall(Inst);
}
-} // end anonymous namespace
-
std::pair<std::unique_ptr<BinaryBasicBlock>, MCSymbol *>
LongJmpPass::createNewStub(BinaryBasicBlock &SourceBB, const MCSymbol *TgtSym,
bool TgtIsFunc, uint64_t AtAddress) {