From c3c9e45312874ff890723f54cabfd41e43b2dbc4 Mon Sep 17 00:00:00 2001
From: Fangrui Song
Date: Mon, 21 Nov 2022 04:12:03 +0000
Subject: [PATCH] [ELF] Add InputSectionBase::{addReloc,relocs} and
 GotSection::addConstant to add/access relocations, in preparation for
 changing `relocations` from a SmallVector to a pointer.

Also change the `isec` parameter in `addAddendOnlyRelocIfNonPreemptible` to
`GotSection &`.
---
 lld/ELF/AArch64ErrataFix.cpp  | 11 ++++----
 lld/ELF/ARMErrataFix.cpp      | 11 ++++----
 lld/ELF/Arch/AArch64.cpp      | 10 +++----
 lld/ELF/Arch/PPC.cpp          |  2 +-
 lld/ELF/Arch/PPC64.cpp        |  2 +-
 lld/ELF/Arch/RISCV.cpp        | 25 ++++++++--------
 lld/ELF/Arch/X86.cpp          |  2 +-
 lld/ELF/Arch/X86_64.cpp       | 14 ++++-----
 lld/ELF/InputSection.cpp      |  8 +++---
 lld/ELF/InputSection.h        |  4 +++
 lld/ELF/Relocations.cpp       | 66 ++++++++++++++++++++-----------------------
 lld/ELF/SyntheticSections.cpp |  7 +++--
 lld/ELF/SyntheticSections.h   |  7 ++---
 lld/ELF/Target.cpp            |  2 +-
 14 files changed, 83 insertions(+), 88 deletions(-)

diff --git a/lld/ELF/AArch64ErrataFix.cpp b/lld/ELF/AArch64ErrataFix.cpp
index 9038d6d..966013a 100644
--- a/lld/ELF/AArch64ErrataFix.cpp
+++ b/lld/ELF/AArch64ErrataFix.cpp
@@ -544,10 +544,10 @@ static void implementPatch(uint64_t adrpAddr, uint64_t patcheeOffset,
   // and replace the relocation with a R_AARCH_JUMP26 branch relocation.
   // Case 4: No relocation. We must create a new R_AARCH64_JUMP26 branch
   // relocation at the offset.
-  auto relIt = llvm::find_if(isec->relocations, [=](const Relocation &r) {
+  auto relIt = llvm::find_if(isec->relocs(), [=](const Relocation &r) {
     return r.offset == patcheeOffset;
   });
-  if (relIt != isec->relocations.end() &&
+  if (relIt != isec->relocs().end() &&
       (relIt->type == R_AARCH64_JUMP26 || relIt->expr == R_RELAX_TLS_IE_TO_LE))
     return;
 
@@ -561,12 +561,11 @@ static void implementPatch(uint64_t adrpAddr, uint64_t patcheeOffset,
     return Relocation{R_PC, R_AARCH64_JUMP26, offset, 0, patchSym};
   };
 
-  if (relIt != isec->relocations.end()) {
-    ps->relocations.push_back(
-        {relIt->expr, relIt->type, 0, relIt->addend, relIt->sym});
+  if (relIt != isec->relocs().end()) {
+    ps->addReloc({relIt->expr, relIt->type, 0, relIt->addend, relIt->sym});
     *relIt = makeRelToPatch(patcheeOffset, ps->patchSym);
   } else
-    isec->relocations.push_back(makeRelToPatch(patcheeOffset, ps->patchSym));
+    isec->addReloc(makeRelToPatch(patcheeOffset, ps->patchSym));
 }
 
 // Scan all the instructions in InputSectionDescription, for each instance of
diff --git a/lld/ELF/ARMErrataFix.cpp b/lld/ELF/ARMErrataFix.cpp
index a92fc76..985a8e8 100644
--- a/lld/ELF/ARMErrataFix.cpp
+++ b/lld/ELF/ARMErrataFix.cpp
@@ -181,7 +181,7 @@ void Patch657417Section::writeTo(uint8_t *buf) {
   else
     write32le(buf, 0x9000f000);
   // If we have a relocation then apply it.
-  if (!relocations.empty()) {
+  if (!relocs().empty()) {
     target->relocateAlloc(*this, buf);
     return;
   }
@@ -281,12 +281,12 @@ static ScanResult scanCortexA8Errata657417(InputSection *isec, uint64_t &off,
   // Find a relocation for the branch if it exists. This will be used
   // to determine the target.
uint64_t branchOff = off + 4; - auto relIt = llvm::find_if(isec->relocations, [=](const Relocation &r) { + auto relIt = llvm::find_if(isec->relocs(), [=](const Relocation &r) { return r.offset == branchOff && (r.type == R_ARM_THM_JUMP19 || r.type == R_ARM_THM_JUMP24 || r.type == R_ARM_THM_CALL); }); - if (relIt != isec->relocations.end()) + if (relIt != isec->relocs().end()) scanRes.rel = &(*relIt); if (branchDestInFirstRegion(isec, branchOff, instr2, scanRes.rel)) { if (patchInRange(isec, branchOff, instr2)) { @@ -451,7 +451,7 @@ static void implementPatch(ScanResult sr, InputSection *isec, patchRelType = R_ARM_JUMP24; patchRelAddend -= 4; } - psec->relocations.push_back( + psec->addReloc( Relocation{sr.rel->expr, patchRelType, 0, patchRelAddend, sr.rel->sym}); // Redirect the existing branch relocation to the patch. sr.rel->expr = R_PC; @@ -470,8 +470,7 @@ static void implementPatch(ScanResult sr, InputSection *isec, type = R_ARM_THM_JUMP24; else type = R_ARM_THM_CALL; - isec->relocations.push_back( - Relocation{R_PC, type, sr.off, -4, psec->patchSym}); + isec->addReloc(Relocation{R_PC, type, sr.off, -4, psec->patchSym}); } patches.push_back(psec); } diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp index 111292b..0f5c593 100644 --- a/lld/ELF/Arch/AArch64.cpp +++ b/lld/ELF/Arch/AArch64.cpp @@ -747,9 +747,9 @@ void AArch64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const { uint64_t secAddr = sec.getOutputSection()->addr; if (auto *s = dyn_cast(&sec)) secAddr += s->outSecOff; - AArch64Relaxer relaxer(sec.relocations); - for (size_t i = 0, size = sec.relocations.size(); i != size; ++i) { - const Relocation &rel = sec.relocations[i]; + AArch64Relaxer relaxer(sec.relocs()); + for (size_t i = 0, size = sec.relocs().size(); i != size; ++i) { + const Relocation &rel = sec.relocs()[i]; uint8_t *loc = buf + rel.offset; const uint64_t val = sec.getRelocTargetVA(sec.file, rel.type, rel.addend, @@ -757,14 +757,14 @@ void AArch64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const { switch (rel.expr) { case R_AARCH64_GOT_PAGE_PC: if (i + 1 < size && - relaxer.tryRelaxAdrpLdr(rel, sec.relocations[i + 1], secAddr, buf)) { + relaxer.tryRelaxAdrpLdr(rel, sec.relocs()[i + 1], secAddr, buf)) { ++i; continue; } break; case R_AARCH64_PAGE_PC: if (i + 1 < size && - relaxer.tryRelaxAdrpAdd(rel, sec.relocations[i + 1], secAddr, buf)) { + relaxer.tryRelaxAdrpAdd(rel, sec.relocs()[i + 1], secAddr, buf)) { ++i; continue; } diff --git a/lld/ELF/Arch/PPC.cpp b/lld/ELF/Arch/PPC.cpp index cbf2545..87942c1 100644 --- a/lld/ELF/Arch/PPC.cpp +++ b/lld/ELF/Arch/PPC.cpp @@ -486,7 +486,7 @@ void PPC::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const { uint64_t secAddr = sec.getOutputSection()->addr; if (auto *s = dyn_cast(&sec)) secAddr += s->outSecOff; - for (const Relocation &rel : sec.relocations) { + for (const Relocation &rel : sec.relocs()) { uint8_t *loc = buf + rel.offset; const uint64_t val = SignExtend64( sec.getRelocTargetVA(sec.file, rel.type, rel.addend, diff --git a/lld/ELF/Arch/PPC64.cpp b/lld/ELF/Arch/PPC64.cpp index 901e69b..d9a6d33 100644 --- a/lld/ELF/Arch/PPC64.cpp +++ b/lld/ELF/Arch/PPC64.cpp @@ -1518,7 +1518,7 @@ void PPC64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const { if (auto *s = dyn_cast(&sec)) secAddr += s->outSecOff; uint64_t lastPPCRelaxedRelocOff = -1; - for (const Relocation &rel : sec.relocations) { + for (const Relocation &rel : sec.relocs()) { uint8_t *loc = buf + rel.offset; const uint64_t val = sec.getRelocTargetVA(sec.file, rel.type, 
rel.addend, diff --git a/lld/ELF/Arch/RISCV.cpp b/lld/ELF/Arch/RISCV.cpp index 3074455..c339f89 100644 --- a/lld/ELF/Arch/RISCV.cpp +++ b/lld/ELF/Arch/RISCV.cpp @@ -513,11 +513,11 @@ static void initSymbolAnchors() { continue; for (InputSection *sec : getInputSections(*osec, storage)) { sec->relaxAux = make(); - if (sec->relocations.size()) { + if (sec->relocs().size()) { sec->relaxAux->relocDeltas = - std::make_unique(sec->relocations.size()); + std::make_unique(sec->relocs().size()); sec->relaxAux->relocTypes = - std::make_unique(sec->relocations.size()); + std::make_unique(sec->relocs().size()); } } } @@ -617,7 +617,7 @@ static bool relax(InputSection &sec) { DenseMap valueDelta; ArrayRef sa = makeArrayRef(aux.anchors); uint32_t delta = 0; - for (auto [i, r] : llvm::enumerate(sec.relocations)) { + for (auto [i, r] : llvm::enumerate(sec.relocs())) { for (; sa.size() && sa[0].offset <= r.offset; sa = sa.slice(1)) if (!sa[0].end) valueDelta[sa[0].d] = delta; @@ -629,9 +629,9 @@ static bool relax(InputSection &sec) { sa = makeArrayRef(aux.anchors); delta = 0; - std::fill_n(aux.relocTypes.get(), sec.relocations.size(), R_RISCV_NONE); + std::fill_n(aux.relocTypes.get(), sec.relocs().size(), R_RISCV_NONE); aux.writes.clear(); - for (auto [i, r] : llvm::enumerate(sec.relocations)) { + for (auto [i, r] : llvm::enumerate(sec.relocs())) { const uint64_t loc = secAddr + r.offset - delta; uint32_t &cur = aux.relocDeltas[i], remove = 0; switch (r.type) { @@ -646,16 +646,16 @@ static bool relax(InputSection &sec) { } case R_RISCV_CALL: case R_RISCV_CALL_PLT: - if (i + 1 != sec.relocations.size() && - sec.relocations[i + 1].type == R_RISCV_RELAX) + if (i + 1 != sec.relocs().size() && + sec.relocs()[i + 1].type == R_RISCV_RELAX) relaxCall(sec, i, loc, r, remove); break; case R_RISCV_TPREL_HI20: case R_RISCV_TPREL_ADD: case R_RISCV_TPREL_LO12_I: case R_RISCV_TPREL_LO12_S: - if (i + 1 != sec.relocations.size() && - sec.relocations[i + 1].type == R_RISCV_RELAX) + if (i + 1 != sec.relocs().size() && + sec.relocs()[i + 1].type == R_RISCV_RELAX) relaxTlsLe(sec, i, loc, r, remove); break; } @@ -727,10 +727,9 @@ void elf::riscvFinalizeRelax(int passes) { if (!aux.relocDeltas) continue; - auto &rels = sec->relocations; + MutableArrayRef rels = sec->relocs(); ArrayRef old = sec->content(); - size_t newSize = - old.size() - aux.relocDeltas[sec->relocations.size() - 1]; + size_t newSize = old.size() - aux.relocDeltas[rels.size() - 1]; size_t writesIdx = 0; uint8_t *p = context().bAlloc.Allocate(newSize); uint64_t offset = 0; diff --git a/lld/ELF/Arch/X86.cpp b/lld/ELF/Arch/X86.cpp index 79ad7db..eb068f4 100644 --- a/lld/ELF/Arch/X86.cpp +++ b/lld/ELF/Arch/X86.cpp @@ -472,7 +472,7 @@ void X86::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const { uint64_t secAddr = sec.getOutputSection()->addr; if (auto *s = dyn_cast(&sec)) secAddr += s->outSecOff; - for (const Relocation &rel : sec.relocations) { + for (const Relocation &rel : sec.relocs()) { uint8_t *loc = buf + rel.offset; const uint64_t val = SignExtend64( sec.getRelocTargetVA(sec.file, rel.type, rel.addend, diff --git a/lld/ELF/Arch/X86_64.cpp b/lld/ELF/Arch/X86_64.cpp index 74456d5..349ccd2 100644 --- a/lld/ELF/Arch/X86_64.cpp +++ b/lld/ELF/Arch/X86_64.cpp @@ -151,9 +151,9 @@ static JmpInsnOpcode getJmpInsnType(const uint8_t *first, // Returns the maximum size of the vector if no such relocation is found. 
static unsigned getRelocationWithOffset(const InputSection &is, uint64_t offset) { - unsigned size = is.relocations.size(); + unsigned size = is.relocs().size(); for (unsigned i = size - 1; i + 1 > 0; --i) { - if (is.relocations[i].offset == offset && is.relocations[i].expr != R_NONE) + if (is.relocs()[i].offset == offset && is.relocs()[i].expr != R_NONE) return i; } return size; @@ -247,10 +247,10 @@ bool X86_64::deleteFallThruJmpInsn(InputSection &is, InputFile *file, // If this jmp insn can be removed, it is the last insn and the // relocation is 4 bytes before the end. unsigned rIndex = getRelocationWithOffset(is, is.getSize() - 4); - if (rIndex == is.relocations.size()) + if (rIndex == is.relocs().size()) return false; - Relocation &r = is.relocations[rIndex]; + Relocation &r = is.relocs()[rIndex]; // Check if the relocation corresponds to a direct jmp. const uint8_t *secContents = is.content().data(); @@ -275,10 +275,10 @@ bool X86_64::deleteFallThruJmpInsn(InputSection &is, InputFile *file, unsigned rbIndex = getRelocationWithOffset(is, (is.getSize() - sizeOfDirectJmpInsn - 4)); - if (rbIndex == is.relocations.size()) + if (rbIndex == is.relocs().size()) return false; - Relocation &rB = is.relocations[rbIndex]; + Relocation &rB = is.relocs()[rbIndex]; const uint8_t *jmpInsnB = secContents + rB.offset - 1; JmpInsnOpcode jmpOpcodeB = getJmpInsnType(jmpInsnB - 1, jmpInsnB); @@ -989,7 +989,7 @@ void X86_64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const { uint64_t secAddr = sec.getOutputSection()->addr; if (auto *s = dyn_cast(&sec)) secAddr += s->outSecOff; - for (const Relocation &rel : sec.relocations) { + for (const Relocation &rel : sec.relocs()) { if (rel.expr == R_NONE) // See deleteFallThruJmpInsn continue; uint8_t *loc = buf + rel.offset; diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp index 5238639..a858633 100644 --- a/lld/ELF/InputSection.cpp +++ b/lld/ELF/InputSection.cpp @@ -432,7 +432,7 @@ void InputSection::copyRelocations(uint8_t *buf, ArrayRef rels) { if (RelTy::IsRela) p->r_addend = sym.getVA(addend) - section->getOutputSection()->addr; else if (config->relocatable && type != target.noneRel) - sec->relocations.push_back({R_ABS, type, rel.r_offset, addend, &sym}); + sec->addReloc({R_ABS, type, rel.r_offset, addend, &sym}); } else if (config->emachine == EM_PPC && type == R_PPC_PLTREL24 && p->r_addend >= 0x8000 && sec->file->ppc32Got2) { // Similar to R_MIPS_GPREL{16,32}. If the addend of R_PPC_PLTREL24 @@ -561,7 +561,7 @@ static Relocation *getRISCVPCRelHi20(const Symbol *sym, uint64_t addend) { Relocation r; r.offset = d->value; auto range = - std::equal_range(isec->relocations.begin(), isec->relocations.end(), r, + std::equal_range(isec->relocs().begin(), isec->relocs().end(), r, [](const Relocation &lhs, const Relocation &rhs) { return lhs.offset < rhs.offset; }); @@ -950,7 +950,7 @@ void InputSection::relocateNonAlloc(uint8_t *buf, ArrayRef rels) { static void relocateNonAllocForRelocatable(InputSection *sec, uint8_t *buf) { const unsigned bits = config->is64 ? 64 : 32; - for (const Relocation &rel : sec->relocations) { + for (const Relocation &rel : sec->relocs()) { // InputSection::copyRelocations() adds only R_ABS relocations. 
assert(rel.expr == R_ABS); uint8_t *bufLoc = buf + rel.offset; @@ -1037,7 +1037,7 @@ void InputSectionBase::adjustSplitStackFunctionPrologues(uint8_t *buf, DenseSet prologues; SmallVector morestackCalls; - for (Relocation &rel : relocations) { + for (Relocation &rel : relocs()) { // Ignore calls into the split-stack api. if (rel.sym->getName().startswith("__morestack")) { if (rel.sym->getName().equals("__morestack")) diff --git a/lld/ELF/InputSection.h b/lld/ELF/InputSection.h index ed3f98a..e562f6f 100644 --- a/lld/ELF/InputSection.h +++ b/lld/ELF/InputSection.h @@ -210,6 +210,10 @@ public: // This vector contains such "cooked" relocations. SmallVector relocations; + void addReloc(const Relocation &r) { relocations.push_back(r); } + MutableArrayRef relocs() { return relocations; } + ArrayRef relocs() const { return relocations; } + union { // These are modifiers to jump instructions that are necessary when basic // block sections are enabled. Basic block sections creates opportunities diff --git a/lld/ELF/Relocations.cpp b/lld/ELF/Relocations.cpp index 5278409..015460a 100644 --- a/lld/ELF/Relocations.cpp +++ b/lld/ELF/Relocations.cpp @@ -856,7 +856,7 @@ static void addRelativeReloc(InputSectionBase &isec, uint64_t offsetInSec, // don't store the addend values, so we must write it to the relocated // address. if (part.relrDyn && isec.alignment >= 2 && offsetInSec % 2 == 0) { - isec.relocations.push_back({expr, type, offsetInSec, addend, &sym}); + isec.addReloc({expr, type, offsetInSec, addend, &sym}); if (shard) part.relrDyn->relocsVec[parallel::getThreadIndex()].push_back( {&isec, offsetInSec}); @@ -893,7 +893,7 @@ static void addGotEntry(Symbol &sym) { // Otherwise, the value is either a link-time constant or the load base // plus a constant. if (!config->isPic || isAbsolute(sym)) - in.got->relocations.push_back({R_ABS, target->symbolicRel, off, 0, &sym}); + in.got->addConstant({R_ABS, target->symbolicRel, off, 0, &sym}); else addRelativeReloc(*in.got, off, sym, 0, R_ABS, target->symbolicRel); } @@ -902,7 +902,7 @@ static void addTpOffsetGotEntry(Symbol &sym) { in.got->addEntry(sym); uint64_t off = sym.getGotOffset(); if (!sym.isPreemptible && !config->isPic) { - in.got->relocations.push_back({R_TPREL, target->symbolicRel, off, 0, &sym}); + in.got->addConstant({R_TPREL, target->symbolicRel, off, 0, &sym}); return; } mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible( @@ -1075,8 +1075,8 @@ void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset, // handling of GOT-generating relocations. 
if (isStaticLinkTimeConstant(expr, type, sym, offset) || (!config->isPic && sym.isUndefWeak())) { - sec->relocations.push_back({expr, type, offset, addend, &sym}); - return; + sec->addReloc({expr, type, offset, addend, &sym}); + return; } bool canWrite = (sec->flags & SHF_WRITE) || !config->zText; @@ -1132,7 +1132,7 @@ void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset, getLocation(*sec, sym, offset)); sym.setFlags(NEEDS_COPY); } - sec->relocations.push_back({expr, type, offset, addend, &sym}); + sec->addReloc({expr, type, offset, addend, &sym}); return; } @@ -1169,7 +1169,7 @@ void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset, "' cannot be preempted; recompile with -fPIE" + getLocation(*sec, sym, offset)); sym.setFlags(NEEDS_COPY | NEEDS_PLT); - sec->relocations.push_back({expr, type, offset, addend, &sym}); + sec->addReloc({expr, type, offset, addend, &sym}); return; } } @@ -1191,12 +1191,12 @@ static unsigned handleMipsTlsRelocation(RelType type, Symbol &sym, int64_t addend, RelExpr expr) { if (expr == R_MIPS_TLSLD) { in.mipsGot->addTlsIndex(*c.file); - c.relocations.push_back({expr, type, offset, addend, &sym}); + c.addReloc({expr, type, offset, addend, &sym}); return 1; } if (expr == R_MIPS_TLSGD) { in.mipsGot->addDynTlsEntry(*c.file, sym); - c.relocations.push_back({expr, type, offset, addend, &sym}); + c.addReloc({expr, type, offset, addend, &sym}); return 1; } return 0; @@ -1229,7 +1229,7 @@ static unsigned handleTlsRelocation(RelType type, Symbol &sym, config->shared) { if (expr != R_TLSDESC_CALL) { sym.setFlags(NEEDS_TLSDESC); - c.relocations.push_back({expr, type, offset, addend, &sym}); + c.addReloc({expr, type, offset, addend, &sym}); } return 1; } @@ -1259,15 +1259,14 @@ static unsigned handleTlsRelocation(RelType type, Symbol &sym, expr)) { // Local-Dynamic relocs can be relaxed to Local-Exec. if (toExecRelax) { - c.relocations.push_back( - {target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE), type, offset, - addend, &sym}); + c.addReloc({target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE), type, + offset, addend, &sym}); return target->getTlsGdRelaxSkip(type); } if (expr == R_TLSLD_HINT) return 1; ctx.needsTlsLd.store(true, std::memory_order_relaxed); - c.relocations.push_back({expr, type, offset, addend, &sym}); + c.addReloc({expr, type, offset, addend, &sym}); return 1; } @@ -1275,7 +1274,7 @@ static unsigned handleTlsRelocation(RelType type, Symbol &sym, if (expr == R_DTPREL) { if (toExecRelax) expr = target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE); - c.relocations.push_back({expr, type, offset, addend, &sym}); + c.addReloc({expr, type, offset, addend, &sym}); return 1; } @@ -1283,7 +1282,7 @@ static unsigned handleTlsRelocation(RelType type, Symbol &sym, // thread pointer is stored in the got. This cannot be relaxed to Local-Exec. if (expr == R_TLSLD_GOT_OFF) { sym.setFlags(NEEDS_GOT_DTPREL); - c.relocations.push_back({expr, type, offset, addend, &sym}); + c.addReloc({expr, type, offset, addend, &sym}); return 1; } @@ -1291,7 +1290,7 @@ static unsigned handleTlsRelocation(RelType type, Symbol &sym, R_TLSDESC_GOTPLT, R_TLSGD_GOT, R_TLSGD_GOTPLT, R_TLSGD_PC>(expr)) { if (!toExecRelax) { sym.setFlags(NEEDS_TLSGD); - c.relocations.push_back({expr, type, offset, addend, &sym}); + c.addReloc({expr, type, offset, addend, &sym}); return 1; } @@ -1299,13 +1298,11 @@ static unsigned handleTlsRelocation(RelType type, Symbol &sym, // depending on the symbol being locally defined or not. 
if (sym.isPreemptible) { sym.setFlags(NEEDS_TLSGD_TO_IE); - c.relocations.push_back( - {target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_IE), type, offset, - addend, &sym}); + c.addReloc({target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_IE), type, + offset, addend, &sym}); } else { - c.relocations.push_back( - {target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_LE), type, offset, - addend, &sym}); + c.addReloc({target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_LE), type, + offset, addend, &sym}); } return target->getTlsGdRelaxSkip(type); } @@ -1316,15 +1313,14 @@ static unsigned handleTlsRelocation(RelType type, Symbol &sym, // Initial-Exec relocs can be relaxed to Local-Exec if the symbol is locally // defined. if (toExecRelax && isLocalInExecutable) { - c.relocations.push_back( - {R_RELAX_TLS_IE_TO_LE, type, offset, addend, &sym}); + c.addReloc({R_RELAX_TLS_IE_TO_LE, type, offset, addend, &sym}); } else if (expr != R_TLSIE_HINT) { sym.setFlags(NEEDS_TLSIE); // R_GOT needs a relative relocation for PIC on i386 and Hexagon. if (expr == R_GOT && config->isPic && !target->usesOnlyLowPageBits(type)) addRelativeReloc(c, offset, sym, addend, expr, type); else - c.relocations.push_back({expr, type, offset, addend, &sym}); + c.addReloc({expr, type, offset, addend, &sym}); } return 1; } @@ -1487,7 +1483,7 @@ void RelocationScanner::scan(ArrayRef rels) { // R_RISCV_PCREL_HI20 and R_PPC64_ADDR64. if (config->emachine == EM_RISCV || (config->emachine == EM_PPC64 && sec->name == ".toc")) - llvm::stable_sort(sec->relocations, + llvm::stable_sort(sec->relocs(), [](const Relocation &lhs, const Relocation &rhs) { return lhs.offset < rhs.offset; }); @@ -1677,8 +1673,7 @@ void elf::postScanRelocations() { uint64_t off = in.got->getGlobalDynOffset(sym); if (isLocalInExecutable) // Write one to the GOT slot. 
- in.got->relocations.push_back( - {R_ADDEND, target->symbolicRel, off, 1, &sym}); + in.got->addConstant({R_ADDEND, target->symbolicRel, off, 1, &sym}); else mainPart->relaDyn->addSymbolReloc(target->tlsModuleIndexRel, *in.got, off, sym); @@ -1690,8 +1685,7 @@ void elf::postScanRelocations() { mainPart->relaDyn->addSymbolReloc(target->tlsOffsetRel, *in.got, offsetOff, sym); else - in.got->relocations.push_back( - {R_ABS, target->tlsOffsetRel, offsetOff, 0, &sym}); + in.got->addConstant({R_ABS, target->tlsOffsetRel, offsetOff, 0, &sym}); } if (flags & NEEDS_TLSGD_TO_IE) { in.got->addEntry(sym); @@ -1700,7 +1694,7 @@ void elf::postScanRelocations() { } if (flags & NEEDS_GOT_DTPREL) { in.got->addEntry(sym); - in.got->relocations.push_back( + in.got->addConstant( {R_ABS, target->tlsOffsetRel, sym.getGotOffset(), 0, &sym}); } @@ -1714,7 +1708,7 @@ void elf::postScanRelocations() { mainPart->relaDyn->addReloc( {target->tlsModuleIndexRel, in.got.get(), in.got->getTlsIndexOff()}); else - in.got->relocations.push_back( + in.got->addConstant( {R_ADDEND, target->symbolicRel, in.got->getTlsIndexOff(), 1, &dummy}); } @@ -2164,7 +2158,7 @@ bool ThunkCreator::createThunks(uint32_t pass, forEachInputSectionDescription( outputSections, [&](OutputSection *os, InputSectionDescription *isd) { for (InputSection *isec : isd->sections) - for (Relocation &rel : isec->relocations) { + for (Relocation &rel : isec->relocs()) { uint64_t src = isec->getVA(rel.offset); // If we are a relocation to an existing Thunk, check if it is @@ -2224,7 +2218,7 @@ bool elf::hexagonNeedsTLSSymbol(ArrayRef outputSections) { forEachInputSectionDescription( outputSections, [&](OutputSection *os, InputSectionDescription *isd) { for (InputSection *isec : isd->sections) - for (Relocation &rel : isec->relocations) + for (Relocation &rel : isec->relocs()) if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) { needTlsSymbol = true; return; @@ -2241,7 +2235,7 @@ void elf::hexagonTLSSymbolUpdate(ArrayRef outputSections) { forEachInputSectionDescription( outputSections, [&](OutputSection *os, InputSectionDescription *isd) { for (InputSection *isec : isd->sections) - for (Relocation &rel : isec->relocations) + for (Relocation &rel : isec->relocs()) if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) { if (needEntry) { sym->allocateAux(); diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp index eb5b230..05f6ade 100644 --- a/lld/ELF/SyntheticSections.cpp +++ b/lld/ELF/SyntheticSections.cpp @@ -618,6 +618,7 @@ GotSection::GotSection() numEntries = target->gotHeaderEntriesNum; } +void GotSection::addConstant(const Relocation &r) { relocations.push_back(r); } void GotSection::addEntry(Symbol &sym) { assert(sym.auxIdx == symAux.size() - 1); symAux.back().gotIdx = numEntries++; @@ -1589,14 +1590,14 @@ void RelocationBaseSection::addSymbolReloc(RelType dynType, } void RelocationBaseSection::addAddendOnlyRelocIfNonPreemptible( - RelType dynType, InputSectionBase &isec, uint64_t offsetInSec, Symbol &sym, + RelType dynType, GotSection &sec, uint64_t offsetInSec, Symbol &sym, RelType addendRelType) { // No need to write an addend to the section for preemptible symbols. 
if (sym.isPreemptible) - addReloc({dynType, &isec, offsetInSec, DynamicReloc::AgainstSymbol, sym, 0, + addReloc({dynType, &sec, offsetInSec, DynamicReloc::AgainstSymbol, sym, 0, R_ABS}); else - addReloc(DynamicReloc::AddendOnlyWithTargetVA, dynType, isec, offsetInSec, + addReloc(DynamicReloc::AddendOnlyWithTargetVA, dynType, sec, offsetInSec, sym, 0, R_ABS, addendRelType); } diff --git a/lld/ELF/SyntheticSections.h b/lld/ELF/SyntheticSections.h index e2f719a4..58e5b12 100644 --- a/lld/ELF/SyntheticSections.h +++ b/lld/ELF/SyntheticSections.h @@ -103,6 +103,7 @@ public: bool isNeeded() const override; void writeTo(uint8_t *buf) override; + void addConstant(const Relocation &r); void addEntry(Symbol &sym); bool addTlsDescEntry(Symbol &sym); bool addDynTlsEntry(Symbol &sym); @@ -515,8 +516,7 @@ public: } /// Add a dynamic relocation using the target address of \p sym as the addend /// if \p sym is non-preemptible. Otherwise add a relocation against \p sym. - void addAddendOnlyRelocIfNonPreemptible(RelType dynType, - InputSectionBase &isec, + void addAddendOnlyRelocIfNonPreemptible(RelType dynType, GotSection &sec, uint64_t offsetInSec, Symbol &sym, RelType addendRelType); template @@ -526,8 +526,7 @@ public: // Write the addends to the relocated address if required. We skip // it if the written value would be zero. if (config->writeAddends && (expr != R_ADDEND || addend != 0)) - sec.relocations.push_back( - {expr, addendRelType, offsetInSec, addend, &sym}); + sec.addReloc({expr, addendRelType, offsetInSec, addend, &sym}); addReloc({dynType, &sec, offsetInSec, kind, sym, addend, expr}); } bool isNeeded() const override { diff --git a/lld/ELF/Target.cpp b/lld/ELF/Target.cpp index e3b8284..3873c7a 100644 --- a/lld/ELF/Target.cpp +++ b/lld/ELF/Target.cpp @@ -157,7 +157,7 @@ void TargetInfo::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const { uint64_t secAddr = sec.getOutputSection()->addr; if (auto *s = dyn_cast(&sec)) secAddr += s->outSecOff; - for (const Relocation &rel : sec.relocations) { + for (const Relocation &rel : sec.relocs()) { uint8_t *loc = buf + rel.offset; const uint64_t val = SignExtend64( sec.getRelocTargetVA(sec.file, rel.type, rel.addend, -- 2.7.4
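
Illustrative sketch (appended note, not part of the commit): the patch is
mechanical; call sites stop naming the `relocations` member and instead go
through the new accessors, InputSectionBase::addReloc()/relocs() and
GotSection::addConstant(). The standalone C++ sketch below models only that
accessor pattern. It is simplified: std::vector and std::span stand in for
llvm::SmallVector and llvm::MutableArrayRef, and the class is a made-up
stand-in, not lld's InputSectionBase.

  // Build with: c++ -std=c++20 sketch.cpp
  #include <cstdint>
  #include <iostream>
  #include <span>
  #include <vector>

  // Trimmed-down stand-in for lld's Relocation with only the fields used here.
  struct Relocation {
    uint64_t offset = 0;
    int64_t addend = 0;
  };

  // Stand-in for InputSectionBase: the container stays an implementation
  // detail; callers append with addReloc() and iterate via relocs().
  class SectionSketch {
    std::vector<Relocation> relocations; // lld: SmallVector<Relocation, 0>

  public:
    void addReloc(const Relocation &r) { relocations.push_back(r); }
    // lld returns MutableArrayRef<Relocation> / ArrayRef<Relocation>.
    std::span<Relocation> relocs() { return relocations; }
    std::span<const Relocation> relocs() const { return relocations; }
  };

  int main() {
    SectionSketch sec;
    sec.addReloc({0x10, 4});
    sec.addReloc({0x20, 0});
    // Loops read relocs() rather than the member, so the underlying storage
    // can change later without touching call sites like this one.
    for (const Relocation &r : sec.relocs())
      std::cout << "reloc at offset 0x" << std::hex << r.offset << '\n';
  }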
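
A second hedged sketch of the stated motivation (changing `relocations` from a
SmallVector to a pointer): once every caller uses addReloc()/relocs(), the
member can be swapped for a pointer to storage owned elsewhere without editing
any call site. The ownership model and names below are hypothetical and only
illustrate that callers are unaffected; they do not describe the actual
follow-up change in lld.

  // Build with: c++ -std=c++20 sketch2.cpp
  #include <cstdint>
  #include <span>
  #include <vector>

  struct Relocation {
    uint64_t offset = 0;
    int64_t addend = 0;
  };

  class SectionSketch {
    // The section now holds a pointer instead of owning a vector directly.
    std::vector<Relocation> *relocations = nullptr;

  public:
    explicit SectionSketch(std::vector<Relocation> &storage)
        : relocations(&storage) {}
    void addReloc(const Relocation &r) { relocations->push_back(r); }
    std::span<Relocation> relocs() { return *relocations; }
  };

  int main() {
    std::vector<Relocation> pool; // storage owned outside the section
    SectionSketch sec(pool);
    sec.addReloc({0x10, 4});
    // Identical loop to the owning-container version: no call-site changes.
    for (const Relocation &r : sec.relocs())
      (void)r;
  }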