to prepare for changing `relocations` from a SmallVector to a pointer.
Also change the `isec` parameter in `addAddendOnlyRelocIfNonPreemptible` to `GotSection &`.
// and replace the relocation with a R_AARCH64_JUMP26 branch relocation.
// Case 4: No relocation. We must create a new R_AARCH64_JUMP26 branch
// relocation at the offset.
- auto relIt = llvm::find_if(isec->relocations, [=](const Relocation &r) {
+ auto relIt = llvm::find_if(isec->relocs(), [=](const Relocation &r) {
return r.offset == patcheeOffset;
});
- if (relIt != isec->relocations.end() &&
+ if (relIt != isec->relocs().end() &&
(relIt->type == R_AARCH64_JUMP26 || relIt->expr == R_RELAX_TLS_IE_TO_LE))
return;
return Relocation{R_PC, R_AARCH64_JUMP26, offset, 0, patchSym};
};
- if (relIt != isec->relocations.end()) {
- ps->relocations.push_back(
- {relIt->expr, relIt->type, 0, relIt->addend, relIt->sym});
+ if (relIt != isec->relocs().end()) {
+ ps->addReloc({relIt->expr, relIt->type, 0, relIt->addend, relIt->sym});
*relIt = makeRelToPatch(patcheeOffset, ps->patchSym);
} else
- isec->relocations.push_back(makeRelToPatch(patcheeOffset, ps->patchSym));
+ isec->addReloc(makeRelToPatch(patcheeOffset, ps->patchSym));
}
// Scan all the instructions in InputSectionDescription, for each instance of
else
write32le(buf, 0x9000f000);
// If we have a relocation then apply it.
- if (!relocations.empty()) {
+ if (!relocs().empty()) {
target->relocateAlloc(*this, buf);
return;
}
// Find a relocation for the branch if it exists. This will be used
// to determine the target.
uint64_t branchOff = off + 4;
- auto relIt = llvm::find_if(isec->relocations, [=](const Relocation &r) {
+ auto relIt = llvm::find_if(isec->relocs(), [=](const Relocation &r) {
return r.offset == branchOff &&
(r.type == R_ARM_THM_JUMP19 || r.type == R_ARM_THM_JUMP24 ||
r.type == R_ARM_THM_CALL);
});
- if (relIt != isec->relocations.end())
+ if (relIt != isec->relocs().end())
scanRes.rel = &(*relIt);
if (branchDestInFirstRegion(isec, branchOff, instr2, scanRes.rel)) {
if (patchInRange(isec, branchOff, instr2)) {
patchRelType = R_ARM_JUMP24;
patchRelAddend -= 4;
}
- psec->relocations.push_back(
+ psec->addReloc(
Relocation{sr.rel->expr, patchRelType, 0, patchRelAddend, sr.rel->sym});
// Redirect the existing branch relocation to the patch.
sr.rel->expr = R_PC;
type = R_ARM_THM_JUMP24;
else
type = R_ARM_THM_CALL;
- isec->relocations.push_back(
- Relocation{R_PC, type, sr.off, -4, psec->patchSym});
+ isec->addReloc(Relocation{R_PC, type, sr.off, -4, psec->patchSym});
}
patches.push_back(psec);
}
uint64_t secAddr = sec.getOutputSection()->addr;
if (auto *s = dyn_cast<InputSection>(&sec))
secAddr += s->outSecOff;
- AArch64Relaxer relaxer(sec.relocations);
- for (size_t i = 0, size = sec.relocations.size(); i != size; ++i) {
- const Relocation &rel = sec.relocations[i];
+ AArch64Relaxer relaxer(sec.relocs());
+ for (size_t i = 0, size = sec.relocs().size(); i != size; ++i) {
+ const Relocation &rel = sec.relocs()[i];
uint8_t *loc = buf + rel.offset;
const uint64_t val =
sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
switch (rel.expr) {
case R_AARCH64_GOT_PAGE_PC:
if (i + 1 < size &&
- relaxer.tryRelaxAdrpLdr(rel, sec.relocations[i + 1], secAddr, buf)) {
+ relaxer.tryRelaxAdrpLdr(rel, sec.relocs()[i + 1], secAddr, buf)) {
++i;
continue;
}
break;
case R_AARCH64_PAGE_PC:
if (i + 1 < size &&
- relaxer.tryRelaxAdrpAdd(rel, sec.relocations[i + 1], secAddr, buf)) {
+ relaxer.tryRelaxAdrpAdd(rel, sec.relocs()[i + 1], secAddr, buf)) {
++i;
continue;
}
uint64_t secAddr = sec.getOutputSection()->addr;
if (auto *s = dyn_cast<InputSection>(&sec))
secAddr += s->outSecOff;
- for (const Relocation &rel : sec.relocations) {
+ for (const Relocation &rel : sec.relocs()) {
uint8_t *loc = buf + rel.offset;
const uint64_t val = SignExtend64(
sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
if (auto *s = dyn_cast<InputSection>(&sec))
secAddr += s->outSecOff;
uint64_t lastPPCRelaxedRelocOff = -1;
- for (const Relocation &rel : sec.relocations) {
+ for (const Relocation &rel : sec.relocs()) {
uint8_t *loc = buf + rel.offset;
const uint64_t val =
sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
continue;
for (InputSection *sec : getInputSections(*osec, storage)) {
sec->relaxAux = make<RISCVRelaxAux>();
- if (sec->relocations.size()) {
+ if (sec->relocs().size()) {
sec->relaxAux->relocDeltas =
- std::make_unique<uint32_t[]>(sec->relocations.size());
+ std::make_unique<uint32_t[]>(sec->relocs().size());
sec->relaxAux->relocTypes =
- std::make_unique<RelType[]>(sec->relocations.size());
+ std::make_unique<RelType[]>(sec->relocs().size());
}
}
}
DenseMap<const Defined *, uint64_t> valueDelta;
ArrayRef<SymbolAnchor> sa = makeArrayRef(aux.anchors);
uint32_t delta = 0;
- for (auto [i, r] : llvm::enumerate(sec.relocations)) {
+ for (auto [i, r] : llvm::enumerate(sec.relocs())) {
for (; sa.size() && sa[0].offset <= r.offset; sa = sa.slice(1))
if (!sa[0].end)
valueDelta[sa[0].d] = delta;
sa = makeArrayRef(aux.anchors);
delta = 0;
- std::fill_n(aux.relocTypes.get(), sec.relocations.size(), R_RISCV_NONE);
+ std::fill_n(aux.relocTypes.get(), sec.relocs().size(), R_RISCV_NONE);
aux.writes.clear();
- for (auto [i, r] : llvm::enumerate(sec.relocations)) {
+ for (auto [i, r] : llvm::enumerate(sec.relocs())) {
const uint64_t loc = secAddr + r.offset - delta;
uint32_t &cur = aux.relocDeltas[i], remove = 0;
switch (r.type) {
}
case R_RISCV_CALL:
case R_RISCV_CALL_PLT:
- if (i + 1 != sec.relocations.size() &&
- sec.relocations[i + 1].type == R_RISCV_RELAX)
+ if (i + 1 != sec.relocs().size() &&
+ sec.relocs()[i + 1].type == R_RISCV_RELAX)
relaxCall(sec, i, loc, r, remove);
break;
case R_RISCV_TPREL_HI20:
case R_RISCV_TPREL_ADD:
case R_RISCV_TPREL_LO12_I:
case R_RISCV_TPREL_LO12_S:
- if (i + 1 != sec.relocations.size() &&
- sec.relocations[i + 1].type == R_RISCV_RELAX)
+ if (i + 1 != sec.relocs().size() &&
+ sec.relocs()[i + 1].type == R_RISCV_RELAX)
relaxTlsLe(sec, i, loc, r, remove);
break;
}
if (!aux.relocDeltas)
continue;
- auto &rels = sec->relocations;
+ MutableArrayRef<Relocation> rels = sec->relocs();
ArrayRef<uint8_t> old = sec->content();
- size_t newSize =
- old.size() - aux.relocDeltas[sec->relocations.size() - 1];
+ size_t newSize = old.size() - aux.relocDeltas[rels.size() - 1];
size_t writesIdx = 0;
uint8_t *p = context().bAlloc.Allocate<uint8_t>(newSize);
uint64_t offset = 0;
uint64_t secAddr = sec.getOutputSection()->addr;
if (auto *s = dyn_cast<InputSection>(&sec))
secAddr += s->outSecOff;
- for (const Relocation &rel : sec.relocations) {
+ for (const Relocation &rel : sec.relocs()) {
uint8_t *loc = buf + rel.offset;
const uint64_t val = SignExtend64(
sec.getRelocTargetVA(sec.file, rel.type, rel.addend,
// Returns the maximum size of the vector if no such relocation is found.
static unsigned getRelocationWithOffset(const InputSection &is,
uint64_t offset) {
- unsigned size = is.relocations.size();
+ unsigned size = is.relocs().size();
for (unsigned i = size - 1; i + 1 > 0; --i) {
- if (is.relocations[i].offset == offset && is.relocations[i].expr != R_NONE)
+ if (is.relocs()[i].offset == offset && is.relocs()[i].expr != R_NONE)
return i;
}
return size;
// If this jmp insn can be removed, it is the last insn and the
// relocation is 4 bytes before the end.
unsigned rIndex = getRelocationWithOffset(is, is.getSize() - 4);
- if (rIndex == is.relocations.size())
+ if (rIndex == is.relocs().size())
return false;
- Relocation &r = is.relocations[rIndex];
+ Relocation &r = is.relocs()[rIndex];
// Check if the relocation corresponds to a direct jmp.
const uint8_t *secContents = is.content().data();
unsigned rbIndex =
getRelocationWithOffset(is, (is.getSize() - sizeOfDirectJmpInsn - 4));
- if (rbIndex == is.relocations.size())
+ if (rbIndex == is.relocs().size())
return false;
- Relocation &rB = is.relocations[rbIndex];
+ Relocation &rB = is.relocs()[rbIndex];
const uint8_t *jmpInsnB = secContents + rB.offset - 1;
JmpInsnOpcode jmpOpcodeB = getJmpInsnType(jmpInsnB - 1, jmpInsnB);
uint64_t secAddr = sec.getOutputSection()->addr;
if (auto *s = dyn_cast<InputSection>(&sec))
secAddr += s->outSecOff;
- for (const Relocation &rel : sec.relocations) {
+ for (const Relocation &rel : sec.relocs()) {
if (rel.expr == R_NONE) // See deleteFallThruJmpInsn
continue;
uint8_t *loc = buf + rel.offset;
if (RelTy::IsRela)
p->r_addend = sym.getVA(addend) - section->getOutputSection()->addr;
else if (config->relocatable && type != target.noneRel)
- sec->relocations.push_back({R_ABS, type, rel.r_offset, addend, &sym});
+ sec->addReloc({R_ABS, type, rel.r_offset, addend, &sym});
} else if (config->emachine == EM_PPC && type == R_PPC_PLTREL24 &&
p->r_addend >= 0x8000 && sec->file->ppc32Got2) {
// Similar to R_MIPS_GPREL{16,32}. If the addend of R_PPC_PLTREL24
Relocation r;
r.offset = d->value;
auto range =
- std::equal_range(isec->relocations.begin(), isec->relocations.end(), r,
+ std::equal_range(isec->relocs().begin(), isec->relocs().end(), r,
[](const Relocation &lhs, const Relocation &rhs) {
return lhs.offset < rhs.offset;
});
static void relocateNonAllocForRelocatable(InputSection *sec, uint8_t *buf) {
const unsigned bits = config->is64 ? 64 : 32;
- for (const Relocation &rel : sec->relocations) {
+ for (const Relocation &rel : sec->relocs()) {
// InputSection::copyRelocations() adds only R_ABS relocations.
assert(rel.expr == R_ABS);
uint8_t *bufLoc = buf + rel.offset;
DenseSet<Defined *> prologues;
SmallVector<Relocation *, 0> morestackCalls;
- for (Relocation &rel : relocations) {
+ for (Relocation &rel : relocs()) {
// Ignore calls into the split-stack api.
if (rel.sym->getName().startswith("__morestack")) {
if (rel.sym->getName().equals("__morestack"))
// This vector contains such "cooked" relocations.
SmallVector<Relocation, 0> relocations;
+ void addReloc(const Relocation &r) { relocations.push_back(r); }
+ MutableArrayRef<Relocation> relocs() { return relocations; }
+ ArrayRef<Relocation> relocs() const { return relocations; }
+
union {
// These are modifiers to jump instructions that are necessary when basic
// block sections are enabled. Basic block sections creates opportunities
// don't store the addend values, so we must write it to the relocated
// address.
if (part.relrDyn && isec.alignment >= 2 && offsetInSec % 2 == 0) {
- isec.relocations.push_back({expr, type, offsetInSec, addend, &sym});
+ isec.addReloc({expr, type, offsetInSec, addend, &sym});
if (shard)
part.relrDyn->relocsVec[parallel::getThreadIndex()].push_back(
{&isec, offsetInSec});
// Otherwise, the value is either a link-time constant or the load base
// plus a constant.
if (!config->isPic || isAbsolute(sym))
- in.got->relocations.push_back({R_ABS, target->symbolicRel, off, 0, &sym});
+ in.got->addConstant({R_ABS, target->symbolicRel, off, 0, &sym});
else
addRelativeReloc(*in.got, off, sym, 0, R_ABS, target->symbolicRel);
}
in.got->addEntry(sym);
uint64_t off = sym.getGotOffset();
if (!sym.isPreemptible && !config->isPic) {
- in.got->relocations.push_back({R_TPREL, target->symbolicRel, off, 0, &sym});
+ in.got->addConstant({R_TPREL, target->symbolicRel, off, 0, &sym});
return;
}
mainPart->relaDyn->addAddendOnlyRelocIfNonPreemptible(
// handling of GOT-generating relocations.
if (isStaticLinkTimeConstant(expr, type, sym, offset) ||
(!config->isPic && sym.isUndefWeak())) {
- sec->relocations.push_back({expr, type, offset, addend, &sym});
- return;
+ sec->addReloc({expr, type, offset, addend, &sym});
+ return;
}
bool canWrite = (sec->flags & SHF_WRITE) || !config->zText;
getLocation(*sec, sym, offset));
sym.setFlags(NEEDS_COPY);
}
- sec->relocations.push_back({expr, type, offset, addend, &sym});
+ sec->addReloc({expr, type, offset, addend, &sym});
return;
}
"' cannot be preempted; recompile with -fPIE" +
getLocation(*sec, sym, offset));
sym.setFlags(NEEDS_COPY | NEEDS_PLT);
- sec->relocations.push_back({expr, type, offset, addend, &sym});
+ sec->addReloc({expr, type, offset, addend, &sym});
return;
}
}
int64_t addend, RelExpr expr) {
if (expr == R_MIPS_TLSLD) {
in.mipsGot->addTlsIndex(*c.file);
- c.relocations.push_back({expr, type, offset, addend, &sym});
+ c.addReloc({expr, type, offset, addend, &sym});
return 1;
}
if (expr == R_MIPS_TLSGD) {
in.mipsGot->addDynTlsEntry(*c.file, sym);
- c.relocations.push_back({expr, type, offset, addend, &sym});
+ c.addReloc({expr, type, offset, addend, &sym});
return 1;
}
return 0;
config->shared) {
if (expr != R_TLSDESC_CALL) {
sym.setFlags(NEEDS_TLSDESC);
- c.relocations.push_back({expr, type, offset, addend, &sym});
+ c.addReloc({expr, type, offset, addend, &sym});
}
return 1;
}
expr)) {
// Local-Dynamic relocs can be relaxed to Local-Exec.
if (toExecRelax) {
- c.relocations.push_back(
- {target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE), type, offset,
- addend, &sym});
+ c.addReloc({target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE), type,
+ offset, addend, &sym});
return target->getTlsGdRelaxSkip(type);
}
if (expr == R_TLSLD_HINT)
return 1;
ctx.needsTlsLd.store(true, std::memory_order_relaxed);
- c.relocations.push_back({expr, type, offset, addend, &sym});
+ c.addReloc({expr, type, offset, addend, &sym});
return 1;
}
if (expr == R_DTPREL) {
if (toExecRelax)
expr = target->adjustTlsExpr(type, R_RELAX_TLS_LD_TO_LE);
- c.relocations.push_back({expr, type, offset, addend, &sym});
+ c.addReloc({expr, type, offset, addend, &sym});
return 1;
}
// thread pointer is stored in the got. This cannot be relaxed to Local-Exec.
if (expr == R_TLSLD_GOT_OFF) {
sym.setFlags(NEEDS_GOT_DTPREL);
- c.relocations.push_back({expr, type, offset, addend, &sym});
+ c.addReloc({expr, type, offset, addend, &sym});
return 1;
}
R_TLSDESC_GOTPLT, R_TLSGD_GOT, R_TLSGD_GOTPLT, R_TLSGD_PC>(expr)) {
if (!toExecRelax) {
sym.setFlags(NEEDS_TLSGD);
- c.relocations.push_back({expr, type, offset, addend, &sym});
+ c.addReloc({expr, type, offset, addend, &sym});
return 1;
}
// depending on the symbol being locally defined or not.
if (sym.isPreemptible) {
sym.setFlags(NEEDS_TLSGD_TO_IE);
- c.relocations.push_back(
- {target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_IE), type, offset,
- addend, &sym});
+ c.addReloc({target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_IE), type,
+ offset, addend, &sym});
} else {
- c.relocations.push_back(
- {target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_LE), type, offset,
- addend, &sym});
+ c.addReloc({target->adjustTlsExpr(type, R_RELAX_TLS_GD_TO_LE), type,
+ offset, addend, &sym});
}
return target->getTlsGdRelaxSkip(type);
}
// Initial-Exec relocs can be relaxed to Local-Exec if the symbol is locally
// defined.
if (toExecRelax && isLocalInExecutable) {
- c.relocations.push_back(
- {R_RELAX_TLS_IE_TO_LE, type, offset, addend, &sym});
+ c.addReloc({R_RELAX_TLS_IE_TO_LE, type, offset, addend, &sym});
} else if (expr != R_TLSIE_HINT) {
sym.setFlags(NEEDS_TLSIE);
// R_GOT needs a relative relocation for PIC on i386 and Hexagon.
if (expr == R_GOT && config->isPic && !target->usesOnlyLowPageBits(type))
addRelativeReloc<true>(c, offset, sym, addend, expr, type);
else
- c.relocations.push_back({expr, type, offset, addend, &sym});
+ c.addReloc({expr, type, offset, addend, &sym});
}
return 1;
}
// R_RISCV_PCREL_HI20 and R_PPC64_ADDR64.
if (config->emachine == EM_RISCV ||
(config->emachine == EM_PPC64 && sec->name == ".toc"))
- llvm::stable_sort(sec->relocations,
+ llvm::stable_sort(sec->relocs(),
[](const Relocation &lhs, const Relocation &rhs) {
return lhs.offset < rhs.offset;
});
uint64_t off = in.got->getGlobalDynOffset(sym);
if (isLocalInExecutable)
// Write one to the GOT slot.
- in.got->relocations.push_back(
- {R_ADDEND, target->symbolicRel, off, 1, &sym});
+ in.got->addConstant({R_ADDEND, target->symbolicRel, off, 1, &sym});
else
mainPart->relaDyn->addSymbolReloc(target->tlsModuleIndexRel, *in.got,
off, sym);
mainPart->relaDyn->addSymbolReloc(target->tlsOffsetRel, *in.got,
offsetOff, sym);
else
- in.got->relocations.push_back(
- {R_ABS, target->tlsOffsetRel, offsetOff, 0, &sym});
+ in.got->addConstant({R_ABS, target->tlsOffsetRel, offsetOff, 0, &sym});
}
if (flags & NEEDS_TLSGD_TO_IE) {
in.got->addEntry(sym);
}
if (flags & NEEDS_GOT_DTPREL) {
in.got->addEntry(sym);
- in.got->relocations.push_back(
+ in.got->addConstant(
{R_ABS, target->tlsOffsetRel, sym.getGotOffset(), 0, &sym});
}
mainPart->relaDyn->addReloc(
{target->tlsModuleIndexRel, in.got.get(), in.got->getTlsIndexOff()});
else
- in.got->relocations.push_back(
+ in.got->addConstant(
{R_ADDEND, target->symbolicRel, in.got->getTlsIndexOff(), 1, &dummy});
}
forEachInputSectionDescription(
outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
for (InputSection *isec : isd->sections)
- for (Relocation &rel : isec->relocations) {
+ for (Relocation &rel : isec->relocs()) {
uint64_t src = isec->getVA(rel.offset);
// If we are a relocation to an existing Thunk, check if it is
forEachInputSectionDescription(
outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
for (InputSection *isec : isd->sections)
- for (Relocation &rel : isec->relocations)
+ for (Relocation &rel : isec->relocs())
if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
needTlsSymbol = true;
return;
forEachInputSectionDescription(
outputSections, [&](OutputSection *os, InputSectionDescription *isd) {
for (InputSection *isec : isd->sections)
- for (Relocation &rel : isec->relocations)
+ for (Relocation &rel : isec->relocs())
if (rel.sym->type == llvm::ELF::STT_TLS && rel.expr == R_PLT_PC) {
if (needEntry) {
sym->allocateAux();
numEntries = target->gotHeaderEntriesNum;
}
+void GotSection::addConstant(const Relocation &r) { relocations.push_back(r); }
void GotSection::addEntry(Symbol &sym) {
assert(sym.auxIdx == symAux.size() - 1);
symAux.back().gotIdx = numEntries++;
}
void RelocationBaseSection::addAddendOnlyRelocIfNonPreemptible(
- RelType dynType, InputSectionBase &isec, uint64_t offsetInSec, Symbol &sym,
+ RelType dynType, GotSection &sec, uint64_t offsetInSec, Symbol &sym,
RelType addendRelType) {
// No need to write an addend to the section for preemptible symbols.
if (sym.isPreemptible)
- addReloc({dynType, &isec, offsetInSec, DynamicReloc::AgainstSymbol, sym, 0,
+ addReloc({dynType, &sec, offsetInSec, DynamicReloc::AgainstSymbol, sym, 0,
R_ABS});
else
- addReloc(DynamicReloc::AddendOnlyWithTargetVA, dynType, isec, offsetInSec,
+ addReloc(DynamicReloc::AddendOnlyWithTargetVA, dynType, sec, offsetInSec,
sym, 0, R_ABS, addendRelType);
}
bool isNeeded() const override;
void writeTo(uint8_t *buf) override;
+ void addConstant(const Relocation &r);
void addEntry(Symbol &sym);
bool addTlsDescEntry(Symbol &sym);
bool addDynTlsEntry(Symbol &sym);
}
/// Add a dynamic relocation using the target address of \p sym as the addend
/// if \p sym is non-preemptible. Otherwise add a relocation against \p sym.
- void addAddendOnlyRelocIfNonPreemptible(RelType dynType,
- InputSectionBase &isec,
+ void addAddendOnlyRelocIfNonPreemptible(RelType dynType, GotSection &sec,
uint64_t offsetInSec, Symbol &sym,
RelType addendRelType);
template <bool shard = false>
// Write the addends to the relocated address if required. We skip
// it if the written value would be zero.
if (config->writeAddends && (expr != R_ADDEND || addend != 0))
- sec.relocations.push_back(
- {expr, addendRelType, offsetInSec, addend, &sym});
+ sec.addReloc({expr, addendRelType, offsetInSec, addend, &sym});
addReloc<shard>({dynType, &sec, offsetInSec, kind, sym, addend, expr});
}
bool isNeeded() const override {
uint64_t secAddr = sec.getOutputSection()->addr;
if (auto *s = dyn_cast<InputSection>(&sec))
secAddr += s->outSecOff;
- for (const Relocation &rel : sec.relocations) {
+ for (const Relocation &rel : sec.relocs()) {
uint8_t *loc = buf + rel.offset;
const uint64_t val = SignExtend64(
sec.getRelocTargetVA(sec.file, rel.type, rel.addend,