namespace llvm {
class MCAssembler;
class MCFragment;
-class MCSectionData;
+class MCSection;
class MCSymbol;
class MCSymbolData;
MCAssembler &Assembler;
/// List of sections in layout order.
- llvm::SmallVector<MCSectionData *, 16> SectionOrder;
+ llvm::SmallVector<MCSection *, 16> SectionOrder;
/// The last fragment which was laid out, or 0 if nothing has been laid
/// out. Fragments are always laid out in order, so all fragments with a
/// lower ordinal will be valid.
- mutable DenseMap<const MCSectionData *, MCFragment *> LastValidFragment;
+ mutable DenseMap<const MCSection *, MCFragment *> LastValidFragment;
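// ---- Illustrative aside, not part of the patch ----
// Hypothetical example of the invariant: for a section Sec with fragments
// F0, F1, F2 (layout ordinals 0, 1, 2) and LastValidFragment[Sec] == F1:
//
//   assert(isFragmentValid(F0) && isFragmentValid(F1)); // ordinal <= 1
//   assert(!isFragmentValid(F2)); // needs ensureValid(F2) first
// ---- end aside ----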
/// \brief Make sure that the layout for the given fragment is valid, lazily
/// computing it if necessary.
/// \name Section Access (in layout order)
/// @{
- llvm::SmallVectorImpl<MCSectionData *> &getSectionOrder() {
- return SectionOrder;
- }
- const llvm::SmallVectorImpl<MCSectionData *> &getSectionOrder() const {
+ llvm::SmallVectorImpl<MCSection *> &getSectionOrder() { return SectionOrder; }
+ const llvm::SmallVectorImpl<MCSection *> &getSectionOrder() const {
return SectionOrder;
}
/// \brief Get the address space size of the given section, as it affects
/// layout. This may differ from the size reported by \see
/// getSectionFileSize() by not including section tail padding.
- uint64_t getSectionAddressSize(const MCSectionData *SD) const;
+ uint64_t getSectionAddressSize(const MCSection *Sec) const;
/// \brief Get the data size of the given section, as emitted to the object
/// file. This may include additional padding, or be 0 for virtual sections.
- uint64_t getSectionFileSize(const MCSectionData *SD) const;
+ uint64_t getSectionFileSize(const MCSection *Sec) const;
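// ---- Illustrative aside, not part of the patch ----
// Assuming a completed `Layout` and a laid-out section `Sec`, the two
// queries relate as follows: a virtual section (e.g. .bss) reports a file
// size of 0, any other section reports file size == address-space size
// (see the implementations in MCAssembler.cpp below):
//
//   uint64_t AddrSize = Layout.getSectionAddressSize(Sec);
//   uint64_t FileSize = Layout.getSectionFileSize(Sec);
//   assert(Sec->isVirtualSection() ? FileSize == 0 : FileSize == AddrSize);
// ---- end aside ----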
/// \brief Get the offset of the given symbol, as computed in the current
/// layout.
return const_cast<MCSection *>(this)->end();
}
+ MCSectionData::reverse_iterator rbegin();
+ MCSectionData::const_reverse_iterator rbegin() const {
+ return const_cast<MCSection *>(this)->rbegin();
+ }
+
MCSectionData::reverse_iterator rend();
MCSectionData::const_reverse_iterator rend() const {
return const_cast<MCSection *>(this)->rend();
const std::pair<uint64_t, uint64_t> &Offsets =
SectionOffsets.find(Section)->second;
uint64_t Size;
- if (Type == ELF::SHT_NOBITS) {
- const MCSectionData &SD = Asm.getSectionData(*Section);
- Size = Layout.getSectionAddressSize(&SD);
- } else {
+ if (Type == ELF::SHT_NOBITS)
+ Size = Layout.getSectionAddressSize(Section);
+ else
Size = Offsets.second - Offsets.first;
- }
writeSection(SectionIndexMap, GroupSymbolIndex, Offsets.first, Size,
*Section);
// Compute the section layout order. Virtual sections must go last.
for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it)
if (!it->isVirtualSection())
- SectionOrder.push_back(&it->getSectionData());
+ SectionOrder.push_back(&*it);
for (MCAssembler::iterator it = Asm.begin(), ie = Asm.end(); it != ie; ++it)
if (it->isVirtualSection())
- SectionOrder.push_back(&it->getSectionData());
+ SectionOrder.push_back(&*it);
}
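// ---- Illustrative sketch, not part of the patch ----
// The two passes above form a stable partition: concrete sections keep their
// relative order and virtual ones are appended, so {.text, .bss, .data} with
// a virtual .bss yields {.text, .data, .bss}. The same logic with range-for
// (the helper name is hypothetical):
static void buildSectionOrder(MCAssembler &Asm,
                              SmallVectorImpl<MCSection *> &Order) {
  for (MCSection &Sec : Asm)
    if (!Sec.isVirtualSection())
      Order.push_back(&Sec); // first pass: concrete sections
  for (MCSection &Sec : Asm)
    if (Sec.isVirtualSection())
      Order.push_back(&Sec); // second pass: virtual sections go last
}
// ---- end sketch ----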
bool MCAsmLayout::isFragmentValid(const MCFragment *F) const {
- const MCSectionData &SD = F->getParent()->getSectionData();
- const MCFragment *LastValid = LastValidFragment.lookup(&SD);
+ const MCSection *Sec = F->getParent();
+ const MCFragment *LastValid = LastValidFragment.lookup(Sec);
if (!LastValid)
return false;
- assert(LastValid->getParent() == F->getParent());
+ assert(LastValid->getParent() == Sec);
return F->getLayoutOrder() <= LastValid->getLayoutOrder();
}
// Otherwise, reset the last valid fragment to the previous fragment
// (if this is the first fragment, it will be NULL).
- const MCSectionData &SD = F->getParent()->getSectionData();
- LastValidFragment[&SD] = F->getPrevNode();
+ LastValidFragment[F->getParent()] = F->getPrevNode();
}
void MCAsmLayout::ensureValid(const MCFragment *F) const {
- MCSectionData &SD = F->getParent()->getSectionData();
-
- MCFragment *Cur = LastValidFragment[&SD];
+ MCSection *Sec = F->getParent();
+ MCFragment *Cur = LastValidFragment[Sec];
if (!Cur)
- Cur = &*SD.begin();
+ Cur = &*Sec->begin();
else
Cur = Cur->getNextNode();
return &ASym;
}
-uint64_t MCAsmLayout::getSectionAddressSize(const MCSectionData *SD) const {
+uint64_t MCAsmLayout::getSectionAddressSize(const MCSection *Sec) const {
// The size is the last fragment's end offset.
- const MCFragment &F = SD->getFragmentList().back();
+ const MCFragment &F = Sec->getFragmentList().back();
return getFragmentOffset(&F) + getAssembler().computeFragmentSize(*this, F);
}
-uint64_t MCAsmLayout::getSectionFileSize(const MCSectionData *SD) const {
+uint64_t MCAsmLayout::getSectionFileSize(const MCSection *Sec) const {
// Virtual sections have no file size.
- if (SD->getSection().isVirtualSection())
+ if (Sec->isVirtualSection())
return 0;
// Otherwise, the file size is the same as the address space size.
- return getSectionAddressSize(SD);
+ return getSectionAddressSize(Sec);
}
uint64_t llvm::computeBundlePadding(const MCAssembler &Assembler,
F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev);
else
F->Offset = 0;
- LastValidFragment[&F->getParent()->getSectionData()] = F;
+ LastValidFragment[F->getParent()] = F;
// If bundling is enabled and this fragment has instructions in it, it has to
// obey the bundling restrictions. With padding, we'll have:
void MCAssembler::writeSectionData(const MCSectionData *SD,
const MCAsmLayout &Layout) const {
// Ignore virtual sections.
- if (SD->getSection().isVirtualSection()) {
- assert(Layout.getSectionFileSize(SD) == 0 && "Invalid size for section!");
+ const MCSection &Sec = SD->getSection();
+ if (Sec.isVirtualSection()) {
+ assert(Layout.getSectionFileSize(&Sec) == 0 && "Invalid size for section!");
// Check that contents are only things legal inside a virtual section.
for (MCSectionData::const_iterator it = SD->begin(),
writeFragment(*this, Layout, *it);
assert(getWriter().getStream().tell() - Start ==
- Layout.getSectionAddressSize(SD));
+ Layout.getSectionAddressSize(&Sec));
}
std::pair<uint64_t, bool> MCAssembler::handleFixup(const MCAsmLayout &Layout,
// Assign layout order indices to sections and fragments.
for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) {
- MCSectionData *SD = Layout.getSectionOrder()[i];
- SD->getSection().setLayoutOrder(i);
+ MCSection *Sec = Layout.getSectionOrder()[i];
+ Sec->setLayoutOrder(i);
unsigned FragmentIndex = 0;
- for (MCSectionData::iterator iFrag = SD->begin(), iFragEnd = SD->end();
+ for (MCSectionData::iterator iFrag = Sec->begin(), iFragEnd = Sec->end();
iFrag != iFragEnd; ++iFrag)
iFrag->setLayoutOrder(FragmentIndex++);
}
MCSectionData::iterator MCSection::end() { return Data.end(); }
+MCSectionData::reverse_iterator MCSection::rbegin() { return Data.rbegin(); }
+
MCSectionData::FragmentListType &MCSection::getFragmentList() {
return Data.getFragmentList();
}
uint64_t MachObjectWriter::getPaddingSize(const MCSection *Sec,
const MCAsmLayout &Layout) const {
- uint64_t EndAddr = getSectionAddress(Sec) +
- Layout.getSectionAddressSize(&Sec->getSectionData());
+ uint64_t EndAddr = getSectionAddress(Sec) + Layout.getSectionAddressSize(Sec);
unsigned Next = Sec->getLayoutOrder() + 1;
if (Next >= Layout.getSectionOrder().size())
return 0;
- const MCSectionData &NextSD = *Layout.getSectionOrder()[Next];
- if (NextSD.getSection().isVirtualSection())
+ const MCSection &NextSec = *Layout.getSectionOrder()[Next];
+ if (NextSec.isVirtualSection())
return 0;
- return OffsetToAlignment(EndAddr, NextSD.getSection().getAlignment());
+ return OffsetToAlignment(EndAddr, NextSec.getAlignment());
}
void MachObjectWriter::WriteHeader(unsigned NumLoadCommands,
uint64_t RelocationsStart,
unsigned NumRelocations) {
- const MCSectionData &SD = Sec.getSectionData();
- uint64_t SectionSize = Layout.getSectionAddressSize(&SD);
+ uint64_t SectionSize = Layout.getSectionAddressSize(&Sec);
const MCSectionMachO &Section = cast<MCSectionMachO>(Sec);
// The offset is unused for virtual sections.
if (Section.isVirtualSection()) {
- assert(Layout.getSectionFileSize(&SD) == 0 && "Invalid file size!");
+ assert(Layout.getSectionFileSize(&Sec) == 0 && "Invalid file size!");
FileOffset = 0;
}
void MachObjectWriter::computeSectionAddresses(const MCAssembler &Asm,
const MCAsmLayout &Layout) {
uint64_t StartAddress = 0;
- const SmallVectorImpl<MCSectionData*> &Order = Layout.getSectionOrder();
+ const SmallVectorImpl<MCSection *> &Order = Layout.getSectionOrder();
for (int i = 0, n = Order.size(); i != n ; ++i) {
- const MCSectionData *SD = Order[i];
- StartAddress =
- RoundUpToAlignment(StartAddress, SD->getSection().getAlignment());
- SectionAddress[&SD->getSection()] = StartAddress;
- StartAddress += Layout.getSectionAddressSize(SD);
+ const MCSection *Sec = Order[i];
+ StartAddress = RoundUpToAlignment(StartAddress, Sec->getAlignment());
+ SectionAddress[Sec] = StartAddress;
+ StartAddress += Layout.getSectionAddressSize(Sec);
// Explicitly pad the section to match the alignment requirements of the
// following one. This is for 'gas' compatibility; it shouldn't strictly be
// necessary.
- StartAddress += getPaddingSize(&SD->getSection(), Layout);
+ StartAddress += getPaddingSize(Sec, Layout);
}
}
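// ---- Illustrative sketch, not part of the patch ----
// Worked example of the bookkeeping above: with __text (align 4, address
// size 10) followed by __data (align 16), __text lands at 0 and 6 bytes of
// explicit padding move the cursor from 10 to 16. The helpers are local
// stand-ins for RoundUpToAlignment/OffsetToAlignment from
// llvm/Support/MathExtras.h:
static uint64_t roundUpToAlignmentExample(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) / Align * Align;
}
static uint64_t offsetToAlignmentExample(uint64_t Value, uint64_t Align) {
  return roundUpToAlignmentExample(Value, Align) - Value;
}
static void sectionAddressExample() {
  uint64_t StartAddress = 0;
  StartAddress = roundUpToAlignmentExample(StartAddress, 4); // __text at 0
  StartAddress += 10;                                // __text address size
  StartAddress += offsetToAlignmentExample(StartAddress, 16); // pad 10 -> 16
  assert(StartAddress == 16 && "__data starts on its 16-byte alignment");
}
// ---- end sketch ----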
uint64_t VMSize = 0;
for (MCAssembler::const_iterator it = Asm.begin(),
ie = Asm.end(); it != ie; ++it) {
+ const MCSection &Sec = *it;
- const MCSectionData &SD = it->getSectionData();
- uint64_t Address = getSectionAddress(&*it);
- uint64_t Size = Layout.getSectionAddressSize(&SD);
- uint64_t FileSize = Layout.getSectionFileSize(&SD);
- FileSize += getPaddingSize(&*it, Layout);
+ uint64_t Address = getSectionAddress(&Sec);
+ uint64_t Size = Layout.getSectionAddressSize(&Sec);
+ uint64_t FileSize = Layout.getSectionFileSize(&Sec);
+ FileSize += getPaddingSize(&Sec, Layout);
VMSize = std::max(VMSize, Address + Size);
if (Sec->Number == -1)
continue;
- Sec->Header.SizeOfRawData =
- Layout.getSectionAddressSize(&Section.getSectionData());
+ Sec->Header.SizeOfRawData = Layout.getSectionAddressSize(&Section);
if (IsPhysicalSection(Sec)) {
// Align the section data to a four byte boundary.