// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// TODO(simonb): Extend for 64-bit target libraries.

#include <sys/types.h>

namespace relocation_packer {

// Stub identifier written to 'null out' packed data, "NULL".
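// In little-endian memory order its four bytes read 'N', 'U', 'L', 'L'
// (0x4e, 0x55, 0x4c, 0x4c).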
static const Elf32_Word kStubIdentifier = 0x4c4c554eu;

// Out-of-band dynamic tags used to indicate the offset and size of the
// .android.rel.dyn section.
static const Elf32_Sword DT_ANDROID_ARM_REL_OFFSET = DT_LOPROC;
static const Elf32_Sword DT_ANDROID_ARM_REL_SIZE = DT_LOPROC + 1;

// Alignment to preserve, in bytes. This must be at least as large as the
// largest d_align and sh_addralign values found in the loaded file.
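// With 8-byte Elf32_Rel entries this means holes open and close in
// multiples of 256 bytes, that is, in groups of 32 relocation entries.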
static const size_t kPreserveAlignment = 256;

// Get section data. Checks that the section has exactly one data entry,
// so that the section size and the data size are the same. True in
// practice for all sections we resize when packing or unpacking. Done
// by ensuring that a call to elf_getdata(section, data) returns NULL as
// the next data entry.
Elf_Data* GetSectionData(Elf_Scn* section) {
  Elf_Data* data = elf_getdata(section, NULL);
  CHECK(data && elf_getdata(section, data) == NULL);
  return data;
}

// Rewrite section data. Allocates new data and makes it the data element's
// buffer. Relies on program exit to free allocated data.
void RewriteSectionData(Elf_Data* data,
                        const void* section_data,
                        size_t size) {
  CHECK(size == data->d_size);
  uint8_t* area = new uint8_t[size];
  memcpy(area, section_data, size);
  data->d_buf = area;
}

// Verbose ELF header logging.
void VerboseLogElfHeader(const Elf32_Ehdr* elf_header) {
  VLOG("e_phoff = %u\n", elf_header->e_phoff);
  VLOG("e_shoff = %u\n", elf_header->e_shoff);
  VLOG("e_ehsize = %u\n", elf_header->e_ehsize);
  VLOG("e_phentsize = %u\n", elf_header->e_phentsize);
  VLOG("e_phnum = %u\n", elf_header->e_phnum);
  VLOG("e_shnum = %u\n", elf_header->e_shnum);
  VLOG("e_shstrndx = %u\n", elf_header->e_shstrndx);
}

// Verbose ELF program header logging.
void VerboseLogProgramHeader(size_t program_header_index,
                             const Elf32_Phdr* program_header) {
  std::string type;
  switch (program_header->p_type) {
    case PT_NULL: type = "NULL"; break;
    case PT_LOAD: type = "LOAD"; break;
    case PT_DYNAMIC: type = "DYNAMIC"; break;
    case PT_INTERP: type = "INTERP"; break;
    case PT_NOTE: type = "NOTE"; break;
    case PT_SHLIB: type = "SHLIB"; break;
    case PT_PHDR: type = "PHDR"; break;
    case PT_TLS: type = "TLS"; break;
    default: type = "(OTHER)"; break;
  }
  VLOG("phdr %lu : %s\n", program_header_index, type.c_str());
  VLOG("  p_offset = %u\n", program_header->p_offset);
  VLOG("  p_vaddr = %u\n", program_header->p_vaddr);
  VLOG("  p_paddr = %u\n", program_header->p_paddr);
  VLOG("  p_filesz = %u\n", program_header->p_filesz);
  VLOG("  p_memsz = %u\n", program_header->p_memsz);
}

// Verbose ELF section header logging.
void VerboseLogSectionHeader(const std::string& section_name,
                             const Elf32_Shdr* section_header) {
  VLOG("section %s\n", section_name.c_str());
  VLOG("  sh_addr = %u\n", section_header->sh_addr);
  VLOG("  sh_offset = %u\n", section_header->sh_offset);
  VLOG("  sh_size = %u\n", section_header->sh_size);
  VLOG("  sh_addralign = %u\n", section_header->sh_addralign);
}

// Verbose ELF section data logging.
void VerboseLogSectionData(const Elf_Data* data) {
  VLOG("  d_buf = %p\n", data->d_buf);
  VLOG("  d_off = %lu\n", data->d_off);
  VLOG("  d_size = %lu\n", data->d_size);
  VLOG("  d_align = %lu\n", data->d_align);
}

// Load the complete ELF file into a memory image in libelf, and identify
// the .rel.dyn, .dynamic, and .android.rel.dyn sections. No-op if the
// ELF file has already been loaded.
bool ElfFile::Load() {
  if (elf_)
    return true;

  elf_ = elf_begin(fd_, ELF_C_RDWR, NULL);

  if (elf_kind(elf_) != ELF_K_ELF) {
    LOG("ERROR: File not in ELF format\n");
    return false;
  }

  Elf32_Ehdr* elf_header = elf32_getehdr(elf_);
  if (!elf_header) {
    LOG("ERROR: Failed to load ELF header\n");
    return false;
  }
  if (elf_header->e_machine != EM_ARM) {
    LOG("ERROR: File is not an arm32 ELF file\n");
    return false;
  }

  // Require that our endianness matches that of the target, and that both
  // are little-endian. Safe for all current build/target combinations.
  const int endian = static_cast<int>(elf_header->e_ident[EI_DATA]);
  CHECK(endian == ELFDATA2LSB);
  CHECK(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__);

  VLOG("endian = %u\n", endian);
  VerboseLogElfHeader(elf_header);

  const Elf32_Phdr* elf_program_header = elf32_getphdr(elf_);
  CHECK(elf_program_header);

  const Elf32_Phdr* dynamic_program_header = NULL;
  for (size_t i = 0; i < elf_header->e_phnum; ++i) {
    const Elf32_Phdr* program_header = &elf_program_header[i];
    VerboseLogProgramHeader(i, program_header);

    if (program_header->p_type == PT_DYNAMIC) {
      CHECK(dynamic_program_header == NULL);
      dynamic_program_header = program_header;
    }
  }
  CHECK(dynamic_program_header != NULL);

  size_t string_index;
  elf_getshdrstrndx(elf_, &string_index);

  // Notes of the .rel.dyn, .android.rel.dyn, and .dynamic sections. Found
  // while iterating sections, and later stored in class attributes.
  Elf_Scn* found_rel_dyn_section = NULL;
  Elf_Scn* found_android_rel_dyn_section = NULL;
  Elf_Scn* found_dynamic_section = NULL;

  // Flag set if we encounter any .debug* section. We do not adjust any
  // offsets or addresses of any debug data, so if we find one of these then
  // the resulting output shared object should still run, but might not be
  // usable for debugging, disassembly, and so on. Provides a warning if
  // this occurs.
  bool has_debug_section = false;

  Elf_Scn* section = NULL;
  while ((section = elf_nextscn(elf_, section)) != NULL) {
    const Elf32_Shdr* section_header = elf32_getshdr(section);
    std::string name = elf_strptr(elf_, string_index, section_header->sh_name);
    VerboseLogSectionHeader(name, section_header);

    // Note special sections as we encounter them.
    if (name == ".rel.dyn") {
      found_rel_dyn_section = section;
    }
    if (name == ".android.rel.dyn") {
      found_android_rel_dyn_section = section;
    }
    if (section_header->sh_offset == dynamic_program_header->p_offset) {
      found_dynamic_section = section;
    }

    // If we find a section named .debug*, set the debug warning flag.
    if (name.find(".debug") == 0) {
      has_debug_section = true;
    }

    // Ensure we preserve alignment, repeated later for the data block(s).
    CHECK(section_header->sh_addralign <= kPreserveAlignment);

    Elf_Data* data = NULL;
    while ((data = elf_getdata(section, data)) != NULL) {
      CHECK(data->d_align <= kPreserveAlignment);
      VerboseLogSectionData(data);
    }
  }

  // Loading failed if we did not find the required special sections.
  if (!found_rel_dyn_section) {
    LOG("ERROR: Missing .rel.dyn section\n");
    return false;
  }
  if (!found_dynamic_section) {
    LOG("ERROR: Missing .dynamic section\n");
    return false;
  }
  if (!found_android_rel_dyn_section) {
    LOG("ERROR: Missing .android.rel.dyn section "
        "(to fix, run with --help and follow the pre-packing instructions)\n");
    return false;
  }
  if (has_debug_section) {
    LOG("WARNING: found .debug section(s), and ignored them\n");
  }

  rel_dyn_section_ = found_rel_dyn_section;
  dynamic_section_ = found_dynamic_section;
  android_rel_dyn_section_ = found_android_rel_dyn_section;
  return true;
}

// Helper for ResizeSection(). Adjust the main ELF header for the hole.
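// For example, expanding a section at file offset 0x1000 by 0x100 bytes
// moves a section header table at e_shoff 0x3000 to 0x3100, while a
// program header table at e_phoff 0x34, before the hole, is untouched.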
void AdjustElfHeaderForHole(Elf32_Ehdr* elf_header,
                            Elf32_Off hole_start,
                            int32_t hole_size) {
  if (elf_header->e_phoff > hole_start) {
    elf_header->e_phoff += hole_size;
    VLOG("e_phoff adjusted to %u\n", elf_header->e_phoff);
  }
  if (elf_header->e_shoff > hole_start) {
    elf_header->e_shoff += hole_size;
    VLOG("e_shoff adjusted to %u\n", elf_header->e_shoff);
  }
}

// Helper for ResizeSection(). Adjust all program headers for the hole.
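// A segment whose p_offset lies beyond the hole start has its p_offset
// (and any non-zero p_vaddr/p_paddr) shifted by the hole size; a segment
// that contains the hole start instead grows or shrinks in p_filesz and
// p_memsz.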
void AdjustProgramHeadersForHole(Elf32_Phdr* elf_program_header,
                                 size_t program_header_count,
                                 Elf32_Off hole_start,
                                 int32_t hole_size) {
  for (size_t i = 0; i < program_header_count; ++i) {
    Elf32_Phdr* program_header = &elf_program_header[i];

    if (program_header->p_offset > hole_start) {
      // This segment starts past the hole start, so adjust offsets and
      // addrs.
      program_header->p_offset += hole_size;
      VLOG("phdr %lu p_offset adjusted to %u\n", i, program_header->p_offset);

      // Only adjust vaddr and paddr if this program header has them.
      if (program_header->p_vaddr != 0) {
        program_header->p_vaddr += hole_size;
        VLOG("phdr %lu p_vaddr adjusted to %u\n", i, program_header->p_vaddr);
      }
      if (program_header->p_paddr != 0) {
        program_header->p_paddr += hole_size;
        VLOG("phdr %lu p_paddr adjusted to %u\n", i, program_header->p_paddr);
      }
    } else if (program_header->p_offset +
               program_header->p_filesz > hole_start) {
      // The hole start is within this segment, so adjust file and in-memory
      // sizes, but leave offsets and addrs unchanged.
      program_header->p_filesz += hole_size;
      VLOG("phdr %lu p_filesz adjusted to %u\n", i, program_header->p_filesz);
      program_header->p_memsz += hole_size;
      VLOG("phdr %lu p_memsz adjusted to %u\n", i, program_header->p_memsz);
    }
  }
}

// Helper for ResizeSection(). Adjust all section headers for the hole.
void AdjustSectionHeadersForHole(Elf* elf,
                                 Elf32_Off hole_start,
                                 int32_t hole_size) {
  size_t string_index;
  elf_getshdrstrndx(elf, &string_index);

  Elf_Scn* section = NULL;
  while ((section = elf_nextscn(elf, section)) != NULL) {
    Elf32_Shdr* section_header = elf32_getshdr(section);
    std::string name = elf_strptr(elf, string_index, section_header->sh_name);

    if (section_header->sh_offset > hole_start) {
      section_header->sh_offset += hole_size;
      VLOG("section %s sh_offset"
           " adjusted to %u\n", name.c_str(), section_header->sh_offset);
      // Only adjust section addr if this section has one.
      if (section_header->sh_addr != 0) {
        section_header->sh_addr += hole_size;
        VLOG("section %s sh_addr"
             " adjusted to %u\n", name.c_str(), section_header->sh_addr);
      }
    }
  }
}

// Helper for ResizeSection(). Adjust the .dynamic section for the hole.
void AdjustDynamicSectionForHole(Elf_Scn* dynamic_section,
                                 bool is_rel_dyn_resize,
                                 Elf32_Off hole_start,
                                 int32_t hole_size) {
  Elf_Data* data = GetSectionData(dynamic_section);

  const Elf32_Dyn* dynamic_base = reinterpret_cast<Elf32_Dyn*>(data->d_buf);
  std::vector<Elf32_Dyn> dynamics(
      dynamic_base,
      dynamic_base + data->d_size / sizeof(dynamics[0]));

  for (size_t i = 0; i < dynamics.size(); ++i) {
    Elf32_Dyn* dynamic = &dynamics[i];
    const Elf32_Sword tag = dynamic->d_tag;
    // Any tags that hold offsets are adjustment candidates.
    const bool is_adjustable = (tag == DT_PLTGOT ||
                                tag == DT_HASH ||
                                tag == DT_STRTAB ||
                                tag == DT_SYMTAB ||
                                tag == DT_INIT ||
                                tag == DT_FINI ||
                                tag == DT_REL ||
                                tag == DT_JMPREL ||
                                tag == DT_INIT_ARRAY ||
                                tag == DT_FINI_ARRAY ||
                                tag == DT_ANDROID_ARM_REL_OFFSET);
    if (is_adjustable && dynamic->d_un.d_ptr > hole_start) {
      dynamic->d_un.d_ptr += hole_size;
      VLOG("dynamic[%lu] %u"
           " d_ptr adjusted to %u\n", i, dynamic->d_tag, dynamic->d_un.d_ptr);
    }

    // If we are specifically resizing .rel.dyn, we need to make some added
    // adjustments to tags that indicate the counts of R_ARM_RELATIVE
    // relocations in the shared object.
    if (is_rel_dyn_resize) {
      // DT_RELSZ is the overall size of relocations. Adjust by hole size.
      if (tag == DT_RELSZ) {
        dynamic->d_un.d_val += hole_size;
        VLOG("dynamic[%lu] %u"
             " d_val adjusted to %u\n", i, dynamic->d_tag, dynamic->d_un.d_val);
      }

      // The crazy linker does not use DT_RELCOUNT, but we keep it updated
      // anyway. In practice the section hole is always equal to the size
      // of R_ARM_RELATIVE relocations, and DT_RELCOUNT is the count of
      // relative relocations. So closing a hole on packing reduces
      // DT_RELCOUNT to zero, and opening a hole on unpacking restores it to
      // its pre-packed value.
      if (tag == DT_RELCOUNT) {
        // Cast the divisor so that the division stays signed when
        // hole_size is negative (closing a hole).
        dynamic->d_un.d_val +=
            hole_size / static_cast<int32_t>(sizeof(Elf32_Rel));
        VLOG("dynamic[%lu] %u"
             " d_val adjusted to %u\n", i, dynamic->d_tag, dynamic->d_un.d_val);
      }
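
      // For example, closing an 8000-byte hole removes 8000 / 8 = 1000
      // entries from DT_RELCOUNT; opening the same hole restores them.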

      // DT_RELENT doesn't change, but make sure it is what we expect.
      if (tag == DT_RELENT) {
        CHECK(dynamic->d_un.d_val == sizeof(Elf32_Rel));
      }
    }
  }

  void* section_data = &dynamics[0];
  size_t bytes = dynamics.size() * sizeof(dynamics[0]);
  RewriteSectionData(data, section_data, bytes);
}

// Helper for ResizeSection(). Adjust the .dynsym section for the hole.
// We need to adjust the values for the symbols represented in it.
void AdjustDynSymSectionForHole(Elf_Scn* dynsym_section,
                                Elf32_Off hole_start,
                                int32_t hole_size) {
  Elf_Data* data = GetSectionData(dynsym_section);

  const Elf32_Sym* dynsym_base = reinterpret_cast<Elf32_Sym*>(data->d_buf);
  std::vector<Elf32_Sym> dynsyms(
      dynsym_base,
      dynsym_base + data->d_size / sizeof(dynsyms[0]));

  for (size_t i = 0; i < dynsyms.size(); ++i) {
    Elf32_Sym* dynsym = &dynsyms[i];
    const int type = static_cast<int>(ELF32_ST_TYPE(dynsym->st_info));
    const bool is_adjustable = (type == STT_OBJECT ||
                                type == STT_FUNC ||
                                type == STT_SECTION ||
                                type == STT_FILE ||
                                type == STT_COMMON ||
                                type == STT_TLS);
    if (is_adjustable && dynsym->st_value > hole_start) {
      dynsym->st_value += hole_size;
      VLOG("dynsym[%lu] type=%u"
           " st_value adjusted to %u\n", i, type, dynsym->st_value);
    }
  }

  void* section_data = &dynsyms[0];
  size_t bytes = dynsyms.size() * sizeof(dynsyms[0]);
  RewriteSectionData(data, section_data, bytes);
}

// Helper for ResizeSection(). Adjust the .rel.plt section for the hole.
// We need to adjust the offset of every relocation inside it that falls
// beyond the hole start.
void AdjustRelPltSectionForHole(Elf_Scn* relplt_section,
                                Elf32_Off hole_start,
                                int32_t hole_size) {
  Elf_Data* data = GetSectionData(relplt_section);

  const Elf32_Rel* relplt_base = reinterpret_cast<Elf32_Rel*>(data->d_buf);
  std::vector<Elf32_Rel> relplts(
      relplt_base,
      relplt_base + data->d_size / sizeof(relplts[0]));

  for (size_t i = 0; i < relplts.size(); ++i) {
    Elf32_Rel* relplt = &relplts[i];
    if (relplt->r_offset > hole_start) {
      relplt->r_offset += hole_size;
      VLOG("relplt[%lu] r_offset adjusted to %u\n", i, relplt->r_offset);
    }
  }

  void* section_data = &relplts[0];
  size_t bytes = relplts.size() * sizeof(relplts[0]);
  RewriteSectionData(data, section_data, bytes);
}

// Helper for ResizeSection(). Adjust the .symtab section for the hole.
// We want to adjust the value of every symbol in it that falls beyond
// the hole start.
void AdjustSymTabSectionForHole(Elf_Scn* symtab_section,
                                Elf32_Off hole_start,
                                int32_t hole_size) {
  Elf_Data* data = GetSectionData(symtab_section);

  const Elf32_Sym* symtab_base = reinterpret_cast<Elf32_Sym*>(data->d_buf);
  std::vector<Elf32_Sym> symtab(
      symtab_base,
      symtab_base + data->d_size / sizeof(symtab[0]));

  for (size_t i = 0; i < symtab.size(); ++i) {
    Elf32_Sym* sym = &symtab[i];
    if (sym->st_value > hole_start) {
      sym->st_value += hole_size;
      VLOG("symtab[%lu] value adjusted to %u\n", i, sym->st_value);
    }
  }

  void* section_data = &symtab[0];
  size_t bytes = symtab.size() * sizeof(symtab[0]);
  RewriteSectionData(data, section_data, bytes);
}

// Resize a section. If the new size is larger than the current size, open
// up a hole by increasing file offsets that come after the hole. If smaller
// than the current size, remove the hole by decreasing those offsets.
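// For example, growing a 64-byte section at file offset 0x2000 to 0x140
// bytes opens a 0x100-byte hole; every file offset beyond 0x2000 in the
// ELF header, program headers, section headers, .dynamic, .dynsym,
// .rel.plt and .symtab is then increased by 0x100.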
void ResizeSection(Elf* elf, Elf_Scn* section, size_t new_size) {
  Elf32_Shdr* section_header = elf32_getshdr(section);
  if (section_header->sh_size == new_size)
    return;

  // Note if we are resizing the real .rel.dyn. If yes, then we have to
  // massage d_un.d_val in the dynamic section where d_tag is DT_RELSZ and
  // DT_RELCOUNT.
  size_t string_index;
  elf_getshdrstrndx(elf, &string_index);
  const std::string section_name =
      elf_strptr(elf, string_index, section_header->sh_name);
  const bool is_rel_dyn_resize = section_name == ".rel.dyn";

  // Require that the section size and the data size are the same. True
  // in practice for all sections we resize when packing or unpacking.
  Elf_Data* data = GetSectionData(section);
  CHECK(data->d_off == 0 && data->d_size == section_header->sh_size);

  // Require that the section is not zero-length (that is, has allocated
  // data that we can validly expand).
  CHECK(data->d_size && data->d_buf);

  const Elf32_Off hole_start = section_header->sh_offset;
  const int32_t hole_size = new_size - data->d_size;

  VLOG_IF(hole_size > 0, "expand section size = %lu\n", data->d_size);
  VLOG_IF(hole_size < 0, "shrink section size = %lu\n", data->d_size);

  // Resize the data and the section header.
  data->d_size += hole_size;
  section_header->sh_size += hole_size;

  Elf32_Ehdr* elf_header = elf32_getehdr(elf);
  Elf32_Phdr* elf_program_header = elf32_getphdr(elf);

  // Add the hole size to all offsets in the ELF file that are after the
  // start of the hole. If the hole size is positive we are expanding the
  // section to create a new hole; if negative, we are closing up a hole.

  // Start with the main ELF header.
  AdjustElfHeaderForHole(elf_header, hole_start, hole_size);

  // Adjust all program headers.
  AdjustProgramHeadersForHole(elf_program_header,
                              elf_header->e_phnum,
                              hole_start,
                              hole_size);

  // Adjust all section headers.
  AdjustSectionHeadersForHole(elf, hole_start, hole_size);

  // We use the dynamic program header entry to locate the dynamic section.
  const Elf32_Phdr* dynamic_program_header = NULL;

  // Find the dynamic program header entry.
  for (size_t i = 0; i < elf_header->e_phnum; ++i) {
    Elf32_Phdr* program_header = &elf_program_header[i];

    if (program_header->p_type == PT_DYNAMIC) {
      dynamic_program_header = program_header;
    }
  }
  CHECK(dynamic_program_header);

  // Sections requiring special attention, and the .android.rel.dyn offset.
  Elf_Scn* dynamic_section = NULL;
  Elf_Scn* dynsym_section = NULL;
  Elf_Scn* relplt_section = NULL;
  Elf_Scn* symtab_section = NULL;
  Elf32_Off android_rel_dyn_offset = 0;

  // Find these sections, and the .android.rel.dyn offset. Iterate with a
  // fresh variable rather than the 'section' argument, which must stay
  // intact for the dynamic_section check below.
  Elf_Scn* found_section = NULL;
  while ((found_section = elf_nextscn(elf, found_section)) != NULL) {
    Elf32_Shdr* section_header = elf32_getshdr(found_section);
    std::string name = elf_strptr(elf, string_index, section_header->sh_name);

    if (section_header->sh_offset == dynamic_program_header->p_offset) {
      dynamic_section = found_section;
    }
    if (name == ".dynsym") {
      dynsym_section = found_section;
    }
    if (name == ".rel.plt") {
      relplt_section = found_section;
    }
    if (name == ".symtab") {
      symtab_section = found_section;
    }

    // Note .android.rel.dyn offset.
    if (name == ".android.rel.dyn") {
      android_rel_dyn_offset = section_header->sh_offset;
    }
  }
  CHECK(dynamic_section != NULL);
  CHECK(dynsym_section != NULL);
  CHECK(relplt_section != NULL);
  CHECK(android_rel_dyn_offset != 0);

  // Adjust the .dynamic section for the hole. Because we have to edit the
  // current contents of .dynamic we disallow resizing it.
  CHECK(section != dynamic_section);
  AdjustDynamicSectionForHole(dynamic_section,
                              is_rel_dyn_resize,
                              hole_start,
                              hole_size);

  // Adjust the .dynsym section for the hole.
  AdjustDynSymSectionForHole(dynsym_section, hole_start, hole_size);

  // Adjust the .rel.plt section for the hole.
  AdjustRelPltSectionForHole(relplt_section, hole_start, hole_size);

  // If present, adjust the .symtab section for the hole. If the shared
  // library was stripped then .symtab will be absent.
  if (symtab_section)
    AdjustSymTabSectionForHole(symtab_section, hole_start, hole_size);
}

// Replace the first free (unused) slot in a dynamics vector with the given
// value. The vector always ends with a free (unused) element, so the slot
// found cannot be the last one in the vector.
void AddDynamicEntry(Elf32_Dyn dyn,
                     std::vector<Elf32_Dyn>* dynamics) {
  // Loop until the penultimate entry. We cannot replace the end sentinel.
  for (size_t i = 0; i < dynamics->size() - 1; ++i) {
    Elf32_Dyn& slot = dynamics->at(i);
    if (slot.d_tag == DT_NULL) {
      slot = dyn;
      VLOG("dynamic[%lu] overwritten with %u\n", i, dyn.d_tag);
      return;
    }
  }

  // No free dynamics vector slot was found.
  LOG("FATAL: No spare dynamic vector slots found "
      "(to fix, increase gold's --spare-dynamic-tags value)\n");
}

// Remove the element in the dynamics vector that matches the given tag.
// Shuffle the following elements up, and ensure that the last is the
// null sentinel.
void RemoveDynamicEntry(Elf32_Sword tag,
                        std::vector<Elf32_Dyn>* dynamics) {
  // Loop until the penultimate entry, and never match the end sentinel.
  for (size_t i = 0; i < dynamics->size() - 1; ++i) {
    Elf32_Dyn& slot = dynamics->at(i);
    if (slot.d_tag == tag) {
      for ( ; i < dynamics->size() - 1; ++i) {
        dynamics->at(i) = dynamics->at(i + 1);
        VLOG("dynamic[%lu] overwritten with dynamic[%lu]\n", i, i + 1);
      }
      CHECK(dynamics->at(i).d_tag == DT_NULL);
      return;
    }
  }

  // No matching dynamics vector entry was found.
}

// Apply R_ARM_RELATIVE relocations to the file data to which they refer.
// This relocates data into the area it will occupy after the hole in
// .rel.dyn is added or removed.
void AdjustRelocationTargets(Elf* elf,
                             Elf32_Off hole_start,
                             int32_t hole_size,
                             const std::vector<Elf32_Rel>& relocations) {
  Elf_Scn* section = NULL;
  while ((section = elf_nextscn(elf, section)) != NULL) {
    const Elf32_Shdr* section_header = elf32_getshdr(section);

    // Identify this section's start and end addresses.
    const Elf32_Addr section_start = section_header->sh_addr;
    const Elf32_Addr section_end = section_start + section_header->sh_size;

    Elf_Data* data = GetSectionData(section);

    // Ignore sections with no effective data.
    if (data->d_buf == NULL)
      continue;

    // Create a copy-on-write pointer to the section's data.
    uint8_t* area = reinterpret_cast<uint8_t*>(data->d_buf);

    for (size_t i = 0; i < relocations.size(); ++i) {
      const Elf32_Rel* relocation = &relocations[i];
      CHECK(ELF32_R_TYPE(relocation->r_info) == R_ARM_RELATIVE);

      // See if this relocation points into the current section.
      if (relocation->r_offset >= section_start &&
          relocation->r_offset < section_end) {
        Elf32_Addr byte_offset = relocation->r_offset - section_start;
        Elf32_Off* target = reinterpret_cast<Elf32_Off*>(area + byte_offset);

        // Is the relocation's target after the hole's start?
        if (*target > hole_start) {
          // Copy on first write. Recompute target to point into the newly
          // allocated area.
          if (area == data->d_buf) {
            area = new uint8_t[data->d_size];
            memcpy(area, data->d_buf, data->d_size);
            target = reinterpret_cast<Elf32_Off*>(area + byte_offset);
          }

          *target += hole_size;
          VLOG("relocation[%lu] target adjusted to %u\n", i, *target);
        }
      }
    }

    // If we applied any relocation to this section, write it back.
    if (area != data->d_buf) {
      RewriteSectionData(data, area, data->d_size);
    }
  }
}

// Pad relocations with a given number of R_ARM_NONE relocations.
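// For example, PadRelocations(8, &relocations) appends eight 8-byte no-op
// entries, enough to fill a 64-byte alignment gap.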
void PadRelocations(size_t count,
                    std::vector<Elf32_Rel>* relocations) {
  // R_ARM_NONE is zero, so this is a zero-filled entry: r_offset 0 and
  // r_info ELF32_R_INFO(0, R_ARM_NONE).
  const Elf32_Rel r_arm_none = {0, ELF32_R_INFO(0, R_ARM_NONE)};
  std::vector<Elf32_Rel> padding(count, r_arm_none);
  relocations->insert(relocations->end(), padding.begin(), padding.end());
}

// Adjust relocations so that the offset that they indicate will be correct
// after the hole in .rel.dyn is added or removed (in effect, relocate the
// relocations).
void AdjustRelocations(Elf32_Off hole_start,
                       int32_t hole_size,
                       std::vector<Elf32_Rel>* relocations) {
  for (size_t i = 0; i < relocations->size(); ++i) {
    Elf32_Rel* relocation = &relocations->at(i);
    if (relocation->r_offset > hole_start) {
      relocation->r_offset += hole_size;
      VLOG("relocation[%lu] offset adjusted to %u\n", i, relocation->r_offset);
    }
  }
}

// Remove R_ARM_RELATIVE entries from .rel.dyn and write as packed data
// into .android.rel.dyn.
bool ElfFile::PackRelocations() {
  // Load the ELF file into libelf.
  if (!Load()) {
    LOG("ERROR: Failed to load as ELF (elf_error=%d)\n", elf_errno());
    return false;
  }

  // Retrieve the current .rel.dyn section data.
  Elf_Data* data = GetSectionData(rel_dyn_section_);

  // Convert data to a vector of Elf32 relocations.
  const Elf32_Rel* relocations_base = reinterpret_cast<Elf32_Rel*>(data->d_buf);
  std::vector<Elf32_Rel> relocations(
      relocations_base,
      relocations_base + data->d_size / sizeof(relocations[0]));

  std::vector<Elf32_Rel> relative_relocations;
  std::vector<Elf32_Rel> other_relocations;

  // Filter relocations into those that are R_ARM_RELATIVE and others.
  for (size_t i = 0; i < relocations.size(); ++i) {
    const Elf32_Rel& relocation = relocations[i];
    if (ELF32_R_TYPE(relocation.r_info) == R_ARM_RELATIVE) {
      CHECK(ELF32_R_SYM(relocation.r_info) == 0);
      relative_relocations.push_back(relocation);
    } else {
      other_relocations.push_back(relocation);
    }
  }
  LOG("R_ARM_RELATIVE: %lu entries\n", relative_relocations.size());
  LOG("Other         : %lu entries\n", other_relocations.size());
  LOG("Total         : %lu entries\n", relocations.size());

  // If no relative relocations then we have nothing packable. Perhaps
  // the shared object has already been packed?
  if (relative_relocations.empty()) {
    LOG("ERROR: No R_ARM_RELATIVE relocations found (already packed?)\n");
    return false;
  }

  // Unless padding, pre-apply R_ARM_RELATIVE relocations to account for the
  // hole, and pre-adjust all relocation offsets accordingly.
  if (!is_padding_rel_dyn_) {
    // Pre-calculate the size of the hole we will close up when we rewrite
    // .rel.dyn. We have to adjust relocation addresses to account for this.
    Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_);
    const Elf32_Off hole_start = section_header->sh_offset;
    size_t hole_size =
        relative_relocations.size() * sizeof(relative_relocations[0]);
    const size_t unaligned_hole_size = hole_size;

    // Adjust the actual hole size to preserve alignment.
    hole_size -= hole_size % kPreserveAlignment;
    LOG("Compaction    : %lu bytes\n", hole_size);

    // Adjusting for alignment may have removed any packing benefit.
    if (hole_size == 0) {
      LOG("Too few R_ARM_RELATIVE relocations to pack after alignment\n");
      return false;
    }

    // Add R_ARM_NONE relocations to other_relocations to preserve alignment.
    const size_t padding_bytes = unaligned_hole_size - hole_size;
    CHECK(padding_bytes % sizeof(other_relocations[0]) == 0);
    const size_t required = padding_bytes / sizeof(other_relocations[0]);
    PadRelocations(required, &other_relocations);
    LOG("Alignment pad : %lu relocations\n", required);
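
    // For example, 1000 R_ARM_RELATIVE entries occupy 8000 bytes; the hole
    // is rounded down to 7936 bytes (31 * 256) and the leftover 64 bytes
    // become 8 R_ARM_NONE entries.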

    // Apply relocations to all R_ARM_RELATIVE data to relocate it into the
    // area it will occupy once the hole in .rel.dyn is removed.
    AdjustRelocationTargets(elf_, hole_start, -hole_size, relative_relocations);
    // Relocate the relocations.
    AdjustRelocations(hole_start, -hole_size, &relative_relocations);
    AdjustRelocations(hole_start, -hole_size, &other_relocations);
  } else {
    // If padding, add R_ARM_NONE relocations to other_relocations to make it
    // the same size as the original relocations we read in. This makes
    // the ResizeSection() below a no-op.
    const size_t required = relocations.size() - other_relocations.size();
    PadRelocations(required, &other_relocations);
  }

  // Pack R_ARM_RELATIVE relocations.
  const size_t initial_bytes =
      relative_relocations.size() * sizeof(relative_relocations[0]);
  LOG("Unpacked R_ARM_RELATIVE: %lu bytes\n", initial_bytes);
  std::vector<uint8_t> packed;
  RelocationPacker packer;
  packer.PackRelativeRelocations(relative_relocations, &packed);
  const void* packed_data = packed.data();
  const size_t packed_bytes = packed.size() * sizeof(packed[0]);
  LOG("Packed   R_ARM_RELATIVE: %lu bytes\n", packed_bytes);

  // If we have insufficient R_ARM_RELATIVE relocations to form a run then
  // packing fails.
  if (packed.empty()) {
    LOG("Too few R_ARM_RELATIVE relocations to pack\n");
    return false;
  }

  // Run a loopback self-test as a check that packing is lossless.
  std::vector<Elf32_Rel> unpacked;
  packer.UnpackRelativeRelocations(packed, &unpacked);
  CHECK(unpacked.size() == relative_relocations.size());
  for (size_t i = 0; i < unpacked.size(); ++i) {
    CHECK(unpacked[i].r_offset == relative_relocations[i].r_offset);
    CHECK(unpacked[i].r_info == relative_relocations[i].r_info);
  }

  // Make sure packing saved some space.
  if (packed_bytes >= initial_bytes) {
    LOG("Packing R_ARM_RELATIVE relocations saves no space\n");
    return false;
  }

  // Rewrite the current .rel.dyn section to be only the non-R_ARM_RELATIVE
  // relocations, then shrink it to size.
  const void* section_data = &other_relocations[0];
  const size_t bytes = other_relocations.size() * sizeof(other_relocations[0]);
  ResizeSection(elf_, rel_dyn_section_, bytes);
  RewriteSectionData(data, section_data, bytes);

  // Rewrite the current .android.rel.dyn section to hold the packed
  // R_ARM_RELATIVE relocations.
  data = GetSectionData(android_rel_dyn_section_);
  ResizeSection(elf_, android_rel_dyn_section_, packed_bytes);
  RewriteSectionData(data, packed_data, packed_bytes);

  // Rewrite .dynamic to include two new tags describing .android.rel.dyn.
  data = GetSectionData(dynamic_section_);
  const Elf32_Dyn* dynamic_base = reinterpret_cast<Elf32_Dyn*>(data->d_buf);
  std::vector<Elf32_Dyn> dynamics(
      dynamic_base,
      dynamic_base + data->d_size / sizeof(dynamics[0]));
  Elf32_Shdr* section_header = elf32_getshdr(android_rel_dyn_section_);
  // Use two of the spare slots to describe the .android.rel.dyn section.
  const Elf32_Dyn offset_dyn
      = {DT_ANDROID_ARM_REL_OFFSET, {section_header->sh_offset}};
  AddDynamicEntry(offset_dyn, &dynamics);
  const Elf32_Dyn size_dyn
      = {DT_ANDROID_ARM_REL_SIZE, {section_header->sh_size}};
  AddDynamicEntry(size_dyn, &dynamics);
  const void* dynamics_data = &dynamics[0];
  const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]);
  RewriteSectionData(data, dynamics_data, dynamics_bytes);

  return true;
}

// Find packed R_ARM_RELATIVE relocations in .android.rel.dyn, unpack them,
// and rewrite the .rel.dyn section in so_file to contain unpacked data.
bool ElfFile::UnpackRelocations() {
  // Load the ELF file into libelf.
  if (!Load()) {
    LOG("ERROR: Failed to load as ELF (elf_error=%d)\n", elf_errno());
    return false;
  }

  // Retrieve the current .android.rel.dyn section data.
  Elf_Data* data = GetSectionData(android_rel_dyn_section_);

  // Convert data to a vector of bytes.
  const uint8_t* packed_base = reinterpret_cast<uint8_t*>(data->d_buf);
  std::vector<uint8_t> packed(
      packed_base,
      packed_base + data->d_size / sizeof(packed[0]));

  // Properly packed data must begin with "APR1". Check the size first so
  // that a short section cannot cause an out-of-bounds read.
  if (packed.size() < 4 ||
      packed[0] != 'A' || packed[1] != 'P' ||
      packed[2] != 'R' || packed[3] != '1') {
    LOG("ERROR: Packed R_ARM_RELATIVE relocations not found (not packed?)\n");
    return false;
  }
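
  // The "APR1" tag is emitted by RelocationPacker when packing, and acts
  // as a magic number identifying packed relocation data.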

  // Unpack the data to re-materialize the R_ARM_RELATIVE relocations.
  const size_t packed_bytes = packed.size() * sizeof(packed[0]);
  LOG("Packed   R_ARM_RELATIVE: %lu bytes\n", packed_bytes);
  std::vector<Elf32_Rel> relative_relocations;
  RelocationPacker packer;
  packer.UnpackRelativeRelocations(packed, &relative_relocations);
  const size_t unpacked_bytes =
      relative_relocations.size() * sizeof(relative_relocations[0]);
  LOG("Unpacked R_ARM_RELATIVE: %lu bytes\n", unpacked_bytes);

  // Retrieve the current .rel.dyn section data.
  data = GetSectionData(rel_dyn_section_);

  // Interpret data as Elf32 relocations.
  const Elf32_Rel* relocations_base = reinterpret_cast<Elf32_Rel*>(data->d_buf);
  std::vector<Elf32_Rel> relocations(
      relocations_base,
      relocations_base + data->d_size / sizeof(relocations[0]));

  std::vector<Elf32_Rel> other_relocations;
  size_t padding = 0;

  // Filter relocations to locate any that are R_ARM_NONE. These will occur
  // if padding was turned on for packing.
  for (size_t i = 0; i < relocations.size(); ++i) {
    const Elf32_Rel& relocation = relocations[i];
    if (ELF32_R_TYPE(relocation.r_info) != R_ARM_NONE) {
      other_relocations.push_back(relocation);
    } else {
      ++padding;
    }
  }
  LOG("R_ARM_RELATIVE: %lu entries\n", relative_relocations.size());
  LOG("Other         : %lu entries\n", other_relocations.size());

  // If we found the same number of R_ARM_NONE entries in .rel.dyn as we
  // hold as unpacked relative relocations, then this is a padded file.
  const bool is_padded = padding == relative_relocations.size();
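  // In a padded file every packed R_ARM_RELATIVE entry was replaced by an
  // R_ARM_NONE placeholder at packing time, so the two counts match exactly
  // and no offsets need shifting.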

  // Unless padded, pre-apply R_ARM_RELATIVE relocations to account for the
  // hole, and pre-adjust all relocation offsets accordingly.
  if (!is_padded) {
    // Pre-calculate the size of the hole we will open up when we rewrite
    // .rel.dyn. We have to adjust relocation addresses to account for this.
    Elf32_Shdr* section_header = elf32_getshdr(rel_dyn_section_);
    const Elf32_Off hole_start = section_header->sh_offset;
    size_t hole_size =
        relative_relocations.size() * sizeof(relative_relocations[0]);

    // Adjust the hole size for the padding added to preserve alignment.
    hole_size -= padding * sizeof(other_relocations[0]);
    LOG("Expansion     : %lu bytes\n", hole_size);

    // Apply relocations to all R_ARM_RELATIVE data to relocate it into the
    // area it will occupy once the hole in .rel.dyn is opened.
    AdjustRelocationTargets(elf_, hole_start, hole_size, relative_relocations);
    // Relocate the relocations.
    AdjustRelocations(hole_start, hole_size, &relative_relocations);
    AdjustRelocations(hole_start, hole_size, &other_relocations);
  }

  // Rewrite the current .rel.dyn section to be the R_ARM_RELATIVE relocations
  // followed by other relocations. This is the usual order in which we find
  // them after linking, so this action will normally put the entire .rel.dyn
  // section back to its pre-split-and-packed state.
  relocations.assign(relative_relocations.begin(), relative_relocations.end());
  relocations.insert(relocations.end(),
                     other_relocations.begin(), other_relocations.end());
  const void* section_data = &relocations[0];
  const size_t bytes = relocations.size() * sizeof(relocations[0]);
  LOG("Total         : %lu entries\n", relocations.size());
  ResizeSection(elf_, rel_dyn_section_, bytes);
  RewriteSectionData(data, section_data, bytes);

  // Nearly empty the current .android.rel.dyn section. Leaves a four-byte
  // stub so that some data remains allocated to the section. This is a
  // convenience which allows us to re-pack this file again without
  // having to remove the section and then add a new small one with objcopy.
  // The way we resize sections relies on there being some data in a section.
  data = GetSectionData(android_rel_dyn_section_);
  ResizeSection(elf_, android_rel_dyn_section_, sizeof(kStubIdentifier));
  RewriteSectionData(data, &kStubIdentifier, sizeof(kStubIdentifier));

  // Rewrite .dynamic to remove two tags describing .android.rel.dyn.
  data = GetSectionData(dynamic_section_);
  const Elf32_Dyn* dynamic_base = reinterpret_cast<Elf32_Dyn*>(data->d_buf);
  std::vector<Elf32_Dyn> dynamics(
      dynamic_base,
      dynamic_base + data->d_size / sizeof(dynamics[0]));
  RemoveDynamicEntry(DT_ANDROID_ARM_REL_SIZE, &dynamics);
  RemoveDynamicEntry(DT_ANDROID_ARM_REL_OFFSET, &dynamics);
  const void* dynamics_data = &dynamics[0];
  const size_t dynamics_bytes = dynamics.size() * sizeof(dynamics[0]);
  RewriteSectionData(data, dynamics_data, dynamics_bytes);

  return true;
}

// Flush rewritten shared object file data.
void ElfFile::Flush() {
  // Flag all ELF data held in memory as needing to be written back to the
  // file, and tell libelf that we have controlled the file layout.
  elf_flagelf(elf_, ELF_C_SET, ELF_F_DIRTY);
  elf_flagelf(elf_, ELF_C_SET, ELF_F_LAYOUT);
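  // With ELF_F_LAYOUT set, libelf will not recompute offsets or alignment
  // on update, so the file keeps exactly the layout computed above.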

  // Write ELF data back to disk.
  const off_t file_bytes = elf_update(elf_, ELF_C_WRITE);
  CHECK(file_bytes > 0);
  VLOG("elf_update returned: %lu\n", file_bytes);

  // Clean up libelf, and truncate the output file to the number of bytes
  // written by elf_update().
  elf_end(elf_);
  elf_ = NULL;
  const int truncate = ftruncate(fd_, file_bytes);
  CHECK(truncate == 0);
}
1012 } // namespace relocation_packer