X-Git-Url: http://review.tizen.org/git/?a=blobdiff_plain;ds=sidebyside;f=bfd%2Felf64-x86-64.c;h=3881f60fd3da2f46c4d6e517841c51769888ddca;hb=e609377629a12f6518eaf100c1983e23c80fecc1;hp=79b6dc606071aa2281ab6ff90acd0228f6591d66;hpb=20e52bd2b8041df10920960f5882636d420ebb13;p=platform%2Fupstream%2Fbinutils.git diff --git a/bfd/elf64-x86-64.c b/bfd/elf64-x86-64.c index 79b6dc6..3881f60 100644 --- a/bfd/elf64-x86-64.c +++ b/bfd/elf64-x86-64.c @@ -1,7 +1,5 @@ /* X86-64 specific support for ELF - Copyright 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, - 2010, 2011, 2012 - Free Software Foundation, Inc. + Copyright (C) 2000-2014 Free Software Foundation, Inc. Contributed by Jan Hubicka . This file is part of BFD, the Binary File Descriptor library. @@ -172,12 +170,18 @@ static reloc_howto_type x86_64_elf_howto_table[] = HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE, MINUS_ONE, FALSE), + HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed, + bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff, + TRUE), + HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed, + bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff, + TRUE), /* We have a gap in the reloc numbers here. R_X86_64_standard counts the number up to this point, and R_X86_64_vt_offset is the value to subtract from a reloc type of R_X86_64_GNU_VT* to form an index into this table. */ -#define R_X86_64_standard (R_X86_64_RELATIVE64 + 1) +#define R_X86_64_standard (R_X86_64_PLT32_BND + 1) #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard) /* GNU extension to record C++ vtable hierarchy. */ @@ -199,6 +203,7 @@ static reloc_howto_type x86_64_elf_howto_table[] = ( ((TYPE) == R_X86_64_PC8) \ || ((TYPE) == R_X86_64_PC16) \ || ((TYPE) == R_X86_64_PC32) \ + || ((TYPE) == R_X86_64_PC32_BND) \ || ((TYPE) == R_X86_64_PC64)) /* Map BFD relocs to the x86_64 elf relocs. 
*/ @@ -248,6 +253,8 @@ static const struct elf_reloc_map x86_64_reloc_map[] = { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, }, { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, }, { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, }, + { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,}, + { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,}, { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, }, { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, }, }; @@ -348,10 +355,10 @@ elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note) case 296: /* sizeof(istruct elf_prstatus) on Linux/x32 */ /* pr_cursig */ - elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12); + elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12); /* pr_pid */ - elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24); + elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24); /* pr_reg */ offset = 72; @@ -361,11 +368,11 @@ elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note) case 336: /* sizeof(istruct elf_prstatus) on Linux/x86_64 */ /* pr_cursig */ - elf_tdata (abfd)->core_signal + elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12); /* pr_pid */ - elf_tdata (abfd)->core_lwpid + elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 32); /* pr_reg */ @@ -389,20 +396,20 @@ elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note) return FALSE; case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */ - elf_tdata (abfd)->core_pid + elf_tdata (abfd)->core->pid = bfd_get_32 (abfd, note->descdata + 12); - elf_tdata (abfd)->core_program + elf_tdata (abfd)->core->program = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16); - elf_tdata (abfd)->core_command + elf_tdata (abfd)->core->command = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80); break; case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */ - elf_tdata (abfd)->core_pid + elf_tdata (abfd)->core->pid = bfd_get_32 (abfd, note->descdata + 24); - elf_tdata (abfd)->core_program + elf_tdata (abfd)->core->program = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16); - elf_tdata (abfd)->core_command + elf_tdata (abfd)->core->command = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80); } @@ -411,7 +418,7 @@ elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note) implementations, so strip it off if it exists. */ { - char *command = elf_tdata (abfd)->core_command; + char *command = elf_tdata (abfd)->core->command; int n = strlen (command); if (0 < n && command[n - 1] == ' ') @@ -553,6 +560,56 @@ static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] = 0, 0, 0, 0 /* replaced with offset to start of .plt0. */ }; +/* The first entry in a procedure linkage table with BND relocations + like this. */ + +static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] = +{ + 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ + 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */ + 0x0f, 0x1f, 0 /* nopl (%rax) */ +}; + +/* Subsequent entries for legacy branches in a procedure linkage table + with BND relocations look like this. */ + +static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] = +{ + 0x68, 0, 0, 0, 0, /* pushq immediate */ + 0xe9, 0, 0, 0, 0, /* jmpq relative */ + 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */ +}; + +/* Subsequent entries for branches with BND prefx in a procedure linkage + table with BND relocations look like this. 
*/ + +static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] = +{ + 0x68, 0, 0, 0, 0, /* pushq immediate */ + 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */ + 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */ +}; + +/* Entries for legacy branches in the second procedure linkage table + look like this. */ + +static const bfd_byte elf_x86_64_legacy_plt2_entry[8] = +{ + 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ + 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ + 0x66, 0x90 /* xchg %ax,%ax */ +}; + +/* Entries for branches with BND prefix in the second procedure linkage + table look like this. */ + +static const bfd_byte elf_x86_64_bnd_plt2_entry[8] = +{ + 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */ + 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ + 0x90 /* nop */ +}; + /* .eh_frame covering the .plt section. */ static const bfd_byte elf_x86_64_eh_frame_plt[] = @@ -628,9 +685,11 @@ struct elf_x86_64_backend_data unsigned int eh_frame_plt_size; }; +#define get_elf_x86_64_arch_data(bed) \ + ((const struct elf_x86_64_backend_data *) (bed)->arch_data) + #define get_elf_x86_64_backend_data(abfd) \ - ((const struct elf_x86_64_backend_data *) \ - get_elf_backend_data (abfd)->arch_data) + get_elf_x86_64_arch_data (get_elf_backend_data (abfd)) #define GET_PLT_ENTRY_SIZE(abfd) \ get_elf_x86_64_backend_data (abfd)->plt_entry_size @@ -654,6 +713,24 @@ static const struct elf_x86_64_backend_data elf_x86_64_arch_bed = sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */ }; +static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed = + { + elf_x86_64_bnd_plt0_entry, /* plt0_entry */ + elf_x86_64_bnd_plt_entry, /* plt_entry */ + sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */ + 2, /* plt0_got1_offset */ + 1+8, /* plt0_got2_offset */ + 1+12, /* plt0_got2_insn_end */ + 1+2, /* plt_got_offset */ + 1, /* plt_reloc_offset */ + 7, /* plt_plt_offset */ + 1+6, /* plt_got_insn_size */ + 11, /* plt_plt_insn_end */ + 0, /* plt_lazy_offset */ + elf_x86_64_eh_frame_plt, /* eh_frame_plt */ + sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */ + }; + #define elf_backend_arch_data &elf_x86_64_arch_bed /* x86-64 ELF linker hash entry. */ @@ -680,6 +757,13 @@ struct elf_x86_64_link_hash_entry (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type)) unsigned char tls_type; + /* TRUE if symbol has at least one BND relocation. */ + bfd_boolean has_bnd_reloc; + + /* Information about the second PLT entry. Filled when has_bnd_reloc is + set. */ + union gotplt_union plt_bnd; + /* Offset of the GOTPLT entry reserved for the TLS descriptor, starting at the end of the jump table. */ bfd_vma tlsdesc_got; @@ -730,6 +814,7 @@ struct elf_x86_64_link_hash_table asection *sdynbss; asection *srelbss; asection *plt_eh_frame; + asection *plt_bnd; union { @@ -807,6 +892,8 @@ elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry, eh = (struct elf_x86_64_link_hash_entry *) entry; eh->dyn_relocs = NULL; eh->tls_type = GOT_UNKNOWN; + eh->has_bnd_reloc = FALSE; + eh->plt_bnd.offset = (bfd_vma) -1; eh->tlsdesc_got = (bfd_vma) -1; } @@ -880,6 +967,21 @@ elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab, return &ret->elf; } +/* Destroy an X86-64 ELF linker hash table. 
*/ + +static void +elf_x86_64_link_hash_table_free (bfd *obfd) +{ + struct elf_x86_64_link_hash_table *htab + = (struct elf_x86_64_link_hash_table *) obfd->link.hash; + + if (htab->loc_hash_table) + htab_delete (htab->loc_hash_table); + if (htab->loc_hash_memory) + objalloc_free ((struct objalloc *) htab->loc_hash_memory); + _bfd_elf_link_hash_table_free (obfd); +} + /* Create an X86-64 ELF linker hash table. */ static struct bfd_link_hash_table * @@ -888,7 +990,7 @@ elf_x86_64_link_hash_table_create (bfd *abfd) struct elf_x86_64_link_hash_table *ret; bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table); - ret = (struct elf_x86_64_link_hash_table *) bfd_malloc (amt); + ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt); if (ret == NULL) return NULL; @@ -901,18 +1003,6 @@ elf_x86_64_link_hash_table_create (bfd *abfd) return NULL; } - ret->sdynbss = NULL; - ret->srelbss = NULL; - ret->plt_eh_frame = NULL; - ret->sym_cache.abfd = NULL; - ret->tlsdesc_plt = 0; - ret->tlsdesc_got = 0; - ret->tls_ld_got.refcount = 0; - ret->sgotplt_jump_table_size = 0; - ret->tls_module_base = NULL; - ret->next_jump_slot_index = 0; - ret->next_irelative_index = 0; - if (ABI_64_P (abfd)) { ret->r_info = elf64_r_info; @@ -937,28 +1027,14 @@ elf_x86_64_link_hash_table_create (bfd *abfd) ret->loc_hash_memory = objalloc_create (); if (!ret->loc_hash_table || !ret->loc_hash_memory) { - free (ret); + elf_x86_64_link_hash_table_free (abfd); return NULL; } + ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free; return &ret->elf.root; } -/* Destroy an X86-64 ELF linker hash table. */ - -static void -elf_x86_64_link_hash_table_free (struct bfd_link_hash_table *hash) -{ - struct elf_x86_64_link_hash_table *htab - = (struct elf_x86_64_link_hash_table *) hash; - - if (htab->loc_hash_table) - htab_delete (htab->loc_hash_table); - if (htab->loc_hash_memory) - objalloc_free ((struct objalloc *) htab->loc_hash_memory); - _bfd_generic_link_hash_table_free (hash); -} - /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts to them in our hash table. */ @@ -1012,6 +1088,9 @@ elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info, edir = (struct elf_x86_64_link_hash_entry *) dir; eind = (struct elf_x86_64_link_hash_entry *) ind; + if (!edir->has_bnd_reloc) + edir->has_bnd_reloc = eind->has_bnd_reloc; + if (eind->dyn_relocs != NULL) { if (edir->dyn_relocs != NULL) @@ -1099,6 +1178,7 @@ elf_x86_64_check_tls_transition (bfd *abfd, { unsigned int val; unsigned long r_symndx; + bfd_boolean largepic = FALSE; struct elf_link_hash_entry *h; bfd_vma offset; struct elf_x86_64_link_hash_table *htab; @@ -1136,16 +1216,32 @@ elf_x86_64_check_tls_transition (bfd *abfd, can transit to different access model. For 32bit, only leaq foo@tlsgd(%rip), %rdi .word 0x6666; rex64; call __tls_get_addr - can transit to different access model. */ + can transit to different access model. For largepic + we also support: + leaq foo@tlsgd(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq $rbx, %rax + call *%rax. 
*/ static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 }; static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d }; - if ((offset + 12) > sec->size - || memcmp (contents + offset + 4, call, 4) != 0) + if ((offset + 12) > sec->size) return FALSE; - if (ABI_64_P (abfd)) + if (memcmp (contents + offset + 4, call, 4) != 0) + { + if (!ABI_64_P (abfd) + || (offset + 19) > sec->size + || offset < 3 + || memcmp (contents + offset - 3, leaq + 1, 3) != 0 + || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0 + || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5) + != 0) + return FALSE; + largepic = TRUE; + } + else if (ABI_64_P (abfd)) { if (offset < 4 || memcmp (contents + offset - 4, leaq, 4) != 0) @@ -1163,16 +1259,31 @@ elf_x86_64_check_tls_transition (bfd *abfd, /* Check transition from LD access model. Only leaq foo@tlsld(%rip), %rdi; call __tls_get_addr - can transit to different access model. */ + can transit to different access model. For largepic + we also support: + leaq foo@tlsld(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq $rbx, %rax + call *%rax. */ static const unsigned char lea[] = { 0x48, 0x8d, 0x3d }; if (offset < 3 || (offset + 9) > sec->size) return FALSE; - if (memcmp (contents + offset - 3, lea, 3) != 0 - || 0xe8 != *(contents + offset + 4)) + if (memcmp (contents + offset - 3, lea, 3) != 0) return FALSE; + + if (0xe8 != *(contents + offset + 4)) + { + if (!ABI_64_P (abfd) + || (offset + 19) > sec->size + || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0 + || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5) + != 0) + return FALSE; + largepic = TRUE; + } } r_symndx = htab->r_sym (rel[1].r_info); @@ -1184,8 +1295,10 @@ elf_x86_64_check_tls_transition (bfd *abfd, may be versioned. */ return (h != NULL && h->root.root.string != NULL - && (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32 - || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32) + && (largepic + ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64 + : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32 + || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32)) && (strncmp (h->root.root.string, "__tls_get_addr", 14) == 0)); @@ -1514,12 +1627,60 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, default: break; - case R_X86_64_32S: + case R_X86_64_PC32_BND: + case R_X86_64_PLT32_BND: + case R_X86_64_PC32: + case R_X86_64_PLT32: case R_X86_64_32: case R_X86_64_64: - case R_X86_64_PC32: + /* MPX PLT is supported only if elf_x86_64_arch_bed + is used in 64-bit mode. */ + if (ABI_64_P (abfd) + && info->bndplt + && (get_elf_x86_64_backend_data (abfd) + == &elf_x86_64_arch_bed)) + { + elf_x86_64_hash_entry (h)->has_bnd_reloc = TRUE; + + /* Create the second PLT for Intel MPX support. 
*/ + if (htab->plt_bnd == NULL) + { + unsigned int plt_bnd_align; + const struct elf_backend_data *bed; + + bed = get_elf_backend_data (info->output_bfd); + switch (sizeof (elf_x86_64_bnd_plt2_entry)) + { + case 8: + plt_bnd_align = 3; + break; + case 16: + plt_bnd_align = 4; + break; + default: + abort (); + } + + if (htab->elf.dynobj == NULL) + htab->elf.dynobj = abfd; + htab->plt_bnd + = bfd_make_section_anyway_with_flags (htab->elf.dynobj, + ".plt.bnd", + (bed->dynamic_sec_flags + | SEC_ALLOC + | SEC_CODE + | SEC_LOAD + | SEC_READONLY)); + if (htab->plt_bnd == NULL + || !bfd_set_section_alignment (htab->elf.dynobj, + htab->plt_bnd, + plt_bnd_align)) + return FALSE; + } + } + + case R_X86_64_32S: case R_X86_64_PC64: - case R_X86_64_PLT32: case R_X86_64_GOTPCREL: case R_X86_64_GOTPCREL64: if (htab->elf.dynobj == NULL) @@ -1531,6 +1692,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, /* It is referenced by a non-shared object. */ h->ref_regular = 1; + h->root.non_ir_ref = 1; } if (! elf_x86_64_tls_transition (info, abfd, sec, NULL, @@ -1681,6 +1843,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, break; case R_X86_64_PLT32: + case R_X86_64_PLT32_BND: /* This symbol requires a procedure linkage table entry. We actually build the entry in adjust_dynamic_symbol, because this might be a case of linking PIC code which is @@ -1741,6 +1904,7 @@ elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, case R_X86_64_PC8: case R_X86_64_PC16: case R_X86_64_PC32: + case R_X86_64_PC32_BND: case R_X86_64_PC64: case R_X86_64_64: pointer: @@ -1757,7 +1921,9 @@ pointer: /* We may need a .plt entry if the function this reloc refers to is in a shared lib. */ h->plt.refcount += 1; - if (r_type != R_X86_64_PC32 && r_type != R_X86_64_PC64) + if (r_type != R_X86_64_PC32 + && r_type != R_X86_64_PC32_BND + && r_type != R_X86_64_PC64) h->pointer_equality_needed = 1; } @@ -2041,6 +2207,7 @@ elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info, case R_X86_64_PC8: case R_X86_64_PC16: case R_X86_64_PC32: + case R_X86_64_PC32_BND: case R_X86_64_PC64: case R_X86_64_SIZE32: case R_X86_64_SIZE64: @@ -2050,6 +2217,7 @@ elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info, /* Fall thru */ case R_X86_64_PLT32: + case R_X86_64_PLT32_BND: case R_X86_64_PLTOFF64: if (h != NULL) { @@ -2268,10 +2436,28 @@ elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) here if it is defined and referenced in a non-shared object. */ if (h->type == STT_GNU_IFUNC && h->def_regular) - return _bfd_elf_allocate_ifunc_dyn_relocs (info, h, - &eh->dyn_relocs, - plt_entry_size, - GOT_ENTRY_SIZE); + { + if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h, + &eh->dyn_relocs, + plt_entry_size, + plt_entry_size, + GOT_ENTRY_SIZE)) + { + asection *s = htab->plt_bnd; + if (h->plt.offset != (bfd_vma) -1 && s != NULL) + { + /* Use the .plt.bnd section if it is created. */ + eh->plt_bnd.offset = s->size; + + /* Make room for this entry in the .plt.bnd section. */ + s->size += sizeof (elf_x86_64_legacy_plt2_entry); + } + + return TRUE; + } + else + return FALSE; + } else if (htab->elf.dynamic_sections_created && h->plt.refcount > 0) { @@ -2288,13 +2474,16 @@ elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h)) { asection *s = htab->elf.splt; + asection *bnd_s = htab->plt_bnd; /* If this is the first .plt entry, make room for the special first entry. 
*/ if (s->size == 0) - s->size += plt_entry_size; + s->size = plt_entry_size; h->plt.offset = s->size; + if (bnd_s) + eh->plt_bnd.offset = bnd_s->size; /* If this symbol is not defined in a regular file, and we are not generating a shared library, then set the symbol to this @@ -2304,12 +2493,28 @@ elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) if (! info->shared && !h->def_regular) { - h->root.u.def.section = s; - h->root.u.def.value = h->plt.offset; + if (bnd_s) + { + /* We need to make a call to the entry of the second + PLT instead of regular PLT entry. */ + h->root.u.def.section = bnd_s; + h->root.u.def.value = eh->plt_bnd.offset; + } + else + { + h->root.u.def.section = s; + h->root.u.def.value = h->plt.offset; + } } /* Make room for this entry. */ s->size += plt_entry_size; + if (bnd_s) + { + BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) + == sizeof (elf_x86_64_legacy_plt2_entry)); + bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry); + } /* We also need to make an entry in the .got.plt section, which will be placed in the .got section by the linker script. */ @@ -2573,7 +2778,7 @@ elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec, /* Nothing to do if there are no codes, no relocations or no output. */ if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC) || sec->reloc_count == 0 - || discarded_section (sec)) + || bfd_is_abs_section (sec->output_section)) return TRUE; symtab_hdr = &elf_tdata (abfd)->symtab_hdr; @@ -2732,7 +2937,7 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, /* Set up .got offsets for local syms, and space for local dynamic relocs. */ - for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next) + for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next) { bfd_signed_vma *local_got; bfd_signed_vma *end_local_got; @@ -2916,7 +3121,7 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, && _bfd_elf_eh_frame_present (info)) { const struct elf_x86_64_backend_data *arch_data - = (const struct elf_x86_64_backend_data *) bed->arch_data; + = get_elf_x86_64_arch_data (bed); htab->plt_eh_frame->size = arch_data->eh_frame_plt_size; } @@ -2933,6 +3138,7 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, || s == htab->elf.sgotplt || s == htab->elf.iplt || s == htab->elf.igotplt + || s == htab->plt_bnd || s == htab->plt_eh_frame || s == htab->sdynbss) { @@ -2988,7 +3194,7 @@ elf_x86_64_size_dynamic_sections (bfd *output_bfd, && htab->plt_eh_frame->contents != NULL) { const struct elf_x86_64_backend_data *arch_data - = (const struct elf_x86_64_backend_data *) bed->arch_data; + = get_elf_x86_64_arch_data (bed); memcpy (htab->plt_eh_frame->contents, arch_data->eh_frame_plt, htab->plt_eh_frame->size); @@ -3211,14 +3417,15 @@ elf_x86_64_relocate_section (bfd *output_bfd, reloc_howto_type *howto; unsigned long r_symndx; struct elf_link_hash_entry *h; + struct elf_x86_64_link_hash_entry *eh; Elf_Internal_Sym *sym; asection *sec; - bfd_vma off, offplt; + bfd_vma off, offplt, plt_offset; bfd_vma relocation; bfd_boolean unresolved_reloc; bfd_reloc_status_type r; int tls_type; - asection *base_got; + asection *base_got, *resolved_plt; bfd_vma st_size; r_type = ELF32_R_TYPE (rel->r_info); @@ -3272,11 +3479,12 @@ elf_x86_64_relocate_section (bfd *output_bfd, else { bfd_boolean warned ATTRIBUTE_UNUSED; + bfd_boolean ignored ATTRIBUTE_UNUSED; RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel, r_symndx, symtab_hdr, sym_hashes, h, sec, relocation, - unresolved_reloc, warned); + unresolved_reloc, warned, 
ignored); st_size = h->size; } @@ -3305,13 +3513,14 @@ elf_x86_64_relocate_section (bfd *output_bfd, } } + eh = (struct elf_x86_64_link_hash_entry *) h; + /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it here if it is defined in a non-shared object. */ if (h != NULL && h->type == STT_GNU_IFUNC && h->def_regular) { - asection *plt; bfd_vma plt_index; const char *name; @@ -3320,9 +3529,27 @@ elf_x86_64_relocate_section (bfd *output_bfd, abort (); /* STT_GNU_IFUNC symbol must go through PLT. */ - plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt; - relocation = (plt->output_section->vma - + plt->output_offset + h->plt.offset); + if (htab->elf.splt != NULL) + { + if (htab->plt_bnd != NULL) + { + resolved_plt = htab->plt_bnd; + plt_offset = eh->plt_bnd.offset; + } + else + { + resolved_plt = htab->elf.splt; + plt_offset = h->plt.offset; + } + } + else + { + resolved_plt = htab->elf.iplt; + plt_offset = h->plt.offset; + } + + relocation = (resolved_plt->output_section->vma + + resolved_plt->output_offset + plt_offset); switch (r_type) { @@ -3414,8 +3641,10 @@ elf_x86_64_relocate_section (bfd *output_bfd, } /* FALLTHROUGH */ case R_X86_64_PC32: + case R_X86_64_PC32_BND: case R_X86_64_PC64: case R_X86_64_PLT32: + case R_X86_64_PLT32_BND: goto do_relocation; case R_X86_64_GOTPCREL: @@ -3649,9 +3878,20 @@ elf_x86_64_relocate_section (bfd *output_bfd, && h->plt.offset != (bfd_vma) -1 && htab->elf.splt != NULL) { - relocation = (htab->elf.splt->output_section->vma - + htab->elf.splt->output_offset - + h->plt.offset); + if (htab->plt_bnd != NULL) + { + resolved_plt = htab->plt_bnd; + plt_offset = eh->plt_bnd.offset; + } + else + { + resolved_plt = htab->elf.splt; + plt_offset = h->plt.offset; + } + + relocation = (resolved_plt->output_section->vma + + resolved_plt->output_offset + + plt_offset); unresolved_reloc = FALSE; } @@ -3660,6 +3900,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, break; case R_X86_64_PLT32: + case R_X86_64_PLT32_BND: /* Relocation is to the entry for this symbol in the procedure linkage table. 
*/ @@ -3677,9 +3918,20 @@ elf_x86_64_relocate_section (bfd *output_bfd, break; } - relocation = (htab->elf.splt->output_section->vma - + htab->elf.splt->output_offset - + h->plt.offset); + if (htab->plt_bnd != NULL) + { + resolved_plt = htab->plt_bnd; + plt_offset = eh->plt_bnd.offset; + } + else + { + resolved_plt = htab->elf.splt; + plt_offset = h->plt.offset; + } + + relocation = (resolved_plt->output_section->vma + + resolved_plt->output_offset + + plt_offset); unresolved_reloc = FALSE; break; @@ -3692,6 +3944,7 @@ elf_x86_64_relocate_section (bfd *output_bfd, case R_X86_64_PC8: case R_X86_64_PC16: case R_X86_64_PC32: + case R_X86_64_PC32_BND: if (info->shared && (input_section->flags & SEC_ALLOC) != 0 && (input_section->flags & SEC_READONLY) != 0 @@ -3699,7 +3952,8 @@ elf_x86_64_relocate_section (bfd *output_bfd, { bfd_boolean fail = FALSE; bfd_boolean branch - = (r_type == R_X86_64_PC32 + = ((r_type == R_X86_64_PC32 + || r_type == R_X86_64_PC32_BND) && is_32bit_relative_branch (contents, rel->r_offset)); if (SYMBOL_REFERENCES_LOCAL (info, h)) @@ -3957,8 +4211,26 @@ direct: .word 0x6666; rex64; call __tls_get_addr into: movl %fs:0, %eax - leaq foo@tpoff(%rax), %rax */ - if (ABI_64_P (output_bfd)) + leaq foo@tpoff(%rax), %rax + For largepic, change: + leaq foo@tlsgd(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq %rbx, %rax + call *%rax + into: + movq %fs:0, %rax + leaq foo@tpoff(%rax), %rax + nopw 0x0(%rax,%rax,1) */ + int largepic = 0; + if (ABI_64_P (output_bfd) + && contents[roff + 5] == (bfd_byte) '\xb8') + { + memcpy (contents + roff - 3, + "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80" + "\0\0\0\0\x66\x0f\x1f\x44\0", 22); + largepic = 1; + } + else if (ABI_64_P (output_bfd)) memcpy (contents + roff - 4, "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", 16); @@ -3968,8 +4240,8 @@ direct: 15); bfd_put_32 (output_bfd, elf_x86_64_tpoff (info, relocation), - contents + roff + 8); - /* Skip R_X86_64_PC32/R_X86_64_PLT32. */ + contents + roff + 8 + largepic); + /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */ rel++; continue; } @@ -4010,17 +4282,27 @@ direct: else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF) { /* IE->LE transition: - Originally it can be one of: + For 64bit, originally it can be one of: movq foo@gottpoff(%rip), %reg addq foo@gottpoff(%rip), %reg We change it into: movq $foo, %reg leaq foo(%reg), %reg - addq $foo, %reg. */ + addq $foo, %reg. + For 32bit, originally it can be one of: + movq foo@gottpoff(%rip), %reg + addl foo@gottpoff(%rip), %reg + We change it into: + movq $foo, %reg + leal foo(%reg), %reg + addl $foo, %reg. 
*/ unsigned int val, type, reg; - val = bfd_get_8 (input_bfd, contents + roff - 3); + if (roff >= 3) + val = bfd_get_8 (input_bfd, contents + roff - 3); + else + val = 0; type = bfd_get_8 (input_bfd, contents + roff - 2); reg = bfd_get_8 (input_bfd, contents + roff - 1); reg >>= 3; @@ -4040,8 +4322,8 @@ direct: } else if (reg == 4) { - /* addq -> addq - addressing with %rsp/%r12 is - special */ + /* addq/addl -> addq/addl - addressing with %rsp/%r12 + is special */ if (val == 0x4c) bfd_put_8 (output_bfd, 0x49, contents + roff - 3); @@ -4055,7 +4337,7 @@ direct: } else { - /* addq -> leaq */ + /* addq/addl -> leaq/leal */ if (val == 0x4c) bfd_put_8 (output_bfd, 0x4d, contents + roff - 3); @@ -4204,8 +4486,26 @@ direct: .word 0x6666; rex64; call __tls_get_addr@plt into: movl %fs:0, %eax - addq foo@gottpoff(%rip), %rax */ - if (ABI_64_P (output_bfd)) + addq foo@gottpoff(%rip), %rax + For largepic, change: + leaq foo@tlsgd(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq %rbx, %rax + call *%rax + into: + movq %fs:0, %rax + addq foo@gottpoff(%rax), %rax + nopw 0x0(%rax,%rax,1) */ + int largepic = 0; + if (ABI_64_P (output_bfd) + && contents[roff + 5] == (bfd_byte) '\xb8') + { + memcpy (contents + roff - 3, + "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05" + "\0\0\0\0\x66\x0f\x1f\x44\0", 22); + largepic = 1; + } + else if (ABI_64_P (output_bfd)) memcpy (contents + roff - 4, "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", 16); @@ -4217,12 +4517,13 @@ direct: relocation = (htab->elf.sgot->output_section->vma + htab->elf.sgot->output_offset + off - roff + - largepic - input_section->output_section->vma - input_section->output_offset - 12); bfd_put_32 (output_bfd, relocation, - contents + roff + 8); - /* Skip R_X86_64_PLT32. */ + contents + roff + 8 + largepic); + /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */ rel++; continue; } @@ -4284,16 +4585,29 @@ direct: For 64bit, we change it into: .word 0x6666; .byte 0x66; movq %fs:0, %rax. For 32bit, we change it into: - nopl 0x0(%rax); movl %fs:0, %eax. */ + nopl 0x0(%rax); movl %fs:0, %eax. + For largepic, change: + leaq foo@tlsgd(%rip), %rdi + movabsq $__tls_get_addr@pltoff, %rax + addq %rbx, %rax + call *%rax + into: + data32 data32 data32 nopw %cs:0x0(%rax,%rax,1) + movq %fs:0, %eax */ BFD_ASSERT (r_type == R_X86_64_TPOFF32); - if (ABI_64_P (output_bfd)) + if (ABI_64_P (output_bfd) + && contents[rel->r_offset + 5] == (bfd_byte) '\xb8') + memcpy (contents + rel->r_offset - 3, + "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0" + "\x64\x48\x8b\x04\x25\0\0\0", 22); + else if (ABI_64_P (output_bfd)) memcpy (contents + rel->r_offset - 3, "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12); else memcpy (contents + rel->r_offset - 3, "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12); - /* Skip R_X86_64_PC32/R_X86_64_PLT32. */ + /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. 
*/ rel++; continue; } @@ -4342,6 +4656,11 @@ direct: relocation = elf_x86_64_tpoff (info, relocation); break; + case R_X86_64_DTPOFF64: + BFD_ASSERT ((input_section->flags & SEC_CODE) == 0); + relocation -= elf_x86_64_dtpoff_base (info); + break; + default: break; } @@ -4420,20 +4739,28 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, Elf_Internal_Sym *sym ATTRIBUTE_UNUSED) { struct elf_x86_64_link_hash_table *htab; - const struct elf_x86_64_backend_data *const abed - = get_elf_x86_64_backend_data (output_bfd); + const struct elf_x86_64_backend_data *abed; + bfd_boolean use_plt_bnd; htab = elf_x86_64_hash_table (info); if (htab == NULL) return FALSE; + /* Use MPX backend data in case of BND relocation. Use .plt_bnd + section only if there is .plt section. */ + use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL; + abed = (use_plt_bnd + ? &elf_x86_64_bnd_arch_bed + : get_elf_x86_64_backend_data (output_bfd)); + if (h->plt.offset != (bfd_vma) -1) { bfd_vma plt_index; - bfd_vma got_offset; + bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset; + bfd_vma plt_plt_insn_end, plt_got_insn_size; Elf_Internal_Rela rela; bfd_byte *loc; - asection *plt, *gotplt, *relplt; + asection *plt, *gotplt, *relplt, *resolved_plt; const struct elf_backend_data *bed; /* When building a static executable, use .iplt, .igot.plt and @@ -4484,9 +4811,56 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, got_offset = got_offset * GOT_ENTRY_SIZE; } - /* Fill in the entry in the procedure linkage table. */ - memcpy (plt->contents + h->plt.offset, abed->plt_entry, - abed->plt_entry_size); + plt_plt_insn_end = abed->plt_plt_insn_end; + plt_plt_offset = abed->plt_plt_offset; + plt_got_insn_size = abed->plt_got_insn_size; + plt_got_offset = abed->plt_got_offset; + if (use_plt_bnd) + { + /* Use the second PLT with BND relocations. */ + const bfd_byte *plt_entry, *plt2_entry; + struct elf_x86_64_link_hash_entry *eh + = (struct elf_x86_64_link_hash_entry *) h; + + if (eh->has_bnd_reloc) + { + plt_entry = elf_x86_64_bnd_plt_entry; + plt2_entry = elf_x86_64_bnd_plt2_entry; + } + else + { + plt_entry = elf_x86_64_legacy_plt_entry; + plt2_entry = elf_x86_64_legacy_plt2_entry; + + /* Subtract 1 since there is no BND prefix. */ + plt_plt_insn_end -= 1; + plt_plt_offset -= 1; + plt_got_insn_size -= 1; + plt_got_offset -= 1; + } + + BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry) + == sizeof (elf_x86_64_legacy_plt_entry)); + + /* Fill in the entry in the procedure linkage table. */ + memcpy (plt->contents + h->plt.offset, + plt_entry, sizeof (elf_x86_64_legacy_plt_entry)); + /* Fill in the entry in the second PLT. */ + memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset, + plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry)); + + resolved_plt = htab->plt_bnd; + plt_offset = eh->plt_bnd.offset; + } + else + { + /* Fill in the entry in the procedure linkage table. */ + memcpy (plt->contents + h->plt.offset, abed->plt_entry, + abed->plt_entry_size); + + resolved_plt = plt; + plt_offset = h->plt.offset; + } /* Insert the relocation positions of the plt section. 
*/ @@ -4496,11 +4870,11 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, (gotplt->output_section->vma + gotplt->output_offset + got_offset - - plt->output_section->vma - - plt->output_offset - - h->plt.offset - - abed->plt_got_insn_size), - plt->contents + h->plt.offset + abed->plt_got_offset); + - resolved_plt->output_section->vma + - resolved_plt->output_offset + - plt_offset + - plt_got_insn_size), + resolved_plt->contents + plt_offset + plt_got_offset); /* Fill in the entry in the global offset table, initially this points to the second part of the PLT entry. */ @@ -4542,8 +4916,8 @@ elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, bfd_put_32 (output_bfd, plt_index, plt->contents + h->plt.offset + abed->plt_reloc_offset); /* Put offset for jmp .PLT0. */ - bfd_put_32 (output_bfd, - (h->plt.offset + abed->plt_plt_insn_end), - plt->contents + h->plt.offset + abed->plt_plt_offset); + bfd_put_32 (output_bfd, - (h->plt.offset + plt_plt_insn_end), + plt->contents + h->plt.offset + plt_plt_offset); } bed = get_elf_backend_data (output_bfd); @@ -4678,7 +5052,9 @@ elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf) dynamic linker, before writing them out. */ static enum elf_reloc_type_class -elf_x86_64_reloc_type_class (const Elf_Internal_Rela *rela) +elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED, + const asection *rel_sec ATTRIBUTE_UNUSED, + const Elf_Internal_Rela *rela) { switch ((int) ELF32_R_TYPE (rela->r_info)) { @@ -4703,13 +5079,18 @@ elf_x86_64_finish_dynamic_sections (bfd *output_bfd, struct elf_x86_64_link_hash_table *htab; bfd *dynobj; asection *sdyn; - const struct elf_x86_64_backend_data *const abed - = get_elf_x86_64_backend_data (output_bfd); + const struct elf_x86_64_backend_data *abed; htab = elf_x86_64_hash_table (info); if (htab == NULL) return FALSE; + /* Use MPX backend data in case of BND relocation. Use .plt_bnd + section only if there is .plt section. */ + abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL + ? &elf_x86_64_bnd_arch_bed + : get_elf_x86_64_backend_data (output_bfd)); + dynobj = htab->elf.dynobj; sdyn = bfd_get_linker_section (dynobj, ".dynamic"); @@ -4850,6 +5231,10 @@ elf_x86_64_finish_dynamic_sections (bfd *output_bfd, } } + if (htab->plt_bnd != NULL) + elf_section_data (htab->plt_bnd->output_section) + ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry); + if (htab->elf.sgotplt) { if (bfd_is_abs_section (htab->elf.sgotplt->output_section)) @@ -4918,24 +5303,208 @@ elf_x86_64_finish_dynamic_sections (bfd *output_bfd, return TRUE; } -/* Return address for Ith PLT stub in section PLT, for relocation REL - or (bfd_vma) -1 if it should not be included. */ +/* Return address in section PLT for the Ith GOTPLT relocation, for + relocation REL or (bfd_vma) -1 if it should not be included. */ static bfd_vma elf_x86_64_plt_sym_val (bfd_vma i, const asection *plt, - const arelent *rel ATTRIBUTE_UNUSED) + const arelent *rel) +{ + bfd *abfd; + const struct elf_x86_64_backend_data *bed; + bfd_vma plt_offset; + + /* Only match R_X86_64_JUMP_SLOT and R_X86_64_IRELATIVE. 
*/ + if (rel->howto->type != R_X86_64_JUMP_SLOT + && rel->howto->type != R_X86_64_IRELATIVE) + return (bfd_vma) -1; + + abfd = plt->owner; + bed = get_elf_x86_64_backend_data (abfd); + plt_offset = bed->plt_entry_size; + + if (elf_elfheader (abfd)->e_ident[EI_OSABI] != ELFOSABI_GNU) + return plt->vma + (i + 1) * plt_offset; + + while (plt_offset < plt->size) + { + bfd_vma reloc_index; + bfd_byte reloc_index_raw[4]; + + if (!bfd_get_section_contents (abfd, (asection *) plt, + reloc_index_raw, + plt_offset + bed->plt_reloc_offset, + sizeof (reloc_index_raw))) + return (bfd_vma) -1; + + reloc_index = H_GET_32 (abfd, reloc_index_raw); + if (reloc_index == i) + return plt->vma + plt_offset; + plt_offset += bed->plt_entry_size; + } + + abort (); +} + +/* Return offset in .plt.bnd section for the Ith GOTPLT relocation with + PLT section, or (bfd_vma) -1 if it should not be included. */ + +static bfd_vma +elf_x86_64_plt_sym_val_offset_plt_bnd (bfd_vma i, const asection *plt) +{ + const struct elf_x86_64_backend_data *bed = &elf_x86_64_bnd_arch_bed; + bfd *abfd = plt->owner; + bfd_vma plt_offset = bed->plt_entry_size; + + if (elf_elfheader (abfd)->e_ident[EI_OSABI] != ELFOSABI_GNU) + return i * sizeof (elf_x86_64_legacy_plt2_entry); + + while (plt_offset < plt->size) + { + bfd_vma reloc_index; + bfd_byte reloc_index_raw[4]; + + if (!bfd_get_section_contents (abfd, (asection *) plt, + reloc_index_raw, + plt_offset + bed->plt_reloc_offset, + sizeof (reloc_index_raw))) + return (bfd_vma) -1; + + reloc_index = H_GET_32 (abfd, reloc_index_raw); + if (reloc_index == i) + { + /* This is the index in .plt section. */ + long plt_index = plt_offset / bed->plt_entry_size; + /* Return the offset in .plt.bnd section. */ + return (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry); + } + plt_offset += bed->plt_entry_size; + } + + abort (); +} + +/* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section + support. */ + +static long +elf_x86_64_get_synthetic_symtab (bfd *abfd, + long symcount, + asymbol **syms, + long dynsymcount, + asymbol **dynsyms, + asymbol **ret) { - return plt->vma + (i + 1) * GET_PLT_ENTRY_SIZE (plt->owner); + const struct elf_backend_data *bed = get_elf_backend_data (abfd); + asection *relplt; + asymbol *s; + bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean); + arelent *p; + long count, i, n; + size_t size; + Elf_Internal_Shdr *hdr; + char *names; + asection *plt, *plt_push; + + plt_push = bfd_get_section_by_name (abfd, ".plt"); + if (plt_push == NULL) + return 0; + + plt = bfd_get_section_by_name (abfd, ".plt.bnd"); + /* Use the generic ELF version if there is no .plt.bnd section. */ + if (plt == NULL) + return _bfd_elf_get_synthetic_symtab (abfd, symcount, syms, + dynsymcount, dynsyms, ret); + + *ret = NULL; + + if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0) + return 0; + + if (dynsymcount <= 0) + return 0; + + relplt = bfd_get_section_by_name (abfd, ".rela.plt"); + if (relplt == NULL) + return 0; + + hdr = &elf_section_data (relplt)->this_hdr; + if (hdr->sh_link != elf_dynsymtab (abfd) + || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA)) + return 0; + + slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table; + if (! 
(*slurp_relocs) (abfd, relplt, dynsyms, TRUE)) + return -1; + + count = relplt->size / hdr->sh_entsize; + size = count * sizeof (asymbol); + p = relplt->relocation; + for (i = 0; i < count; i++, p += bed->s->int_rels_per_ext_rel) + { + size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt"); + if (p->addend != 0) + size += sizeof ("+0x") - 1 + 8 + 8; + } + + s = *ret = (asymbol *) bfd_malloc (size); + if (s == NULL) + return -1; + + names = (char *) (s + count); + p = relplt->relocation; + n = 0; + for (i = 0; i < count; i++, p++) + { + bfd_vma offset; + size_t len; + + if (p->howto->type != R_X86_64_JUMP_SLOT + && p->howto->type != R_X86_64_IRELATIVE) + continue; + + offset = elf_x86_64_plt_sym_val_offset_plt_bnd (i, plt_push); + + *s = **p->sym_ptr_ptr; + /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since + we are defining a symbol, ensure one of them is set. */ + if ((s->flags & BSF_LOCAL) == 0) + s->flags |= BSF_GLOBAL; + s->flags |= BSF_SYNTHETIC; + s->section = plt; + s->value = offset; + s->name = names; + s->udata.p = NULL; + len = strlen ((*p->sym_ptr_ptr)->name); + memcpy (names, (*p->sym_ptr_ptr)->name, len); + names += len; + if (p->addend != 0) + { + char buf[30], *a; + + memcpy (names, "+0x", sizeof ("+0x") - 1); + names += sizeof ("+0x") - 1; + bfd_sprintf_vma (abfd, buf, p->addend); + for (a = buf; *a == '0'; ++a) + ; + len = strlen (a); + memcpy (names, a, len); + names += len; + } + memcpy (names, "@plt", sizeof ("@plt")); + names += sizeof ("@plt"); + ++s, ++n; + } + + return n; } /* Handle an x86-64 specific section when reading an object file. This is called when elfcode.h finds a section with an unknown type. */ static bfd_boolean -elf_x86_64_section_from_shdr (bfd *abfd, - Elf_Internal_Shdr *hdr, - const char *name, - int shindex) +elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr, + const char *name, int shindex) { if (hdr->sh_type != SHT_X86_64_UNWIND) return FALSE; @@ -4981,9 +5550,10 @@ elf_x86_64_add_symbol_hook (bfd *abfd, return TRUE; } - if ((abfd->flags & DYNAMIC) == 0 - && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC - || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)) + if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC + || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE) + && (abfd->flags & DYNAMIC) == 0 + && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour) elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE; return TRUE; @@ -5050,49 +5620,33 @@ elf_x86_64_common_section (asection *sec) } static bfd_boolean -elf_x86_64_merge_symbol (struct bfd_link_info *info ATTRIBUTE_UNUSED, - struct elf_link_hash_entry **sym_hash ATTRIBUTE_UNUSED, - struct elf_link_hash_entry *h, - Elf_Internal_Sym *sym, +elf_x86_64_merge_symbol (struct elf_link_hash_entry *h, + const Elf_Internal_Sym *sym, asection **psec, - bfd_vma *pvalue ATTRIBUTE_UNUSED, - unsigned int *pold_alignment ATTRIBUTE_UNUSED, - bfd_boolean *skip ATTRIBUTE_UNUSED, - bfd_boolean *override ATTRIBUTE_UNUSED, - bfd_boolean *type_change_ok ATTRIBUTE_UNUSED, - bfd_boolean *size_change_ok ATTRIBUTE_UNUSED, - bfd_boolean *newdyn ATTRIBUTE_UNUSED, - bfd_boolean *newdef, - bfd_boolean *newdyncommon ATTRIBUTE_UNUSED, - bfd_boolean *newweak ATTRIBUTE_UNUSED, - bfd *abfd ATTRIBUTE_UNUSED, - asection **sec, - bfd_boolean *olddyn ATTRIBUTE_UNUSED, - bfd_boolean *olddef, - bfd_boolean *olddyncommon ATTRIBUTE_UNUSED, - bfd_boolean *oldweak ATTRIBUTE_UNUSED, + bfd_boolean newdef, + bfd_boolean olddef, bfd *oldbfd, - asection **oldsec) + const asection *oldsec) { /* A normal common 
symbol and a large common symbol result in a normal common symbol. We turn the large common symbol into a normal one. */ - if (!*olddef + if (!olddef && h->root.type == bfd_link_hash_common - && !*newdef - && bfd_is_com_section (*sec) - && *oldsec != *sec) + && !newdef + && bfd_is_com_section (*psec) + && oldsec != *psec) { if (sym->st_shndx == SHN_COMMON - && (elf_section_flags (*oldsec) & SHF_X86_64_LARGE) != 0) + && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0) { h->root.u.c.p->section = bfd_make_section_old_way (oldbfd, "COMMON"); h->root.u.c.p->section->flags = SEC_ALLOC; } else if (sym->st_shndx == SHN_X86_64_LCOMMON - && (elf_section_flags (*oldsec) & SHF_X86_64_LARGE) == 0) - *psec = *sec = bfd_com_section_ptr; + && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0) + *psec = bfd_com_section_ptr; } return TRUE; @@ -5156,7 +5710,7 @@ static const struct bfd_elf_special_section { NULL, 0, 0, 0, 0 } }; -#define TARGET_LITTLE_SYM bfd_elf64_x86_64_vec +#define TARGET_LITTLE_SYM x86_64_elf64_vec #define TARGET_LITTLE_NAME "elf64-x86-64" #define ELF_ARCH bfd_arch_i386 #define ELF_TARGET_ID X86_64_ELF_DATA @@ -5178,8 +5732,6 @@ static const struct bfd_elf_special_section #define bfd_elf64_bfd_link_hash_table_create \ elf_x86_64_link_hash_table_create -#define bfd_elf64_bfd_link_hash_table_free \ - elf_x86_64_link_hash_table_free #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup #define bfd_elf64_bfd_reloc_name_lookup \ elf_x86_64_reloc_name_lookup @@ -5206,6 +5758,7 @@ static const struct bfd_elf_special_section #define elf_backend_plt_sym_val elf_x86_64_plt_sym_val #define elf_backend_object_p elf64_x86_64_elf_object_p #define bfd_elf64_mkobject elf_x86_64_mkobject +#define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab #define elf_backend_section_from_shdr \ elf_x86_64_section_from_shdr @@ -5231,14 +5784,12 @@ static const struct bfd_elf_special_section #define elf_backend_hash_symbol \ elf_x86_64_hash_symbol -#define elf_backend_post_process_headers _bfd_elf_set_osabi - #include "elf64-target.h" /* FreeBSD support. */ #undef TARGET_LITTLE_SYM -#define TARGET_LITTLE_SYM bfd_elf64_x86_64_freebsd_vec +#define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec #undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd" @@ -5253,7 +5804,7 @@ static const struct bfd_elf_special_section /* Solaris 2 support. */ #undef TARGET_LITTLE_SYM -#define TARGET_LITTLE_SYM bfd_elf64_x86_64_sol2_vec +#define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec #undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf64-x86-64-sol2" @@ -5278,10 +5829,20 @@ static const struct bfd_elf_special_section #include "elf64-target.h" +#undef bfd_elf64_get_synthetic_symtab + /* Native Client support. */ +static bfd_boolean +elf64_x86_64_nacl_elf_object_p (bfd *abfd) +{ + /* Set the right machine number for a NaCl x86-64 ELF64 file. */ + bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl); + return TRUE; +} + #undef TARGET_LITTLE_SYM -#define TARGET_LITTLE_SYM bfd_elf64_x86_64_nacl_vec +#define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec #undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf64-x86-64-nacl" #undef elf64_bed @@ -5316,7 +5877,7 @@ static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] = 0x41, 0xff, 0xe3, /* jmpq *%r11 */ /* 9-byte nop sequence to pad out to the next 32-byte boundary. 
*/ - 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopl %cs:0x0(%rax,%rax,1) */ + 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */ /* 32 bytes of nop to pad out to the standard size. */ 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */ @@ -5412,17 +5973,29 @@ static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed = #undef elf_backend_arch_data #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed +#undef elf_backend_object_p +#define elf_backend_object_p elf64_x86_64_nacl_elf_object_p #undef elf_backend_modify_segment_map #define elf_backend_modify_segment_map nacl_modify_segment_map #undef elf_backend_modify_program_headers #define elf_backend_modify_program_headers nacl_modify_program_headers +#undef elf_backend_final_write_processing +#define elf_backend_final_write_processing nacl_final_write_processing #include "elf64-target.h" /* Native Client x32 support. */ +static bfd_boolean +elf32_x86_64_nacl_elf_object_p (bfd *abfd) +{ + /* Set the right machine number for a NaCl x86-64 ELF32 file. */ + bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl); + return TRUE; +} + #undef TARGET_LITTLE_SYM -#define TARGET_LITTLE_SYM bfd_elf32_x86_64_nacl_vec +#define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec #undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf32-x86-64-nacl" #undef elf32_bed @@ -5430,8 +6003,6 @@ static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed = #define bfd_elf32_bfd_link_hash_table_create \ elf_x86_64_link_hash_table_create -#define bfd_elf32_bfd_link_hash_table_free \ - elf_x86_64_link_hash_table_free #define bfd_elf32_bfd_reloc_type_lookup \ elf_x86_64_reloc_type_lookup #define bfd_elf32_bfd_reloc_name_lookup \ @@ -5441,7 +6012,7 @@ static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed = #undef elf_backend_object_p #define elf_backend_object_p \ - elf32_x86_64_elf_object_p + elf32_x86_64_nacl_elf_object_p #undef elf_backend_bfd_from_remote_memory #define elf_backend_bfd_from_remote_memory \ @@ -5460,6 +6031,7 @@ static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed = #undef elf_backend_size_info #undef elf_backend_modify_segment_map #undef elf_backend_modify_program_headers +#undef elf_backend_final_write_processing /* Intel L1OM support. */ @@ -5472,7 +6044,7 @@ elf64_l1om_elf_object_p (bfd *abfd) } #undef TARGET_LITTLE_SYM -#define TARGET_LITTLE_SYM bfd_elf64_l1om_vec +#define TARGET_LITTLE_SYM l1om_elf64_vec #undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf64-l1om" #undef ELF_ARCH @@ -5506,7 +6078,7 @@ elf64_l1om_elf_object_p (bfd *abfd) /* FreeBSD L1OM support. */ #undef TARGET_LITTLE_SYM -#define TARGET_LITTLE_SYM bfd_elf64_l1om_freebsd_vec +#define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec #undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf64-l1om-freebsd" @@ -5529,7 +6101,7 @@ elf64_k1om_elf_object_p (bfd *abfd) } #undef TARGET_LITTLE_SYM -#define TARGET_LITTLE_SYM bfd_elf64_k1om_vec +#define TARGET_LITTLE_SYM k1om_elf64_vec #undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf64-k1om" #undef ELF_ARCH @@ -5556,7 +6128,7 @@ elf64_k1om_elf_object_p (bfd *abfd) /* FreeBSD K1OM support. */ #undef TARGET_LITTLE_SYM -#define TARGET_LITTLE_SYM bfd_elf64_k1om_freebsd_vec +#define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec #undef TARGET_LITTLE_NAME #define TARGET_LITTLE_NAME "elf64-k1om-freebsd" @@ -5571,7 +6143,7 @@ elf64_k1om_elf_object_p (bfd *abfd) /* 32bit x86-64 support. 
 */

 #undef TARGET_LITTLE_SYM
-#define TARGET_LITTLE_SYM bfd_elf32_x86_64_vec
+#define TARGET_LITTLE_SYM x86_64_elf32_vec
 #undef TARGET_LITTLE_NAME
 #define TARGET_LITTLE_NAME "elf32-x86-64"
 #undef elf32_bed
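
When info->bndplt is in effect (the MPX case handled in elf_x86_64_check_relocs and elf_x86_64_allocate_dynrelocs above), every symbol that needs a PLT gets two slots: a 16-byte entry in .plt (a push of the relocation index plus a jump back to the PLT0 stub, used for lazy resolution) and an 8-byte entry in .plt.bnd (a bnd jmp through its .got.plt slot), and branch relocations are resolved to the .plt.bnd entry.  The displacement written at plt_got_offset into that second slot follows the arithmetic in elf_x86_64_finish_dynamic_symbol; the code below is only an illustrative, standalone sketch of that arithmetic, with every address made up rather than taken from a real link:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Byte template copied from elf_x86_64_bnd_plt2_entry above.  */
static const unsigned char bnd_plt2_entry[8] =
{
  0xf2, 0xff, 0x25,   /* bnd jmpq *<GOT slot>(%rip)  */
  0, 0, 0, 0,         /* 32-bit displacement, patched below  */
  0x90                /* nop  */
};

int
main (void)
{
  /* Hypothetical section addresses and slot offsets.  */
  uint64_t plt_bnd_vma = 0x401060;  /* vma of .plt.bnd  */
  uint64_t gotplt_vma  = 0x403000;  /* vma of .got.plt  */
  uint64_t plt_offset  = 0x08;      /* this symbol's .plt.bnd slot  */
  uint64_t got_offset  = 0x20;      /* this symbol's .got.plt slot  */

  /* plt_got_offset (1+2) and plt_got_insn_size (1+6) are the values
     from elf_x86_64_bnd_arch_bed: the displacement field starts after
     the three opcode bytes and is relative to the end of the 7-byte
     bnd jmp instruction.  */
  const unsigned int plt_got_offset = 3;
  const unsigned int plt_got_insn_size = 7;

  int32_t disp = (int32_t) (gotplt_vma + got_offset
                            - plt_bnd_vma - plt_offset
                            - plt_got_insn_size);

  unsigned char slot[sizeof bnd_plt2_entry];
  memcpy (slot, bnd_plt2_entry, sizeof slot);
  /* bfd_put_32 would store this little-endian; assume a little-endian
     host for this sketch.  */
  memcpy (slot + plt_got_offset, &disp, 4);

  printf ("displacement: 0x%x, patched .plt.bnd slot:", (unsigned int) disp);
  for (unsigned int i = 0; i < sizeof slot; i++)
    printf (" %02x", slot[i]);
  printf ("\n");
  return 0;
}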
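
The large-model TLS sequences recognized in elf_x86_64_check_tls_transition above are matched purely by byte pattern.  Laying the general-dynamic form out byte by byte (offsets relative to the R_X86_64_TLSGD relocation at roff; the symbol and displacements are hypothetical) shows why the code probes offset + 4 for 48 b8 and offset + 14 for 48 01 d8 ff d0, and why contents[roff + 5] == 0xb8 in elf_x86_64_relocate_section identifies the large-model case, the ordinary 64-bit sequence having 66 66 48 e8 at roff + 4 instead:

  roff-3:   48 8d 3d xx xx xx xx   leaq    foo@tlsgd(%rip), %rdi
  roff+4:   48 b8 <imm64>          movabsq $__tls_get_addr@pltoff, %rax
  roff+14:  48 01 d8               addq    %rbx, %rax
  roff+17:  ff d0                  call    *%rax

The whole sequence is 22 bytes starting at roff - 3, which is why the bounds check requires offset + 19 <= sec->size and why the GD->LE and GD->IE rewrites above replace exactly 22 bytes before storing the new 32-bit operand at roff + 9.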
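
elf_x86_64_get_synthetic_symtab above places the synthetic foo@plt symbols in .plt.bnd rather than .plt, since that is where calls are resolved once the second PLT exists.  A minimal sketch of the index arithmetic behind elf_x86_64_plt_sym_val_offset_plt_bnd, assuming the slots are laid out in relocation order (the numbers are made up, not taken from a real object):

#include <stdio.h>

/* Slot sizes from the templates above: 16 bytes per .plt entry, with
   slot 0 being the PLT0 header, and 8 bytes per .plt.bnd entry, which
   has no header slot.  */
enum { PLT_ENTRY_SIZE = 16, PLT_BND_ENTRY_SIZE = 8 };

int
main (void)
{
  unsigned int i;

  for (i = 0; i < 4; i++)
    {
      /* The Ith .rela.plt entry is serviced by the (I+1)th .plt slot...  */
      unsigned int plt_offset = (i + 1) * PLT_ENTRY_SIZE;
      unsigned int plt_index = plt_offset / PLT_ENTRY_SIZE;
      /* ...and by the .plt.bnd slot one position earlier, which is the
         value given to the synthetic foo@plt symbol.  */
      unsigned int bnd_offset = (plt_index - 1) * PLT_BND_ENTRY_SIZE;

      printf ("reloc %u: .plt+0x%x -> .plt.bnd+0x%x\n",
              i, plt_offset, bnd_offset);
    }
  return 0;
}

For ELFOSABI_GNU objects the slots may not be in relocation order, which is why the real function instead reads the relocation index back out of each .plt slot at plt_reloc_offset and compares it against I.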