1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2018 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
22 #include "elfxx-x86.h"
25 #include "libiberty.h"
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
/* NOTE(review): MINUS_ONE is used below as the src/dst mask for 64-bit
   relocations; this assumes bfd_vma is 64 bits wide in this build
   (BFD64) -- confirm against bfd configuration.  */
38 /* Since both 32-bit and 64-bit x86-64 encode relocation type in the
39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
40 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
/* One entry per R_X86_64_* relocation, indexed by relocation number up to
   R_X86_64_REX_GOTPCRELX, then the two GNU_VT* entries, then a trailing
   x32-specific variant of R_X86_64_32 which elf_x86_64_rtype_to_howto
   selects via ARRAY_SIZE (x86_64_elf_howto_table) - 1 -- so that entry
   must remain last.
   NOTE(review): many HOWTO entries in this extract appear truncated
   (trailing pcrel_offset argument / closing braces not visible) --
   presumably lost in extraction; compare against the upstream file.  */
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
/* TLS relocations start here (DTPMOD/DTPOFF/TPOFF and friends).  */
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
/* This duplicate R_X86_64_32 entry MUST stay last in the table:
   elf_x86_64_rtype_to_howto and elf_x86_64_reloc_name_lookup both
   reach it via ARRAY_SIZE (x86_64_elf_howto_table) - 1.  */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
/* This flag is OR'ed into the internal r_type and must be masked off
   before indexing the howto table; see elf_x86_64_info_to_howto, which
   clears it with "r_type &= ~R_X86_64_converted_reloc_bit".  */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
/* Nonzero if TYPE is one of the PC-relative x86-64 relocation types
   (PC8/PC16/PC32/PC32_BND/PC64).  */
#define X86_PCREL_TYPE_P(TYPE)			\
  ((TYPE) == R_X86_64_PC8			\
   || (TYPE) == R_X86_64_PC16			\
   || (TYPE) == R_X86_64_PC32			\
   || (TYPE) == R_X86_64_PC32_BND		\
   || (TYPE) == R_X86_64_PC64)
/* Nonzero if TYPE is one of the symbol-size relocations
   (R_X86_64_SIZE32 or R_X86_64_SIZE64).  */
#define X86_SIZE_TYPE_P(TYPE)			\
  (((TYPE) == R_X86_64_SIZE32) || ((TYPE) == R_X86_64_SIZE64))
212 /* Map BFD relocs to the x86_64 elf relocs. */
/* NOTE(review): the "struct elf_reloc_map {" opening line is not visible
   in this extract; only the two members below appear.  */
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
/* Lookup table scanned linearly by elf_x86_64_reloc_type_lookup to
   translate a generic BFD_RELOC_* code into the x86-64 ELF r_type.  */
219 static const struct elf_reloc_map x86_64_reloc_map[] =
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
/* Translate an ELF r_type into a pointer into x86_64_elf_howto_table.
   R_X86_64_32 is special-cased: for x32 the trailing duplicate entry
   (complain_overflow_bitfield) at ARRAY_SIZE - 1 is used.  Out-of-range
   types are reported and downgraded to R_X86_64_NONE; the GNU_VT*
   types are re-based with R_X86_64_vt_offset to skip the numbering gap.
   NOTE(review): several interior lines (the ABI_64_P branch, braces,
   the declaration of `i`, the error-handler arguments) are missing
   from this extract.  */
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
272 if (r_type == (unsigned int) R_X86_64_32)
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
282 if (r_type >= (unsigned int) R_X86_64_standard)
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%B: invalid relocation type %d"),
287 r_type = R_X86_64_NONE;
292 i = r_type - (unsigned int) R_X86_64_vt_offset;
293 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
294 return &x86_64_elf_howto_table[i];
297 /* Given a BFD reloc type, return a HOWTO structure. */
/* Linear scan of x86_64_reloc_map; on a hit the ELF r_type is handed
   to elf_x86_64_rtype_to_howto so the x32 R_X86_64_32 special case is
   honored.  NOTE(review): the fall-through "return NULL" for an
   unmatched code is not visible in this extract -- confirm upstream.  */
298 static reloc_howto_type *
299 elf_x86_64_reloc_type_lookup (bfd *abfd,
300 bfd_reloc_code_real_type code)
304 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
307 if (x86_64_reloc_map[i].bfd_reloc_val == code)
308 return elf_x86_64_rtype_to_howto (abfd,
309 x86_64_reloc_map[i].elf_reloc_val);
/* Given a relocation name, return its HOWTO.  For non-64-bit ABIs
   (x32) the name "R_X86_64_32" resolves to the trailing bitfield
   variant at the end of the table rather than the regular entry.
   NOTE(review): the r_name parameter line, the "return reloc" of the
   x32 branch, and the trailing "return NULL" are not visible in this
   extract.  */
314 static reloc_howto_type *
315 elf_x86_64_reloc_name_lookup (bfd *abfd,
320 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 /* Get x32 R_X86_64_32. */
323 reloc_howto_type *reloc
324 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
325 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
329 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
330 if (x86_64_elf_howto_table[i].name != NULL
331 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
332 return &x86_64_elf_howto_table[i];
337 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
/* Strips the internal R_X86_64_converted_reloc_bit (except for the
   GNU_VT* types, whose numbering overlaps it) before mapping the
   r_type to a howto.  The assert tolerates the R_X86_64_NONE fallback
   produced by elf_x86_64_rtype_to_howto for invalid types.
   NOTE(review): the return-type line and final return statement are
   not visible in this extract.  */
340 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
341 Elf_Internal_Rela *dst)
345 r_type = ELF32_R_TYPE (dst->r_info);
346 if (r_type != (unsigned int) R_X86_64_GNU_VTINHERIT
347 && r_type != (unsigned int) R_X86_64_GNU_VTENTRY)
348 r_type &= ~R_X86_64_converted_reloc_bit;
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
351 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
354 /* Support for core dump NOTE sections. */
/* Parse an NT_PRSTATUS core note, distinguishing x32 (descsz 296) from
   x86-64 (descsz 336) layouts by size, then expose the register block
   as a ".reg/<pid>" pseudosection.
   NOTE(review): the default case, the declarations of `size`/`offset`,
   and their assignments are not visible in this extract.  */
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
361 switch (note->descsz)
366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
/* Parse an NT_PRPSINFO core note (process name and command line),
   again distinguishing x32 (descsz 124) from x86-64 (descsz 136)
   field offsets by note size.
   NOTE(review): the return-type line, default case, and final
   "return TRUE" are not visible in this extract.  */
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
403 switch (note->descsz)
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16)
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args in some (at least one anyway)
428 implementations, so strip it off if it exists. */
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
/* Emit a core-dump note of the requested type.  The variadic tail
   carries (fname, psargs) for prpsinfo notes and (pid, cursig, gregs)
   for prstatus notes; ELFCLASS32 vs ELFCLASS64 and (for prstatus)
   EM_X86_64-on-32 (x32) select the struct layout.
   The strncpy calls here fill fixed-width prpsinfo fields; the
   preceding memset guarantees NUL termination unless the source
   exactly fills the field, which matches the prpsinfo format.
   NOTE(review): the note_type switch statement, struct declarations
   (e.g. prpsinfo32_t/prpsinfo64_t, prstat variants), va_end calls,
   pid assignments, and the default case are not visible in this
   extract.  */
443 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
446 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
448 const char *fname, *psargs;
459 va_start (ap, note_type);
460 fname = va_arg (ap, const char *);
461 psargs = va_arg (ap, const char *);
/* 32-bit (x32) prpsinfo layout.  */
464 if (bed->s->elfclass == ELFCLASS32)
467 memset (&data, 0, sizeof (data));
468 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
469 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
470 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
471 &data, sizeof (data));
/* 64-bit prpsinfo layout.  */
476 memset (&data, 0, sizeof (data));
477 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
478 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
479 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
480 &data, sizeof (data));
/* prstatus variant: (pid, cursig, gregs) from the variadic tail.  */
485 va_start (ap, note_type);
486 pid = va_arg (ap, long);
487 cursig = va_arg (ap, int);
488 gregs = va_arg (ap, const void *);
491 if (bed->s->elfclass == ELFCLASS32)
493 if (bed->elf_machine_code == EM_X86_64)
/* x32: 32-bit ELF class but x86-64 register set.  */
495 prstatusx32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
/* Plain 32-bit prstatus.  */
506 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_cursig = cursig;
509 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
510 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
511 &prstat, sizeof (prstat));
/* 64-bit prstatus.  */
517 memset (&prstat, 0, sizeof (prstat));
519 prstat.pr_cursig = cursig;
520 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
521 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
522 &prstat, sizeof (prstat));
529 /* Functions for the x86-64 ELF linker. */
531 /* The size in bytes of an entry in the global offset table. */
533 #define GOT_ENTRY_SIZE 8
535 /* The size in bytes of an entry in the lazy procedure linkage table. */
537 #define LAZY_PLT_ENTRY_SIZE 16
539 /* The size in bytes of an entry in the non-lazy procedure linkage
/* (continuation: "table." -- closing line not visible in extract)  */
542 #define NON_LAZY_PLT_ENTRY_SIZE 8
544 /* The first entry in a lazy procedure linkage table looks like this.
545 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
/* PLT instruction templates.  The zeroed displacement/immediate fields
   are patched at link time at the offsets recorded in the
   elf_x86_lazy_plt_layout / elf_x86_non_lazy_plt_layout structures
   further below.  NOTE(review): the closing "};" of each array is not
   visible in this extract.  */
548 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
550 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
551 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
552 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
555 /* Subsequent entries in a lazy procedure linkage table look like this. */
557 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
559 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
560 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
561 0x68, /* pushq immediate */
562 0, 0, 0, 0, /* replaced with index into relocation table. */
563 0xe9, /* jmp relative */
564 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
567 /* The first entry in a lazy procedure linkage table with BND prefix
570 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
572 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
573 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
574 0x0f, 0x1f, 0 /* nopl (%rax) */
577 /* Subsequent entries for branches with BND prefix in a lazy procedure
578 linkage table look like this. */
580 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
582 0x68, 0, 0, 0, 0, /* pushq immediate */
583 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
584 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
587 /* The first entry in the IBT-enabled lazy procedure linkage table is
588 the same as the lazy PLT with BND prefix so that bound registers are
589 preserved when control is passed to dynamic linker. Subsequent
590 entries for an IBT-enabled lazy procedure linkage table look like
593 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
595 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
596 0x68, 0, 0, 0, 0, /* pushq immediate */
597 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
601 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
602 is the same as the normal lazy PLT. Subsequent entries for an
603 x32 IBT-enabled lazy procedure linkage table look like this. */
605 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
607 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
608 0x68, 0, 0, 0, 0, /* pushq immediate */
609 0xe9, 0, 0, 0, 0, /* jmpq relative */
610 0x66, 0x90 /* xchg %ax,%ax */
613 /* Entries in the non-lazy procedure linkage table look like this. */
615 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
617 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x66, 0x90 /* xchg %ax,%ax */
622 /* Entries for branches with BND prefix in the non-lazy procedure
623 linkage table look like this. */
625 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
627 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
628 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
632 /* Entries for branches with IBT-enabled in the non-lazy procedure
633 linkage table look like this. They have the same size as the lazy
636 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
638 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
639 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
640 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
641 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
644 /* Entries for branches with IBT-enabled in the x32 non-lazy procedure
645 linkage table look like this. They have the same size as the lazy
648 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
650 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
651 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
652 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
653 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
656 /* .eh_frame covering the lazy .plt section. */
/* Hand-built CIE+FDE templates for the various .plt flavors.  The
   address and size words flagged below are patched at link time.  The
   four lazy variants differ only in the DW_OP_lit<N> constant inside
   the CFA expression (11, 5, 10, 9 respectively); presumably N encodes
   the offset within a PLT entry at which the return address has been
   pushed for that flavor's instruction layout -- confirm upstream.
   NOTE(review): the closing "};" of each array is not visible in this
   extract.  */
658 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
660 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
661 0, 0, 0, 0, /* CIE ID */
663 'z', 'R', 0, /* Augmentation string */
664 1, /* Code alignment factor */
665 0x78, /* Data alignment factor */
666 16, /* Return address column */
667 1, /* Augmentation size */
668 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
669 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
670 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
671 DW_CFA_nop, DW_CFA_nop,
673 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
674 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
675 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
676 0, 0, 0, 0, /* .plt size goes here */
677 0, /* Augmentation size */
678 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
679 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
680 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
681 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
682 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
683 11, /* Block length */
684 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
685 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
686 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
687 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
688 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
691 /* .eh_frame covering the lazy BND .plt section. */
693 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
695 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
696 0, 0, 0, 0, /* CIE ID */
698 'z', 'R', 0, /* Augmentation string */
699 1, /* Code alignment factor */
700 0x78, /* Data alignment factor */
701 16, /* Return address column */
702 1, /* Augmentation size */
703 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
704 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
705 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
706 DW_CFA_nop, DW_CFA_nop,
708 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
709 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
710 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
711 0, 0, 0, 0, /* .plt size goes here */
712 0, /* Augmentation size */
713 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
714 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
715 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
716 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
717 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
718 11, /* Block length */
719 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
720 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
721 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
722 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
723 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
726 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
728 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
730 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
731 0, 0, 0, 0, /* CIE ID */
733 'z', 'R', 0, /* Augmentation string */
734 1, /* Code alignment factor */
735 0x78, /* Data alignment factor */
736 16, /* Return address column */
737 1, /* Augmentation size */
738 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
739 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
740 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
741 DW_CFA_nop, DW_CFA_nop,
743 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
744 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
745 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
746 0, 0, 0, 0, /* .plt size goes here */
747 0, /* Augmentation size */
748 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
749 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
750 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
751 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
752 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
753 11, /* Block length */
754 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
755 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
756 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
757 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
758 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
761 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
763 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
765 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
766 0, 0, 0, 0, /* CIE ID */
768 'z', 'R', 0, /* Augmentation string */
769 1, /* Code alignment factor */
770 0x78, /* Data alignment factor */
771 16, /* Return address column */
772 1, /* Augmentation size */
773 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
774 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
775 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
776 DW_CFA_nop, DW_CFA_nop,
778 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
779 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
780 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
781 0, 0, 0, 0, /* .plt size goes here */
782 0, /* Augmentation size */
783 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
784 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
785 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
786 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
787 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
788 11, /* Block length */
789 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
790 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
791 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
792 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
793 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
796 /* .eh_frame covering the non-lazy .plt section. */
/* Non-lazy PLT entries never push anything, so the FDE here carries no
   CFA-adjusting instructions -- only padding nops.  */
798 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
800 #define PLT_GOT_FDE_LENGTH 20
801 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
802 0, 0, 0, 0, /* CIE ID */
804 'z', 'R', 0, /* Augmentation string */
805 1, /* Code alignment factor */
806 0x78, /* Data alignment factor */
807 16, /* Return address column */
808 1, /* Augmentation size */
809 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
810 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
811 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
812 DW_CFA_nop, DW_CFA_nop,
814 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
815 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
816 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
817 0, 0, 0, 0, /* non-lazy .plt size goes here */
818 0, /* Augmentation size */
819 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
820 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
823 /* These are the standard parameters. */
824 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
826 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
827 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
828 elf_x86_64_lazy_plt_entry, /* plt_entry */
829 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
830 2, /* plt0_got1_offset */
831 8, /* plt0_got2_offset */
832 12, /* plt0_got2_insn_end */
833 2, /* plt_got_offset */
834 7, /* plt_reloc_offset */
835 12, /* plt_plt_offset */
836 6, /* plt_got_insn_size */
837 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
838 6, /* plt_lazy_offset */
839 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
840 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
841 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
842 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
845 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
847 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
848 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
849 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
850 2, /* plt_got_offset */
851 6, /* plt_got_insn_size */
852 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
853 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
856 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
858 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
859 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
860 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
861 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
862 2, /* plt0_got1_offset */
863 1+8, /* plt0_got2_offset */
864 1+12, /* plt0_got2_insn_end */
865 1+2, /* plt_got_offset */
866 1, /* plt_reloc_offset */
867 7, /* plt_plt_offset */
868 1+6, /* plt_got_insn_size */
869 11, /* plt_plt_insn_end */
870 0, /* plt_lazy_offset */
871 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
872 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
873 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
877 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
879 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
880 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
881 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
882 1+2, /* plt_got_offset */
883 1+6, /* plt_got_insn_size */
884 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
885 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
888 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
890 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
891 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
892 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
893 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
894 2, /* plt0_got1_offset */
895 1+8, /* plt0_got2_offset */
896 1+12, /* plt0_got2_insn_end */
897 4+1+2, /* plt_got_offset */
898 4+1, /* plt_reloc_offset */
899 4+1+6, /* plt_plt_offset */
900 4+1+6, /* plt_got_insn_size */
901 4+1+5+5, /* plt_plt_insn_end */
902 0, /* plt_lazy_offset */
903 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
904 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
905 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
906 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
/* Layout of the lazy PLT in the IBT variant for x32.  Unlike the
   64-bit IBT layout above, PLT0 is the plain (non-BND) template, and
   the per-entry offsets lack the extra +1 — presumably because the
   x32 entries carry no REX/BND prefix byte, TODO confirm against the
   entry templates.  */
909 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
911 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
912 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
913 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
914 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
915 2, /* plt0_got1_offset */
916 8, /* plt0_got2_offset */
917 12, /* plt0_got2_insn_end */
918 4+2, /* plt_got_offset */
919 4+1, /* plt_reloc_offset */
920 4+6, /* plt_plt_offset */
921 4+6, /* plt_got_insn_size */
922 4+5+5, /* plt_plt_insn_end */
923 0, /* plt_lazy_offset */
924 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
925 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
926 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
927 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
/* Layout of the non-lazy PLT in the IBT variant for 64-bit x86-64.
   One entry template serves both non-PIC and PIC.  Note plt_entry_size
   is LAZY_PLT_ENTRY_SIZE here, not NON_LAZY_PLT_ENTRY_SIZE as in the
   BND non-lazy layout — the IBT entry is larger.  */
930 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
932 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
933 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
934 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
935 4+1+2, /* plt_got_offset */
936 4+1+6, /* plt_got_insn_size */
937 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
938 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
/* Layout of the non-lazy PLT in the IBT variant for x32.  Same shape
   as the 64-bit IBT non-lazy layout but with x32 entry templates and
   offsets one byte smaller (no extra prefix byte — TODO confirm
   against the entry template).  Shares the 64-bit eh_frame data.  */
941 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
943 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
944 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
946 4+2, /* plt_got_offset */
947 4+6, /* plt_got_insn_size */
948 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
949 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
/* x86-64 backend-specific data, hooked into the generic ELF backend
   via elf_backend_arch_data.  NOTE(review): the initializer body is
   elided in this extract.  */
952 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
957 #define elf_backend_arch_data &elf_x86_64_arch_bed
/* object_p hook for 64-bit objects: record the bfd_mach_x86_64
   machine number on ABFD.  */
960 elf64_x86_64_elf_object_p (bfd *abfd)
962 /* Set the right machine number for an x86-64 elf64 file. */
963 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
/* object_p hook for x32 (ELFCLASS32 x86-64) objects: record the
   bfd_mach_x64_32 machine number on ABFD.  */
968 elf32_x86_64_elf_object_p (bfd *abfd)
970 /* Set the right machine number for an x86-64 elf32 file. */
971 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
975 /* Return TRUE if the TLS access code sequence support transition
/* Verify, by matching instruction bytes in CONTENTS around the reloc
   at REL->r_offset, that the TLS access sequence for R_TYPE is one of
   the compiler-generated forms the linker knows how to rewrite
   (GD/LD via __tls_get_addr, IE mov/add, GDesc lea/call).  Returns
   FALSE when the bytes do not match any recognized pattern, in which
   case no transition may be performed.  */
979 elf_x86_64_check_tls_transition (bfd *abfd,
980 struct bfd_link_info *info,
983 Elf_Internal_Shdr *symtab_hdr,
984 struct elf_link_hash_entry **sym_hashes,
986 const Elf_Internal_Rela *rel,
987 const Elf_Internal_Rela *relend)
990 unsigned long r_symndx;
991 bfd_boolean largepic = FALSE;
992 struct elf_link_hash_entry *h;
994 struct elf_x86_link_hash_table *htab;
996 bfd_boolean indirect_call;
998 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
999 offset = rel->r_offset;
/* GD and LD sequences end with a call to __tls_get_addr carried by
   the NEXT reloc, so rel+1 must exist and is inspected below.  */
1002 case R_X86_64_TLSGD:
1003 case R_X86_64_TLSLD:
1004 if ((rel + 1) >= relend)
1007 if (r_type == R_X86_64_TLSGD)
1009 /* Check transition from GD access model. For 64bit, only
1010 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1011 .word 0x6666; rex64; call __tls_get_addr@PLT
1013 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1015 call *__tls_get_addr@GOTPCREL(%rip)
1016 which may be converted to
1017 addr32 call __tls_get_addr
1018 can transit to different access model. For 32bit, only
1019 leaq foo@tlsgd(%rip), %rdi
1020 .word 0x6666; rex64; call __tls_get_addr@PLT
1022 leaq foo@tlsgd(%rip), %rdi
1024 call *__tls_get_addr@GOTPCREL(%rip)
1025 which may be converted to
1026 addr32 call __tls_get_addr
1027 can transit to different access model. For largepic,
1029 leaq foo@tlsgd(%rip), %rdi
1030 movabsq $__tls_get_addr@pltoff, %rax
1034 leaq foo@tlsgd(%rip), %rdi
1035 movabsq $__tls_get_addr@pltoff, %rax
/* 0x66 prefix + REX.W lea %rdi form: the canonical GD prologue.  */
1039 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1041 if ((offset + 12) > sec->size)
1044 call = contents + offset + 4;
1046 || !((call[1] == 0x48
1054 && call[3] == 0xe8)))
/* Fall back to the largepic (movabsq $...@pltoff) form; needs 19
   bytes and is 64-bit ABI only.  */
1056 if (!ABI_64_P (abfd)
1057 || (offset + 19) > sec->size
1059 || memcmp (call - 7, leaq + 1, 3) != 0
1060 || memcmp (call, "\x48\xb8", 2) != 0
1064 || !((call[10] == 0x48 && call[12] == 0xd8)
1065 || (call[10] == 0x4c && call[12] == 0xf8)))
1069 else if (ABI_64_P (abfd))
1072 || memcmp (contents + offset - 4, leaq, 4) != 0)
1078 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1081 indirect_call = call[2] == 0xff;
1085 /* Check transition from LD access model. Only
1086 leaq foo@tlsld(%rip), %rdi;
1087 call __tls_get_addr@PLT
1089 leaq foo@tlsld(%rip), %rdi;
1090 call *__tls_get_addr@GOTPCREL(%rip)
1091 which may be converted to
1092 addr32 call __tls_get_addr
1093 can transit to different access model. For largepic
1095 leaq foo@tlsld(%rip), %rdi
1096 movabsq $__tls_get_addr@pltoff, %rax
1100 leaq foo@tlsld(%rip), %rdi
1101 movabsq $__tls_get_addr@pltoff, %rax
1105 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1107 if (offset < 3 || (offset + 9) > sec->size)
1110 if (memcmp (contents + offset - 3, lea, 3) != 0)
1113 call = contents + offset + 4;
/* Accept direct call (0xe8), indirect call via GOT (0xff 0x15), or
   addr32-prefixed direct call (0x67 0xe8).  */
1114 if (!(call[0] == 0xe8
1115 || (call[0] == 0xff && call[1] == 0x15)
1116 || (call[0] == 0x67 && call[1] == 0xe8)))
1118 if (!ABI_64_P (abfd)
1119 || (offset + 19) > sec->size
1120 || memcmp (call, "\x48\xb8", 2) != 0
1124 || !((call[10] == 0x48 && call[12] == 0xd8)
1125 || (call[10] == 0x4c && call[12] == 0xf8)))
1129 indirect_call = call[0] == 0xff;
/* The following reloc must target __tls_get_addr, and its type must
   match the call form detected above.  */
1132 r_symndx = htab->r_sym (rel[1].r_info);
1133 if (r_symndx < symtab_hdr->sh_info)
1136 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1138 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1142 r_type = (ELF32_R_TYPE (rel[1].r_info)
1143 & ~R_X86_64_converted_reloc_bit)
1145 return r_type == R_X86_64_PLTOFF64;
1146 else if (indirect_call)
1147 return r_type == R_X86_64_GOTPCRELX;
1149 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1152 case R_X86_64_GOTTPOFF:
1153 /* Check transition from IE access model:
1154 mov foo@gottpoff(%rip), %reg
1155 add foo@gottpoff(%rip), %reg
1158 /* Check REX prefix first. */
1159 if (offset >= 3 && (offset + 4) <= sec->size)
1161 val = bfd_get_8 (abfd, contents + offset - 3);
1162 if (val != 0x48 && val != 0x4c)
1164 /* X32 may have 0x44 REX prefix or no REX prefix. */
1165 if (ABI_64_P (abfd))
1171 /* X32 may not have any REX prefix. */
1172 if (ABI_64_P (abfd))
1174 if (offset < 2 || (offset + 3) > sec->size)
/* Opcode must be mov (0x8b) or add (0x03); ModRM must select
   RIP-relative addressing (mod=00, r/m=101).  */
1178 val = bfd_get_8 (abfd, contents + offset - 2);
1179 if (val != 0x8b && val != 0x03)
1182 val = bfd_get_8 (abfd, contents + offset - 1);
1183 return (val & 0xc7) == 5;
1185 case R_X86_64_GOTPC32_TLSDESC:
1186 /* Check transition from GDesc access model:
1187 leaq x@tlsdesc(%rip), %rax
1189 Make sure it's a leaq adding rip to a 32-bit offset
1190 into any register, although it's probably almost always
1193 if (offset < 3 || (offset + 4) > sec->size)
/* REX byte: 0x48 or 0x4c (0xfb mask ignores the REX.R bit).  */
1196 val = bfd_get_8 (abfd, contents + offset - 3);
1197 if ((val & 0xfb) != 0x48)
1200 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1203 val = bfd_get_8 (abfd, contents + offset - 1);
1204 return (val & 0xc7) == 0x05;
1206 case R_X86_64_TLSDESC_CALL:
1207 /* Check transition from GDesc access model:
1208 call *x@tlsdesc(%rax)
1210 if (offset + 2 <= sec->size)
1212 /* Make sure that it's a call *x@tlsdesc(%rax). */
1213 call = contents + offset;
1214 return call[0] == 0xff && call[1] == 0x10;
1224 /* Return TRUE if the TLS access transition is OK or no transition
1225 will be performed. Update R_TYPE if there is a transition. */
/* Decide the target TLS relocation type for *R_TYPE given the link
   mode (executable vs. shared) and, when called from
   relocate_section, the symbol's resolved TLS_TYPE.  On a transition,
   *R_TYPE is rewritten in place; the instruction bytes are validated
   via elf_x86_64_check_tls_transition and an error is reported (and
   FALSE returned) when the code sequence is not a recognized form.  */
1228 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1229 asection *sec, bfd_byte *contents,
1230 Elf_Internal_Shdr *symtab_hdr,
1231 struct elf_link_hash_entry **sym_hashes,
1232 unsigned int *r_type, int tls_type,
1233 const Elf_Internal_Rela *rel,
1234 const Elf_Internal_Rela *relend,
1235 struct elf_link_hash_entry *h,
1236 unsigned long r_symndx,
1237 bfd_boolean from_relocate_section)
1239 unsigned int from_type = *r_type;
1240 unsigned int to_type = from_type;
1241 bfd_boolean check = TRUE;
1243 /* Skip TLS transition for functions. */
1245 && (h->type == STT_FUNC
1246 || h->type == STT_GNU_IFUNC))
1251 case R_X86_64_TLSGD:
1252 case R_X86_64_GOTPC32_TLSDESC:
1253 case R_X86_64_TLSDESC_CALL:
1254 case R_X86_64_GOTTPOFF:
/* In an executable, GD/GDesc/IE can relax towards LE (TPOFF32);
   otherwise at most towards IE (GOTTPOFF).  */
1255 if (bfd_link_executable (info))
1258 to_type = R_X86_64_TPOFF32;
1260 to_type = R_X86_64_GOTTPOFF;
1263 /* When we are called from elf_x86_64_relocate_section, there may
1264 be additional transitions based on TLS_TYPE. */
1265 if (from_relocate_section)
1267 unsigned int new_to_type = to_type;
1269 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1270 new_to_type = R_X86_64_TPOFF32;
1272 if (to_type == R_X86_64_TLSGD
1273 || to_type == R_X86_64_GOTPC32_TLSDESC
1274 || to_type == R_X86_64_TLSDESC_CALL)
1276 if (tls_type == GOT_TLS_IE)
1277 new_to_type = R_X86_64_GOTTPOFF;
1280 /* We checked the transition before when we were called from
1281 elf_x86_64_check_relocs. We only want to check the new
1282 transition which hasn't been checked before. */
1283 check = new_to_type != to_type && from_type == to_type;
1284 to_type = new_to_type;
1289 case R_X86_64_TLSLD:
1290 if (bfd_link_executable (info))
1291 to_type = R_X86_64_TPOFF32;
1298 /* Return TRUE if there is no transition. */
1299 if (from_type == to_type)
1302 /* Check if the transition can be performed. */
1304 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1305 symtab_hdr, sym_hashes,
1306 from_type, rel, relend))
1308 reloc_howto_type *from, *to;
1311 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1312 to = elf_x86_64_rtype_to_howto (abfd, to_type);
/* Build a symbol name for the diagnostic: global hash entry name,
   or look up the local symbol.  */
1315 name = h->root.root.string;
1318 struct elf_x86_link_hash_table *htab;
1320 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1325 Elf_Internal_Sym *isym;
1327 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1329 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1334 /* xgettext:c-format */
1335 (_("%B: TLS transition from %s to %s against `%s' at %#Lx "
1336 "in section `%A' failed"),
1337 abfd, from->name, to->name, name, rel->r_offset, sec);
1338 bfd_set_error (bfd_error_bad_value);
1346 /* Rename some of the generic section flags to better document how they
   are used here.  check_relocs_failed: set when check_relocs has hit
   an error on this section, so later passes (e.g. relocate_section)
   skip it.  */
1348 #define check_relocs_failed sec_flg0
/* Report a relocation that cannot be used in the current output type
   ("recompile with -fPIC" style diagnostic).  Builds the message from
   the symbol name (global H or local ISYM), its visibility, whether it
   is undefined, and the output kind (shared object / PIE / PDE), then
   flags SEC as failed so it is skipped by relocate_section.  */
1351 elf_x86_64_need_pic (struct bfd_link_info *info,
1352 bfd *input_bfd, asection *sec,
1353 struct elf_link_hash_entry *h,
1354 Elf_Internal_Shdr *symtab_hdr,
1355 Elf_Internal_Sym *isym,
1356 reloc_howto_type *howto)
1359 const char *und = "";
1360 const char *pic = "";
1366 name = h->root.root.string;
1367 switch (ELF_ST_VISIBILITY (h->other))
1370 v = _("hidden symbol ");
1373 v = _("internal symbol ");
1376 v = _("protected symbol ");
1379 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1380 v = _("protected symbol ");
1383 pic = _("; recompile with -fPIC");
1387 if (!h->def_regular && !h->def_dynamic)
1388 und = _("undefined ");
/* Local symbol: look the name up and always suggest -fPIC.  */
1392 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1393 pic = _("; recompile with -fPIC");
1396 if (bfd_link_dll (info))
1397 object = _("a shared object");
1398 else if (bfd_link_pie (info))
1399 object = _("a PIE object");
1401 object = _("a PDE object");
1403 /* xgettext:c-format */
1404 _bfd_error_handler (_("%B: relocation %s against %s%s`%s' can "
1405 "not be used when making %s%s"),
1406 input_bfd, howto->name, und, v, name,
1408 bfd_set_error (bfd_error_bad_value);
1409 sec->check_relocs_failed = 1;
1413 /* With the local symbol, foo, we convert
1414 mov foo@GOTPCREL(%rip), %reg
1418 call/jmp *foo@GOTPCREL(%rip)
1420 nop call foo/jmp foo nop
1421 When PIC is false, convert
1422 test %reg, foo@GOTPCREL(%rip)
1426 binop foo@GOTPCREL(%rip), %reg
1429 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
/* Relax a GOTPCREL-family load of a locally-resolved symbol into a
   direct reference by rewriting the opcode/ModRM/REX bytes in
   CONTENTS and replacing *R_TYPE_P with PC32 or 32/32S (tagged with
   R_X86_64_converted_reloc_bit).  Sets *CONVERTED when a rewrite was
   done; returns FALSE only on hard error.  */
1433 elf_x86_64_convert_load_reloc (bfd *abfd,
1435 unsigned int *r_type_p,
1436 Elf_Internal_Rela *irel,
1437 struct elf_link_hash_entry *h,
1438 bfd_boolean *converted,
1439 struct bfd_link_info *link_info)
1441 struct elf_x86_link_hash_table *htab;
1443 bfd_boolean no_overflow;
1445 bfd_boolean to_reloc_pc32;
1447 bfd_signed_vma raddend;
1448 unsigned int opcode;
1450 unsigned int r_type = *r_type_p;
1451 unsigned int r_symndx;
1452 bfd_vma roff = irel->r_offset;
/* Need at least opcode+ModRM before the reloc; REX form needs the
   REX byte too.  */
1454 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1457 raddend = irel->r_addend;
1458 /* Addend for 32-bit PC-relative relocation must be -4. */
1462 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1463 is_pic = bfd_link_pic (link_info);
1465 relocx = (r_type == R_X86_64_GOTPCRELX
1466 || r_type == R_X86_64_REX_GOTPCRELX);
1468 /* TRUE if --no-relax is used. */
1469 no_overflow = link_info->disable_target_specific_optimizations > 1;
1471 r_symndx = htab->r_sym (irel->r_info);
1473 opcode = bfd_get_8 (abfd, contents + roff - 2);
1475 /* Convert mov to lea since it has been done for a while. */
1478 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1479 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1480 test, xor instructions. */
1485 /* We convert only to R_X86_64_PC32:
1487 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1488 3. no_overflow is true.
1491 to_reloc_pc32 = (opcode == 0xff
1496 /* Get the symbol referred to by the reloc. */
1499 Elf_Internal_Sym *isym
1500 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1502 /* Skip relocation against undefined symbols. */
1503 if (isym->st_shndx == SHN_UNDEF)
1506 if (isym->st_shndx == SHN_ABS)
1507 tsec = bfd_abs_section_ptr;
1508 else if (isym->st_shndx == SHN_COMMON)
1509 tsec = bfd_com_section_ptr;
1510 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1511 tsec = &_bfd_elf_large_com_section;
1513 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1517 /* Undefined weak symbol is only bound locally in executable
1518 and its reference is resolved as 0 without relocation
1519 overflow. We can only perform this optimization for
1520 GOTPCRELX relocations since we need to modify REX byte.
1521 It is OK convert mov with R_X86_64_GOTPCREL to
1523 bfd_boolean local_ref;
1524 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1526 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1527 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1528 if ((relocx || opcode == 0x8b)
1529 && (h->root.type == bfd_link_hash_undefweak
1535 /* Skip for branch instructions since R_X86_64_PC32
1542 /* For non-branch instructions, we can convert to
1543 R_X86_64_32/R_X86_64_32S since we know if there
1545 to_reloc_pc32 = FALSE;
1548 /* Since we don't know the current PC when PIC is true,
1549 we can't convert to R_X86_64_PC32. */
1550 if (to_reloc_pc32 && is_pic)
1555 /* Avoid optimizing GOTPCREL relocations againt _DYNAMIC since
1556 ld.so may use its link-time address. */
1557 else if (h->start_stop
1560 || h->root.type == bfd_link_hash_defined
1561 || h->root.type == bfd_link_hash_defweak)
1562 && h != htab->elf.hdynamic
1565 /* bfd_link_hash_new or bfd_link_hash_undefined is
1566 set by an assignment in a linker script in
1567 bfd_elf_record_link_assignment. start_stop is set
1568 on __start_SECNAME/__stop_SECNAME which mark section
1573 && (h->root.type == bfd_link_hash_new
1574 || h->root.type == bfd_link_hash_undefined
1575 || ((h->root.type == bfd_link_hash_defined
1576 || h->root.type == bfd_link_hash_defweak)
1577 && h->root.u.def.section == bfd_und_section_ptr))))
1579 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1584 tsec = h->root.u.def.section;
1590 /* Don't convert GOTPCREL relocation against large section. */
1591 if (elf_section_data (tsec) != NULL
1592 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1595 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1602 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1607 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1609 modrm = bfd_get_8 (abfd, contents + roff - 1);
1612 /* Convert to "jmp foo nop". */
1615 nop_offset = irel->r_offset + 3;
1616 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1617 irel->r_offset -= 1;
1618 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1622 struct elf_x86_link_hash_entry *eh
1623 = (struct elf_x86_link_hash_entry *) h;
1625 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1628 /* To support TLS optimization, always use addr32 prefix for
1629 "call *__tls_get_addr@GOTPCREL(%rip)". */
1630 if (eh && eh->tls_get_addr)
1633 nop_offset = irel->r_offset - 2;
1637 nop = link_info->call_nop_byte;
1638 if (link_info->call_nop_as_suffix)
1640 nop_offset = irel->r_offset + 3;
1641 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1642 irel->r_offset -= 1;
1643 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1646 nop_offset = irel->r_offset - 2;
1649 bfd_put_8 (abfd, nop, contents + nop_offset);
1650 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1651 r_type = R_X86_64_PC32;
1656 unsigned int rex_mask = REX_R;
1658 if (r_type == R_X86_64_REX_GOTPCRELX)
1659 rex = bfd_get_8 (abfd, contents + roff - 3);
1667 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1668 "lea foo(%rip), %reg". */
1670 r_type = R_X86_64_PC32;
1674 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1675 "mov $foo, %reg". */
/* Rewrite ModRM from RIP-relative (mod=00) to register-direct
   (mod=11), moving the reg field into r/m.  */
1677 modrm = bfd_get_8 (abfd, contents + roff - 1);
1678 modrm = 0xc0 | (modrm & 0x38) >> 3;
1679 if ((rex & REX_W) != 0
1680 && ABI_64_P (link_info->output_bfd))
1682 /* Keep the REX_W bit in REX byte for LP64. */
1683 r_type = R_X86_64_32S;
1684 goto rewrite_modrm_rex;
1688 /* If the REX_W bit in REX byte isn't needed,
1689 use R_X86_64_32 and clear the W bit to avoid
1690 sign-extend imm32 to imm64. */
1691 r_type = R_X86_64_32;
1692 /* Clear the W bit in REX byte. */
1694 goto rewrite_modrm_rex;
1700 /* R_X86_64_PC32 isn't supported. */
1704 modrm = bfd_get_8 (abfd, contents + roff - 1);
1707 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1708 "test $foo, %reg". */
1709 modrm = 0xc0 | (modrm & 0x38) >> 3;
1714 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1715 "binop $foo, %reg". */
1716 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1720 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1721 overflow when sign-extending imm32 to imm64. */
1722 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1725 bfd_put_8 (abfd, modrm, contents + roff - 1);
1729 /* Move the R bit to the B bit in REX byte. */
1730 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1731 bfd_put_8 (abfd, rex, contents + roff - 3);
1734 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1738 bfd_put_8 (abfd, opcode, contents + roff - 2);
/* Record the converted type, tagged so relocate_section knows it
   was rewritten here.  */
1742 irel->r_info = htab->r_info (r_symndx,
1743 r_type | R_X86_64_converted_reloc_bit);
1750 /* Look through the relocs for a section during the first phase, and
1751 calculate needed space in the global offset table, procedure
1752 linkage table, and dynamic reloc sections. */
1755 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1757 const Elf_Internal_Rela *relocs)
1759 struct elf_x86_link_hash_table *htab;
1760 Elf_Internal_Shdr *symtab_hdr;
1761 struct elf_link_hash_entry **sym_hashes;
1762 const Elf_Internal_Rela *rel;
1763 const Elf_Internal_Rela *rel_end;
1766 bfd_boolean converted;
1768 if (bfd_link_relocatable (info))
1771 /* Don't do anything special with non-loaded, non-alloced sections.
1772 In particular, any relocs in such sections should not affect GOT
1773 and PLT reference counting (ie. we don't allow them to create GOT
1774 or PLT entries), there's no possibility or desire to optimize TLS
1775 relocs, and there's not much point in propagating relocs to shared
1776 libs that the dynamic linker won't relocate. */
1777 if ((sec->flags & SEC_ALLOC) == 0)
1780 htab = elf_x86_hash_table (info, X86_64_ELF_DATA)
1783 sec->check_relocs_failed = 1;
1787 BFD_ASSERT (is_x86_elf (abfd, htab));
1789 /* Get the section contents. */
1790 if (elf_section_data (sec)->this_hdr.contents != NULL)
1791 contents = elf_section_data (sec)->this_hdr.contents;
1792 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1794 sec->check_relocs_failed = 1;
1798 symtab_hdr = &elf_symtab_hdr (abfd);
1799 sym_hashes = elf_sym_hashes (abfd);
1805 rel_end = relocs + sec->reloc_count;
1806 for (rel = relocs; rel < rel_end; rel++)
1808 unsigned int r_type;
1809 unsigned int r_symndx;
1810 struct elf_link_hash_entry *h;
1811 struct elf_x86_link_hash_entry *eh;
1812 Elf_Internal_Sym *isym;
1814 bfd_boolean size_reloc;
1815 bfd_boolean converted_reloc;
1817 r_symndx = htab->r_sym (rel->r_info);
1818 r_type = ELF32_R_TYPE (rel->r_info);
1820 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1822 /* xgettext:c-format */
1823 _bfd_error_handler (_("%B: bad symbol index: %d"),
1828 if (r_symndx < symtab_hdr->sh_info)
1830 /* A local symbol. */
1831 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1836 /* Check relocation against local STT_GNU_IFUNC symbol. */
1837 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1839 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1844 /* Fake a STT_GNU_IFUNC symbol. */
1845 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1847 h->type = STT_GNU_IFUNC;
1850 h->forced_local = 1;
1851 h->root.type = bfd_link_hash_defined;
/* Global symbol: follow indirect/warning links to the real entry.  */
1859 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1860 while (h->root.type == bfd_link_hash_indirect
1861 || h->root.type == bfd_link_hash_warning)
1862 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1865 /* Check invalid x32 relocations. */
1866 if (!ABI_64_P (abfd))
1872 case R_X86_64_DTPOFF64:
1873 case R_X86_64_TPOFF64:
1875 case R_X86_64_GOTOFF64:
1876 case R_X86_64_GOT64:
1877 case R_X86_64_GOTPCREL64:
1878 case R_X86_64_GOTPC64:
1879 case R_X86_64_GOTPLT64:
1880 case R_X86_64_PLTOFF64:
1883 name = h->root.root.string;
1885 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1888 /* xgettext:c-format */
1889 (_("%B: relocation %s against symbol `%s' isn't "
1890 "supported in x32 mode"), abfd,
1891 x86_64_elf_howto_table[r_type].name, name);
1892 bfd_set_error (bfd_error_bad_value);
1900 /* It is referenced by a non-shared object. */
1903 if (h->type == STT_GNU_IFUNC)
1904 elf_tdata (info->output_bfd)->has_gnu_symbols
1905 |= elf_gnu_symbol_ifunc;
/* Try to relax GOTPCREL loads of non-IFUNC symbols into direct
   references before counting GOT space.  */
1908 converted_reloc = FALSE;
1909 if ((r_type == R_X86_64_GOTPCREL
1910 || r_type == R_X86_64_GOTPCRELX
1911 || r_type == R_X86_64_REX_GOTPCRELX)
1912 && (h == NULL || h->type != STT_GNU_IFUNC))
1914 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1915 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1916 irel, h, &converted_reloc,
1920 if (converted_reloc)
1924 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1925 symtab_hdr, sym_hashes,
1926 &r_type, GOT_UNKNOWN,
1927 rel, rel_end, h, r_symndx, FALSE))
1930 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1931 if (h == htab->elf.hgot)
1932 htab->got_referenced = TRUE;
1934 eh = (struct elf_x86_link_hash_entry *) h;
1937 case R_X86_64_TLSLD:
1938 htab->tls_ld_or_ldm_got.refcount = 1;
1941 case R_X86_64_TPOFF32:
1942 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1943 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1944 &x86_64_elf_howto_table[r_type]);
1946 eh->zero_undefweak &= 0x2;
1949 case R_X86_64_GOTTPOFF:
1950 if (!bfd_link_executable (info))
1951 info->flags |= DF_STATIC_TLS;
1954 case R_X86_64_GOT32:
1955 case R_X86_64_GOTPCREL:
1956 case R_X86_64_GOTPCRELX:
1957 case R_X86_64_REX_GOTPCRELX:
1958 case R_X86_64_TLSGD:
1959 case R_X86_64_GOT64:
1960 case R_X86_64_GOTPCREL64:
1961 case R_X86_64_GOTPLT64:
1962 case R_X86_64_GOTPC32_TLSDESC:
1963 case R_X86_64_TLSDESC_CALL:
1964 /* This symbol requires a global offset table entry. */
1966 int tls_type, old_tls_type;
1970 default: tls_type = GOT_NORMAL; break;
1971 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1972 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1973 case R_X86_64_GOTPC32_TLSDESC:
1974 case R_X86_64_TLSDESC_CALL:
1975 tls_type = GOT_TLS_GDESC; break;
1980 h->got.refcount = 1;
1981 old_tls_type = eh->tls_type;
1985 bfd_signed_vma *local_got_refcounts;
1987 /* This is a global offset table entry for a local symbol. */
1988 local_got_refcounts = elf_local_got_refcounts (abfd);
1989 if (local_got_refcounts == NULL)
/* One combined allocation: refcount, tlsdesc GOT entry and TLS
   type arrays for all local symbols.  */
1993 size = symtab_hdr->sh_info;
1994 size *= sizeof (bfd_signed_vma)
1995 + sizeof (bfd_vma) + sizeof (char);
1996 local_got_refcounts = ((bfd_signed_vma *)
1997 bfd_zalloc (abfd, size));
1998 if (local_got_refcounts == NULL)
2000 elf_local_got_refcounts (abfd) = local_got_refcounts;
2001 elf_x86_local_tlsdesc_gotent (abfd)
2002 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2003 elf_x86_local_got_tls_type (abfd)
2004 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2006 local_got_refcounts[r_symndx] = 1;
2008 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2011 /* If a TLS symbol is accessed using IE at least once,
2012 there is no point to use dynamic model for it. */
2013 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2014 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2015 || tls_type != GOT_TLS_IE))
2017 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2018 tls_type = old_tls_type;
2019 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2020 && GOT_TLS_GD_ANY_P (tls_type))
2021 tls_type |= old_tls_type;
/* Conflicting normal/TLS access to the same symbol is an error.  */
2025 name = h->root.root.string;
2027 name = bfd_elf_sym_name (abfd, symtab_hdr,
2030 /* xgettext:c-format */
2031 (_("%B: '%s' accessed both as normal and"
2032 " thread local symbol"),
2034 bfd_set_error (bfd_error_bad_value);
2039 if (old_tls_type != tls_type)
2042 eh->tls_type = tls_type;
2044 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2049 case R_X86_64_GOTOFF64:
2050 case R_X86_64_GOTPC32:
2051 case R_X86_64_GOTPC64:
2054 eh->zero_undefweak &= 0x2;
2057 case R_X86_64_PLT32:
2058 case R_X86_64_PLT32_BND:
2059 /* This symbol requires a procedure linkage table entry. We
2060 actually build the entry in adjust_dynamic_symbol,
2061 because this might be a case of linking PIC code which is
2062 never referenced by a dynamic object, in which case we
2063 don't need to generate a procedure linkage table entry
2066 /* If this is a local symbol, we resolve it directly without
2067 creating a procedure linkage table entry. */
2071 eh->zero_undefweak &= 0x2;
2073 h->plt.refcount = 1;
2076 case R_X86_64_PLTOFF64:
2077 /* This tries to form the 'address' of a function relative
2078 to GOT. For global symbols we need a PLT entry. */
2082 h->plt.refcount = 1;
2086 case R_X86_64_SIZE32:
2087 case R_X86_64_SIZE64:
2092 if (!ABI_64_P (abfd))
2098 /* Check relocation overflow as these relocs may lead to
2099 run-time relocation overflow. Don't error out for
2100 sections we don't care about, such as debug sections or
2101 when relocation overflow check is disabled. */
2102 if (!info->no_reloc_overflow_check
2104 && (bfd_link_pic (info)
2105 || (bfd_link_executable (info)
2109 && (sec->flags & SEC_READONLY) == 0)))
2110 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2111 &x86_64_elf_howto_table[r_type]);
2117 case R_X86_64_PC32_BND:
2121 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2122 eh->zero_undefweak |= 0x2;
2123 /* We are called after all symbols have been resolved. Only
2124 relocation against STT_GNU_IFUNC symbol must go through
2127 && (bfd_link_executable (info)
2128 || h->type == STT_GNU_IFUNC))
2130 bfd_boolean func_pointer_ref = FALSE;
2132 if (r_type == R_X86_64_PC32)
2134 /* Since something like ".long foo - ." may be used
2135 as pointer, make sure that PLT is used if foo is
2136 a function defined in a shared library. */
2137 if ((sec->flags & SEC_CODE) == 0)
2138 h->pointer_equality_needed = 1;
2140 else if (r_type != R_X86_64_PC32_BND
2141 && r_type != R_X86_64_PC64)
2143 h->pointer_equality_needed = 1;
2144 /* At run-time, R_X86_64_64 can be resolved for both
2145 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2146 can only be resolved for x32. */
2147 if ((sec->flags & SEC_READONLY) == 0
2148 && (r_type == R_X86_64_64
2149 || (!ABI_64_P (abfd)
2150 && (r_type == R_X86_64_32
2151 || r_type == R_X86_64_32S))))
2152 func_pointer_ref = TRUE;
2155 if (!func_pointer_ref)
2157 /* If this reloc is in a read-only section, we might
2158 need a copy reloc. We can't check reliably at this
2159 stage whether the section is read-only, as input
2160 sections have not yet been mapped to output sections.
2161 Tentatively set the flag for now, and correct in
2162 adjust_dynamic_symbol. */
2165 /* We may need a .plt entry if the symbol is a function
2166 defined in a shared lib or is a function referenced
2167 from the code or read-only section. */
2169 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2170 h->plt.refcount = 1;
2176 if (NEED_DYNAMIC_RELOCATION_P (info, h, sec, r_type,
2177 htab->pointer_r_type))
2179 struct elf_dyn_relocs *p;
2180 struct elf_dyn_relocs **head;
2182 /* We must copy these reloc types into the output file.
2183 Create a reloc section in dynobj and make room for
2187 sreloc = _bfd_elf_make_dynamic_reloc_section
2188 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2189 abfd, /*rela?*/ TRUE);
2195 /* If this is a global symbol, we count the number of
2196 relocations we need for this symbol. */
2198 head = &eh->dyn_relocs;
2201 /* Track dynamic relocs needed for local syms too.
2202 We really need local syms available to do this
2207 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2212 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2216 /* Beware of type punned pointers vs strict aliasing
2218 vpp = &(elf_section_data (s)->local_dynrel);
2219 head = (struct elf_dyn_relocs **)vpp;
2223 if (p == NULL || p->sec != sec)
2225 bfd_size_type amt = sizeof *p;
2227 p = ((struct elf_dyn_relocs *)
2228 bfd_alloc (htab->elf.dynobj, amt));
2239 /* Count size relocation as PC-relative relocation. */
2240 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2245 /* This relocation describes the C++ object vtable hierarchy.
2246 Reconstruct it for later use during GC. */
2247 case R_X86_64_GNU_VTINHERIT:
2248 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2252 /* This relocation describes which C++ vtable entries are actually
2253 used. Record for later use during GC. */
2254 case R_X86_64_GNU_VTENTRY:
2255 BFD_ASSERT (h != NULL);
2257 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
/* Decide whether to keep or free the section contents we may have
   read above.  */
2266 if (elf_section_data (sec)->this_hdr.contents != contents)
2268 if (!converted && !info->keep_memory)
2272 /* Cache the section contents for elf_link_input_bfd if any
2273 load is converted or --no-keep-memory isn't used. */
2274 elf_section_data (sec)->this_hdr.contents = contents;
2278 /* Cache relocations if any load is converted. */
2279 if (elf_section_data (sec)->relocs != relocs && converted)
2280 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2285 if (elf_section_data (sec)->this_hdr.contents != contents)
2287 sec->check_relocs_failed = 1;
2291 /* Return the relocation value for @tpoff relocation
2292 if STT_TLS virtual address is ADDRESS. */
2295 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2297 struct elf_link_hash_table *htab = elf_hash_table (info);
2298 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2299 bfd_vma static_tls_size;
2301 /* If tls_segment is NULL, we should have signalled an error already. */
2302 if (htab->tls_sec == NULL)
2305 /* Consider special static TLS alignment requirements. */
2306 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
/* Offset is negative on x86-64: the TLS block sits below the thread
   pointer.  */
2307 return address - static_tls_size - htab->tls_sec->vma;
2310 /* Is the instruction before OFFSET in CONTENTS a 32bit relative
/* Returns TRUE for a direct call (0xe8), direct jmp (0xe9), or a
   two-byte conditional jump (0x0f 0x80..0x8f) immediately preceding
   OFFSET.  NOTE(review): callers must guarantee OFFSET leaves room
   for the bytes examined — bounds are not checked here.  */
2314 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
2316 /* Opcode Instruction
2319 0x0f 0x8x conditional jump */
2321 && (contents [offset - 1] == 0xe8
2322 || contents [offset - 1] == 0xe9))
2324 && contents [offset - 2] == 0x0f
2325 && (contents [offset - 1] & 0xf0) == 0x80));
2328 /* Relocate an x86_64 ELF section. */
/* Apply the relocations RELOCS of INPUT_SECTION (read from INPUT_BFD, whose
   local symbols are LOCAL_SYMS/LOCAL_SECTIONS) while producing OUTPUT_BFD.
   Handles IFUNC resolution, GOT/PLT-based relocations, dynamic-reloc
   emission, and in-place TLS model transitions (GD->LE, GD->IE, IE->LE,
   LD->LE) by rewriting instruction bytes.
   NOTE(review): this listing is elided — the embedded original line numbers
   jump, so many statements (braces, else-branches, continue/return paths)
   are missing between the visible lines; the comments below describe only
   what is visible.  */
2331 elf_x86_64_relocate_section (bfd *output_bfd,
2332 struct bfd_link_info *info,
2334 asection *input_section,
2336 Elf_Internal_Rela *relocs,
2337 Elf_Internal_Sym *local_syms,
2338 asection **local_sections)
2340 struct elf_x86_link_hash_table *htab;
2341 Elf_Internal_Shdr *symtab_hdr;
2342 struct elf_link_hash_entry **sym_hashes;
2343 bfd_vma *local_got_offsets;
2344 bfd_vma *local_tlsdesc_gotents;
2345 Elf_Internal_Rela *rel;
2346 Elf_Internal_Rela *wrel;
2347 Elf_Internal_Rela *relend;
2348 unsigned int plt_entry_size;
2350 /* Skip if check_relocs failed. */
2351 if (input_section->check_relocs_failed)
2354 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2358 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2360 plt_entry_size = htab->plt.plt_entry_size;
2361 symtab_hdr = &elf_symtab_hdr (input_bfd);
2362 sym_hashes = elf_sym_hashes (input_bfd);
2363 local_got_offsets = elf_local_got_offsets (input_bfd);
2364 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2366 _bfd_x86_elf_set_tls_module_base (info);
/* Walk the relocations.  WREL trails REL: surviving relocs are written back
   through WREL so that relocs deleted along the way are squeezed out in
   place; the final (rel - wrel) gap drives the sh_size/reloc_count
   adjustment at the bottom of the function.  */
2368 rel = wrel = relocs;
2369 relend = relocs + input_section->reloc_count;
2370 for (; rel < relend; wrel++, rel++)
2372 unsigned int r_type, r_type_tls;
2373 reloc_howto_type *howto;
2374 unsigned long r_symndx;
2375 struct elf_link_hash_entry *h;
2376 struct elf_x86_link_hash_entry *eh;
2377 Elf_Internal_Sym *sym;
2379 bfd_vma off, offplt, plt_offset;
2381 bfd_boolean unresolved_reloc;
2382 bfd_reloc_status_type r;
2384 asection *base_got, *resolved_plt;
2386 bfd_boolean resolved_to_zero;
2387 bfd_boolean relative_reloc;
2388 bfd_boolean converted_reloc;
2389 bfd_boolean need_copy_reloc_in_pie;
2391 r_type = ELF32_R_TYPE (rel->r_info);
/* Vtable GC bookkeeping relocs need no value applied here (the branch body
   is elided in this listing).  */
2392 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2393 || r_type == (int) R_X86_64_GNU_VTENTRY)
/* Strip the internal bit that marks a GOTPCREL relocation already converted
   by the relax pass (see the "failed to convert GOTPCREL" overflow message
   near the end) before using R_TYPE as a table index.  */
2400 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2401 r_type &= ~R_X86_64_converted_reloc_bit;
2403 if (r_type >= (int) R_X86_64_standard)
2404 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
/* R_X86_64_32 on x32 uses the special last howto entry; everything else
   indexes the table directly.  */
2406 if (r_type != (int) R_X86_64_32
2407 || ABI_64_P (output_bfd))
2408 howto = x86_64_elf_howto_table + r_type;
2410 howto = (x86_64_elf_howto_table
2411 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2412 r_symndx = htab->r_sym (rel->r_info);
/* Resolve the symbol: indices below sh_info are local symbols resolved via
   LOCAL_SYMS/LOCAL_SECTIONS; others go through the ELF linker hash chain.  */
2416 unresolved_reloc = FALSE;
2417 if (r_symndx < symtab_hdr->sh_info)
2419 sym = local_syms + r_symndx;
2420 sec = local_sections[r_symndx];
2422 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2424 st_size = sym->st_size;
2426 /* Relocate against local STT_GNU_IFUNC symbol. */
2427 if (!bfd_link_relocatable (info)
2428 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2430 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2435 /* Set STT_GNU_IFUNC symbol value. */
2436 h->root.u.def.value = sym->st_value;
2437 h->root.u.def.section = sec;
2442 bfd_boolean warned ATTRIBUTE_UNUSED;
2443 bfd_boolean ignored ATTRIBUTE_UNUSED;
2445 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2446 r_symndx, symtab_hdr, sym_hashes,
2448 unresolved_reloc, warned, ignored);
/* Relocations against symbols in discarded sections: zero the field and
   keep the (rewritten) reloc entry.  */
2452 if (sec != NULL && discarded_section (sec))
2454 _bfd_clear_contents (howto, input_bfd, input_section,
2455 contents + rel->r_offset);
2456 wrel->r_offset = rel->r_offset;
2460 /* For ld -r, remove relocations in debug sections against
2461 sections defined in discarded sections. Not done for
2462 eh_frame editing code expects to be present. */
2463 if (bfd_link_relocatable (info)
2464 && (input_section->flags & SEC_DEBUGGING))
2470 if (bfd_link_relocatable (info))
/* x32 only: with a zero addend, 64-bit data relocs can be narrowed to their
   32-bit forms, zero-extending the upper half in place.  */
2477 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2479 if (r_type == R_X86_64_64)
2481 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2482 zero-extend it to 64bit if addend is zero. */
2483 r_type = R_X86_64_32;
2484 memset (contents + rel->r_offset + 4, 0, 4);
2486 else if (r_type == R_X86_64_SIZE64)
2488 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2489 zero-extend it to 64bit if addend is zero. */
2490 r_type = R_X86_64_SIZE32;
2491 memset (contents + rel->r_offset + 4, 0, 4);
2495 eh = (struct elf_x86_link_hash_entry *) h;
2497 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2498 it here if it is defined in a non-shared object. */
2500 && h->type == STT_GNU_IFUNC
2506 if ((input_section->flags & SEC_ALLOC) == 0)
2508 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2509 sections because such sections are not SEC_ALLOC and
2510 thus ld.so will not process them. */
2511 if ((input_section->flags & SEC_DEBUGGING) != 0)
/* IFUNC sub-switch: GOT-relative references to an IFUNC symbol.  */
2521 case R_X86_64_GOTPCREL:
2522 case R_X86_64_GOTPCRELX:
2523 case R_X86_64_REX_GOTPCRELX:
2524 case R_X86_64_GOTPCREL64:
2525 base_got = htab->elf.sgot;
2526 off = h->got.offset;
2528 if (base_got == NULL)
2531 if (off == (bfd_vma) -1)
2533 /* We can't use h->got.offset here to save state, or
2534 even just remember the offset, as finish_dynamic_symbol
2535 would use that as offset into .got. */
2537 if (h->plt.offset == (bfd_vma) -1)
2540 if (htab->elf.splt != NULL)
2542 plt_index = (h->plt.offset / plt_entry_size
2543 - htab->plt.has_plt0);
2544 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2545 base_got = htab->elf.sgotplt;
2549 plt_index = h->plt.offset / plt_entry_size;
2550 off = plt_index * GOT_ENTRY_SIZE;
2551 base_got = htab->elf.igotplt;
2554 if (h->dynindx == -1
2558 /* This references the local defitionion. We must
2559 initialize this entry in the global offset table.
2560 Since the offset must always be a multiple of 8,
2561 we use the least significant bit to record
2562 whether we have initialized it already.
2564 When doing a dynamic link, we create a .rela.got
2565 relocation entry to initialize the value. This
2566 is done in the finish_dynamic_symbol routine. */
2571 bfd_put_64 (output_bfd, relocation,
2572 base_got->contents + off);
2573 /* Note that this is harmless for the GOTPLT64
2574 case, as -1 | 1 still is -1. */
2580 relocation = (base_got->output_section->vma
2581 + base_got->output_offset + off);
2586 if (h->plt.offset == (bfd_vma) -1)
2588 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2589 if (r_type == htab->pointer_r_type
2590 && (input_section->flags & SEC_CODE) == 0)
2591 goto do_ifunc_pointer;
2592 goto bad_ifunc_reloc;
2595 /* STT_GNU_IFUNC symbol must go through PLT. */
2596 if (htab->elf.splt != NULL)
2598 if (htab->plt_second != NULL)
2600 resolved_plt = htab->plt_second;
2601 plt_offset = eh->plt_second.offset;
2605 resolved_plt = htab->elf.splt;
2606 plt_offset = h->plt.offset;
2611 resolved_plt = htab->elf.iplt;
2612 plt_offset = h->plt.offset;
2615 relocation = (resolved_plt->output_section->vma
2616 + resolved_plt->output_offset + plt_offset);
/* bad_ifunc_reloc path: report an unsupported relocation against IFUNC.  */
2622 if (h->root.root.string)
2623 name = h->root.root.string;
2625 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2628 /* xgettext:c-format */
2629 (_("%B: relocation %s against STT_GNU_IFUNC "
2630 "symbol `%s' isn't supported"), input_bfd,
2632 bfd_set_error (bfd_error_bad_value);
/* do_ifunc_pointer path: data references to an IFUNC address.  */
2636 if (bfd_link_pic (info))
2641 if (ABI_64_P (output_bfd))
2646 if (rel->r_addend != 0)
2648 if (h->root.root.string)
2649 name = h->root.root.string;
2651 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2654 /* xgettext:c-format */
2655 (_("%B: relocation %s against STT_GNU_IFUNC "
2656 "symbol `%s' has non-zero addend: %Ld"),
2657 input_bfd, howto->name, name, rel->r_addend);
2658 bfd_set_error (bfd_error_bad_value);
2662 /* Generate dynamic relcoation only when there is a
2663 non-GOT reference in a shared object or there is no
2665 if ((bfd_link_pic (info) && h->non_got_ref)
2666 || h->plt.offset == (bfd_vma) -1)
2668 Elf_Internal_Rela outrel;
2671 /* Need a dynamic relocation to get the real function
2673 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2677 if (outrel.r_offset == (bfd_vma) -1
2678 || outrel.r_offset == (bfd_vma) -2)
2681 outrel.r_offset += (input_section->output_section->vma
2682 + input_section->output_offset);
2684 if (POINTER_LOCAL_IFUNC_P (info, h))
2686 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
2687 h->root.root.string,
2688 h->root.u.def.section->owner);
2690 /* This symbol is resolved locally. */
2691 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2692 outrel.r_addend = (h->root.u.def.value
2693 + h->root.u.def.section->output_section->vma
2694 + h->root.u.def.section->output_offset);
2698 outrel.r_info = htab->r_info (h->dynindx, r_type);
2699 outrel.r_addend = 0;
2702 /* Dynamic relocations are stored in
2703 1. .rela.ifunc section in PIC object.
2704 2. .rela.got section in dynamic executable.
2705 3. .rela.iplt section in static executable. */
2706 if (bfd_link_pic (info))
2707 sreloc = htab->elf.irelifunc;
2708 else if (htab->elf.splt != NULL)
2709 sreloc = htab->elf.srelgot;
2711 sreloc = htab->elf.irelplt;
2712 elf_append_rela (output_bfd, sreloc, &outrel);
2714 /* If this reloc is against an external symbol, we
2715 do not want to fiddle with the addend. Otherwise,
2716 we need to include the symbol value so that it
2717 becomes an addend for the dynamic reloc. For an
2718 internal symbol, we have updated addend. */
2723 case R_X86_64_PC32_BND:
2725 case R_X86_64_PLT32:
2726 case R_X86_64_PLT32_BND:
2731 resolved_to_zero = (eh != NULL
2732 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2734 /* When generating a shared object, the relocations handled here are
2735 copied into the output file to be resolved at run time. */
/* Main relocation switch (non-IFUNC path).  */
2738 case R_X86_64_GOT32:
2739 case R_X86_64_GOT64:
2740 /* Relocation is to the entry for this symbol in the global
2742 case R_X86_64_GOTPCREL:
2743 case R_X86_64_GOTPCRELX:
2744 case R_X86_64_REX_GOTPCRELX:
2745 case R_X86_64_GOTPCREL64:
2746 /* Use global offset table entry as symbol value. */
2747 case R_X86_64_GOTPLT64:
2748 /* This is obsolete and treated the same as GOT64. */
2749 base_got = htab->elf.sgot;
2751 if (htab->elf.sgot == NULL)
2754 relative_reloc = FALSE;
2757 off = h->got.offset;
2759 && h->plt.offset != (bfd_vma)-1
2760 && off == (bfd_vma)-1)
2762 /* We can't use h->got.offset here to save
2763 state, or even just remember the offset, as
2764 finish_dynamic_symbol would use that as offset into
2766 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2767 - htab->plt.has_plt0);
2768 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2769 base_got = htab->elf.sgotplt;
2772 if (RESOLVED_LOCALLY_P (info, h, htab))
2774 /* We must initialize this entry in the global offset
2775 table. Since the offset must always be a multiple
2776 of 8, we use the least significant bit to record
2777 whether we have initialized it already.
2779 When doing a dynamic link, we create a .rela.got
2780 relocation entry to initialize the value. This is
2781 done in the finish_dynamic_symbol routine. */
2786 bfd_put_64 (output_bfd, relocation,
2787 base_got->contents + off);
2788 /* Note that this is harmless for the GOTPLT64 case,
2789 as -1 | 1 still is -1. */
2792 if (GENERATE_RELATIVE_RELOC_P (info, h))
2794 /* If this symbol isn't dynamic in PIC,
2795 generate R_X86_64_RELATIVE here. */
2796 eh->no_finish_dynamic_symbol = 1;
2797 relative_reloc = TRUE;
2802 unresolved_reloc = FALSE;
/* Local-symbol GOT entry: offset tracked in local_got_offsets, LSB used as
   the "already initialized" flag, same convention as for globals above.  */
2806 if (local_got_offsets == NULL)
2809 off = local_got_offsets[r_symndx];
2811 /* The offset must always be a multiple of 8. We use
2812 the least significant bit to record whether we have
2813 already generated the necessary reloc. */
2818 bfd_put_64 (output_bfd, relocation,
2819 base_got->contents + off);
2820 local_got_offsets[r_symndx] |= 1;
2822 if (bfd_link_pic (info))
2823 relative_reloc = TRUE;
2830 Elf_Internal_Rela outrel;
2832 /* We need to generate a R_X86_64_RELATIVE reloc
2833 for the dynamic linker. */
2834 s = htab->elf.srelgot;
2838 outrel.r_offset = (base_got->output_section->vma
2839 + base_got->output_offset
2841 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2842 outrel.r_addend = relocation;
2843 elf_append_rela (output_bfd, s, &outrel);
2846 if (off >= (bfd_vma) -2)
2849 relocation = base_got->output_section->vma
2850 + base_got->output_offset + off;
/* Non-PC-relative GOT relocs are expressed relative to .got.plt, not as an
   absolute address.  */
2851 if (r_type != R_X86_64_GOTPCREL
2852 && r_type != R_X86_64_GOTPCRELX
2853 && r_type != R_X86_64_REX_GOTPCRELX
2854 && r_type != R_X86_64_GOTPCREL64)
2855 relocation -= htab->elf.sgotplt->output_section->vma
2856 - htab->elf.sgotplt->output_offset;
2860 case R_X86_64_GOTOFF64:
2861 /* Relocation is relative to the start of the global offset
2864 /* Check to make sure it isn't a protected function or data
2865 symbol for shared library since it may not be local when
2866 used as function address or with copy relocation. We also
2867 need to make sure that a symbol is referenced locally. */
2868 if (bfd_link_pic (info) && h)
2870 if (!h->def_regular)
2874 switch (ELF_ST_VISIBILITY (h->other))
2877 v = _("hidden symbol");
2880 v = _("internal symbol");
2883 v = _("protected symbol");
2891 /* xgettext:c-format */
2892 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s"
2893 " `%s' can not be used when making a shared object"),
2894 input_bfd, v, h->root.root.string);
2895 bfd_set_error (bfd_error_bad_value);
2898 else if (!bfd_link_executable (info)
2899 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2900 && (h->type == STT_FUNC
2901 || h->type == STT_OBJECT)
2902 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2905 /* xgettext:c-format */
2906 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s"
2907 " `%s' can not be used when making a shared object"),
2909 h->type == STT_FUNC ? "function" : "data",
2910 h->root.root.string);
2911 bfd_set_error (bfd_error_bad_value);
2916 /* Note that sgot is not involved in this
2917 calculation. We always want the start of .got.plt. If we
2918 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2919 permitted by the ABI, we might have to change this
2921 relocation -= htab->elf.sgotplt->output_section->vma
2922 + htab->elf.sgotplt->output_offset;
2925 case R_X86_64_GOTPC32:
2926 case R_X86_64_GOTPC64:
2927 /* Use global offset table as symbol value. */
2928 relocation = htab->elf.sgotplt->output_section->vma
2929 + htab->elf.sgotplt->output_offset;
2930 unresolved_reloc = FALSE;
2933 case R_X86_64_PLTOFF64:
2934 /* Relocation is PLT entry relative to GOT. For local
2935 symbols it's the symbol itself relative to GOT. */
2937 /* See PLT32 handling. */
2938 && (h->plt.offset != (bfd_vma) -1
2939 || eh->plt_got.offset != (bfd_vma) -1)
2940 && htab->elf.splt != NULL)
2942 if (eh->plt_got.offset != (bfd_vma) -1)
2944 /* Use the GOT PLT. */
2945 resolved_plt = htab->plt_got;
2946 plt_offset = eh->plt_got.offset;
2948 else if (htab->plt_second != NULL)
2950 resolved_plt = htab->plt_second;
2951 plt_offset = eh->plt_second.offset;
2955 resolved_plt = htab->elf.splt;
2956 plt_offset = h->plt.offset;
2959 relocation = (resolved_plt->output_section->vma
2960 + resolved_plt->output_offset
2962 unresolved_reloc = FALSE;
2965 relocation -= htab->elf.sgotplt->output_section->vma
2966 + htab->elf.sgotplt->output_offset;
2969 case R_X86_64_PLT32:
2970 case R_X86_64_PLT32_BND:
2971 /* Relocation is to the entry for this symbol in the
2972 procedure linkage table. */
2974 /* Resolve a PLT32 reloc against a local symbol directly,
2975 without using the procedure linkage table. */
2979 if ((h->plt.offset == (bfd_vma) -1
2980 && eh->plt_got.offset == (bfd_vma) -1)
2981 || htab->elf.splt == NULL)
2983 /* We didn't make a PLT entry for this symbol. This
2984 happens when statically linking PIC code, or when
2985 using -Bsymbolic. */
2989 if (h->plt.offset != (bfd_vma) -1)
2991 if (htab->plt_second != NULL)
2993 resolved_plt = htab->plt_second;
2994 plt_offset = eh->plt_second.offset;
2998 resolved_plt = htab->elf.splt;
2999 plt_offset = h->plt.offset;
3004 /* Use the GOT PLT. */
3005 resolved_plt = htab->plt_got;
3006 plt_offset = eh->plt_got.offset;
3009 relocation = (resolved_plt->output_section->vma
3010 + resolved_plt->output_offset
3012 unresolved_reloc = FALSE;
3015 case R_X86_64_SIZE32:
3016 case R_X86_64_SIZE64:
3017 /* Set to symbol size. */
3018 relocation = st_size;
3024 case R_X86_64_PC32_BND:
3025 /* Don't complain about -fPIC if the symbol is undefined when
3026 building executable unless it is unresolved weak symbol or
3027 -z nocopyreloc is used. */
3028 if ((input_section->flags & SEC_ALLOC) != 0
3029 && (input_section->flags & SEC_READONLY) != 0
3031 && ((bfd_link_executable (info)
3032 && ((h->root.type == bfd_link_hash_undefweak
3033 && !resolved_to_zero)
3034 || ((info->nocopyreloc
3035 || (eh->def_protected
3036 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3038 && !(h->root.u.def.section->flags & SEC_CODE))))
3039 || bfd_link_dll (info)))
3041 bfd_boolean fail = FALSE;
3043 = ((r_type == R_X86_64_PC32
3044 || r_type == R_X86_64_PC32_BND)
3045 && is_32bit_relative_branch (contents, rel->r_offset));
3047 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3049 /* Symbol is referenced locally. Make sure it is
3050 defined locally or for a branch. */
3051 fail = (!(h->def_regular || ELF_COMMON_DEF_P (h))
3054 else if (!(bfd_link_pie (info)
3055 && (h->needs_copy || eh->needs_copy)))
3057 /* Symbol doesn't need copy reloc and isn't referenced
3058 locally. We only allow branch to symbol with
3059 non-default visibility. */
3061 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
3065 return elf_x86_64_need_pic (info, input_bfd, input_section,
3066 h, NULL, NULL, howto);
3075 /* FIXME: The ABI says the linker should make sure the value is
3076 the same when it's zeroextended to 64 bit. */
3079 if ((input_section->flags & SEC_ALLOC) == 0)
3082 need_copy_reloc_in_pie = (bfd_link_pie (info)
3087 == bfd_link_hash_undefined))
3088 && (X86_PCREL_TYPE_P (r_type)
3089 || X86_SIZE_TYPE_P (r_type)));
3091 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3092 need_copy_reloc_in_pie,
3093 resolved_to_zero, FALSE))
3095 Elf_Internal_Rela outrel;
3096 bfd_boolean skip, relocate;
3099 /* When generating a shared object, these relocations
3100 are copied into the output file to be resolved at run
3106 _bfd_elf_section_offset (output_bfd, info, input_section,
3108 if (outrel.r_offset == (bfd_vma) -1)
3110 else if (outrel.r_offset == (bfd_vma) -2)
3111 skip = TRUE, relocate = TRUE;
3113 outrel.r_offset += (input_section->output_section->vma
3114 + input_section->output_offset);
3117 memset (&outrel, 0, sizeof outrel);
3119 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3121 outrel.r_info = htab->r_info (h->dynindx, r_type);
3122 outrel.r_addend = rel->r_addend;
3126 /* This symbol is local, or marked to become local.
3127 When relocation overflow check is disabled, we
3128 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3129 if (r_type == htab->pointer_r_type
3130 || (r_type == R_X86_64_32
3131 && info->no_reloc_overflow_check))
3134 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3135 outrel.r_addend = relocation + rel->r_addend;
3137 else if (r_type == R_X86_64_64
3138 && !ABI_64_P (output_bfd))
3141 outrel.r_info = htab->r_info (0,
3142 R_X86_64_RELATIVE64);
3143 outrel.r_addend = relocation + rel->r_addend;
3144 /* Check addend overflow. */
/* The sign bit (bit 31) of the computed addend must match the original
   addend's, otherwise the addend no longer fits and is reported.  */
3145 if ((outrel.r_addend & 0x80000000)
3146 != (rel->r_addend & 0x80000000))
3149 int addend = rel->r_addend;
3150 if (h && h->root.root.string)
3151 name = h->root.root.string;
3153 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3156 /* xgettext:c-format */
3157 (_("%B: addend %s%#x in relocation %s against "
3158 "symbol `%s' at %#Lx in section `%A' is "
3160 input_bfd, addend < 0 ? "-" : "", addend,
3161 howto->name, name, rel->r_offset, input_section);
3162 bfd_set_error (bfd_error_bad_value);
3170 if (bfd_is_abs_section (sec))
3172 else if (sec == NULL || sec->owner == NULL)
3174 bfd_set_error (bfd_error_bad_value);
3181 /* We are turning this relocation into one
3182 against a section symbol. It would be
3183 proper to subtract the symbol's value,
3184 osec->vma, from the emitted reloc addend,
3185 but ld.so expects buggy relocs. */
3186 osec = sec->output_section;
3187 sindx = elf_section_data (osec)->dynindx;
3190 asection *oi = htab->elf.text_index_section;
3191 sindx = elf_section_data (oi)->dynindx;
3193 BFD_ASSERT (sindx != 0);
3196 outrel.r_info = htab->r_info (sindx, r_type);
3197 outrel.r_addend = relocation + rel->r_addend;
3201 sreloc = elf_section_data (input_section)->sreloc;
3203 if (sreloc == NULL || sreloc->contents == NULL)
3205 r = bfd_reloc_notsupported;
3206 goto check_relocation_error;
3209 elf_append_rela (output_bfd, sreloc, &outrel);
3211 /* If this reloc is against an external symbol, we do
3212 not want to fiddle with the addend. Otherwise, we
3213 need to include the symbol value so that it becomes
3214 an addend for the dynamic reloc. */
/* TLS General-Dynamic / TLSDESC / Initial-Exec relocations: possibly
   transition to a cheaper model first, then apply.  */
3221 case R_X86_64_TLSGD:
3222 case R_X86_64_GOTPC32_TLSDESC:
3223 case R_X86_64_TLSDESC_CALL:
3224 case R_X86_64_GOTTPOFF:
3225 tls_type = GOT_UNKNOWN;
3226 if (h == NULL && local_got_offsets)
3227 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3229 tls_type = elf_x86_hash_entry (h)->tls_type;
3231 r_type_tls = r_type;
3232 if (! elf_x86_64_tls_transition (info, input_bfd,
3233 input_section, contents,
3234 symtab_hdr, sym_hashes,
3235 &r_type_tls, tls_type, rel,
3236 relend, h, r_symndx, TRUE))
/* Transition to Local-Exec: rewrite the instruction sequence in place.  */
3239 if (r_type_tls == R_X86_64_TPOFF32)
3241 bfd_vma roff = rel->r_offset;
3243 BFD_ASSERT (! unresolved_reloc);
3245 if (r_type == R_X86_64_TLSGD)
3247 /* GD->LE transition. For 64bit, change
3248 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3249 .word 0x6666; rex64; call __tls_get_addr@PLT
3251 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3253 call *__tls_get_addr@GOTPCREL(%rip)
3254 which may be converted to
3255 addr32 call __tls_get_addr
3258 leaq foo@tpoff(%rax), %rax
3260 leaq foo@tlsgd(%rip), %rdi
3261 .word 0x6666; rex64; call __tls_get_addr@PLT
3263 leaq foo@tlsgd(%rip), %rdi
3265 call *__tls_get_addr@GOTPCREL(%rip)
3266 which may be converted to
3267 addr32 call __tls_get_addr
3270 leaq foo@tpoff(%rax), %rax
3271 For largepic, change:
3272 leaq foo@tlsgd(%rip), %rdi
3273 movabsq $__tls_get_addr@pltoff, %rax
3278 leaq foo@tpoff(%rax), %rax
3279 nopw 0x0(%rax,%rax,1) */
3281 if (ABI_64_P (output_bfd))
3283 if (contents[roff + 5] == 0xb8)
3285 memcpy (contents + roff - 3,
3286 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3287 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3291 memcpy (contents + roff - 4,
3292 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3296 memcpy (contents + roff - 3,
3297 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3299 bfd_put_32 (output_bfd,
3300 elf_x86_64_tpoff (info, relocation),
3301 contents + roff + 8 + largepic);
3302 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3303 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3308 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3310 /* GDesc -> LE transition.
3311 It's originally something like:
3312 leaq x@tlsdesc(%rip), %rax
3315 movl $x@tpoff, %rax. */
3317 unsigned int val, type;
3319 type = bfd_get_8 (input_bfd, contents + roff - 3);
3320 val = bfd_get_8 (input_bfd, contents + roff - 1);
3321 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3322 contents + roff - 3);
3323 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3324 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3325 contents + roff - 1);
3326 bfd_put_32 (output_bfd,
3327 elf_x86_64_tpoff (info, relocation),
3331 else if (r_type == R_X86_64_TLSDESC_CALL)
3333 /* GDesc -> LE transition.
/* The descriptor call is replaced with a two-byte nop (0x66 0x90).  */
3338 bfd_put_8 (output_bfd, 0x66, contents + roff);
3339 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3342 else if (r_type == R_X86_64_GOTTPOFF)
3344 /* IE->LE transition:
3345 For 64bit, originally it can be one of:
3346 movq foo@gottpoff(%rip), %reg
3347 addq foo@gottpoff(%rip), %reg
3350 leaq foo(%reg), %reg
3352 For 32bit, originally it can be one of:
3353 movq foo@gottpoff(%rip), %reg
3354 addl foo@gottpoff(%rip), %reg
3357 leal foo(%reg), %reg
3360 unsigned int val, type, reg;
3363 val = bfd_get_8 (input_bfd, contents + roff - 3);
3366 type = bfd_get_8 (input_bfd, contents + roff - 2);
3367 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3373 bfd_put_8 (output_bfd, 0x49,
3374 contents + roff - 3);
3375 else if (!ABI_64_P (output_bfd) && val == 0x44)
3376 bfd_put_8 (output_bfd, 0x41,
3377 contents + roff - 3);
3378 bfd_put_8 (output_bfd, 0xc7,
3379 contents + roff - 2);
3380 bfd_put_8 (output_bfd, 0xc0 | reg,
3381 contents + roff - 1);
3385 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3388 bfd_put_8 (output_bfd, 0x49,
3389 contents + roff - 3);
3390 else if (!ABI_64_P (output_bfd) && val == 0x44)
3391 bfd_put_8 (output_bfd, 0x41,
3392 contents + roff - 3);
3393 bfd_put_8 (output_bfd, 0x81,
3394 contents + roff - 2);
3395 bfd_put_8 (output_bfd, 0xc0 | reg,
3396 contents + roff - 1);
3400 /* addq/addl -> leaq/leal */
3402 bfd_put_8 (output_bfd, 0x4d,
3403 contents + roff - 3);
3404 else if (!ABI_64_P (output_bfd) && val == 0x44)
3405 bfd_put_8 (output_bfd, 0x45,
3406 contents + roff - 3);
3407 bfd_put_8 (output_bfd, 0x8d,
3408 contents + roff - 2);
3409 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3410 contents + roff - 1);
3412 bfd_put_32 (output_bfd,
3413 elf_x86_64_tpoff (info, relocation),
/* Not LE: allocate/initialize a GOT (and possibly TLSDESC .got.plt) slot.  */
3421 if (htab->elf.sgot == NULL)
3426 off = h->got.offset;
3427 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3431 if (local_got_offsets == NULL)
3434 off = local_got_offsets[r_symndx];
3435 offplt = local_tlsdesc_gotents[r_symndx];
3442 Elf_Internal_Rela outrel;
3446 if (htab->elf.srelgot == NULL)
3449 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3451 if (GOT_TLS_GDESC_P (tls_type))
3453 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3454 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3455 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3456 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3457 + htab->elf.sgotplt->output_offset
3459 + htab->sgotplt_jump_table_size);
3460 sreloc = htab->elf.srelplt;
3462 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3464 outrel.r_addend = 0;
3465 elf_append_rela (output_bfd, sreloc, &outrel);
3468 sreloc = htab->elf.srelgot;
3470 outrel.r_offset = (htab->elf.sgot->output_section->vma
3471 + htab->elf.sgot->output_offset + off);
3473 if (GOT_TLS_GD_P (tls_type))
3474 dr_type = R_X86_64_DTPMOD64;
3475 else if (GOT_TLS_GDESC_P (tls_type))
3478 dr_type = R_X86_64_TPOFF64;
3480 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3481 outrel.r_addend = 0;
3482 if ((dr_type == R_X86_64_TPOFF64
3483 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3484 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3485 outrel.r_info = htab->r_info (indx, dr_type);
3487 elf_append_rela (output_bfd, sreloc, &outrel);
3489 if (GOT_TLS_GD_P (tls_type))
/* GD needs a second GOT word: the DTPOFF value (or a DTPOFF reloc when the
   symbol is dynamic).  */
3493 BFD_ASSERT (! unresolved_reloc);
3494 bfd_put_64 (output_bfd,
3495 relocation - _bfd_x86_elf_dtpoff_base (info),
3496 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3500 bfd_put_64 (output_bfd, 0,
3501 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3502 outrel.r_info = htab->r_info (indx,
3504 outrel.r_offset += GOT_ENTRY_SIZE;
3505 elf_append_rela (output_bfd, sreloc,
3514 local_got_offsets[r_symndx] |= 1;
3517 if (off >= (bfd_vma) -2
3518 && ! GOT_TLS_GDESC_P (tls_type))
3520 if (r_type_tls == r_type)
3522 if (r_type == R_X86_64_GOTPC32_TLSDESC
3523 || r_type == R_X86_64_TLSDESC_CALL)
3524 relocation = htab->elf.sgotplt->output_section->vma
3525 + htab->elf.sgotplt->output_offset
3526 + offplt + htab->sgotplt_jump_table_size;
3528 relocation = htab->elf.sgot->output_section->vma
3529 + htab->elf.sgot->output_offset + off;
3530 unresolved_reloc = FALSE;
3534 bfd_vma roff = rel->r_offset;
3536 if (r_type == R_X86_64_TLSGD)
3538 /* GD->IE transition. For 64bit, change
3539 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3540 .word 0x6666; rex64; call __tls_get_addr@PLT
3542 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3544 call *__tls_get_addr@GOTPCREL(%rip
3545 which may be converted to
3546 addr32 call __tls_get_addr
3549 addq foo@gottpoff(%rip), %rax
3551 leaq foo@tlsgd(%rip), %rdi
3552 .word 0x6666; rex64; call __tls_get_addr@PLT
3554 leaq foo@tlsgd(%rip), %rdi
3556 call *__tls_get_addr@GOTPCREL(%rip)
3557 which may be converted to
3558 addr32 call __tls_get_addr
3561 addq foo@gottpoff(%rip), %rax
3562 For largepic, change:
3563 leaq foo@tlsgd(%rip), %rdi
3564 movabsq $__tls_get_addr@pltoff, %rax
3569 addq foo@gottpoff(%rax), %rax
3570 nopw 0x0(%rax,%rax,1) */
3572 if (ABI_64_P (output_bfd))
3574 if (contents[roff + 5] == 0xb8)
3576 memcpy (contents + roff - 3,
3577 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3578 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3582 memcpy (contents + roff - 4,
3583 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3587 memcpy (contents + roff - 3,
3588 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
/* Compute the PC-relative displacement to the GOT slot for the rewritten
   addq/addl foo@gottpoff(%rip) instruction.  */
3591 relocation = (htab->elf.sgot->output_section->vma
3592 + htab->elf.sgot->output_offset + off
3595 - input_section->output_section->vma
3596 - input_section->output_offset
3598 bfd_put_32 (output_bfd, relocation,
3599 contents + roff + 8 + largepic);
3600 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3605 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3607 /* GDesc -> IE transition.
3608 It's originally something like:
3609 leaq x@tlsdesc(%rip), %rax
3612 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3614 /* Now modify the instruction as appropriate. To
3615 turn a leaq into a movq in the form we use it, it
3616 suffices to change the second byte from 0x8d to
3618 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3620 bfd_put_32 (output_bfd,
3621 htab->elf.sgot->output_section->vma
3622 + htab->elf.sgot->output_offset + off
3624 - input_section->output_section->vma
3625 - input_section->output_offset
3630 else if (r_type == R_X86_64_TLSDESC_CALL)
3632 /* GDesc -> IE transition.
/* Replace the descriptor call with xchg %ax,%ax (0x66 0x90).  */
3639 bfd_put_8 (output_bfd, 0x66, contents + roff);
3640 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3648 case R_X86_64_TLSLD:
3649 if (! elf_x86_64_tls_transition (info, input_bfd,
3650 input_section, contents,
3651 symtab_hdr, sym_hashes,
3652 &r_type, GOT_UNKNOWN, rel,
3653 relend, h, r_symndx, TRUE))
3656 if (r_type != R_X86_64_TLSLD)
3658 /* LD->LE transition:
3659 leaq foo@tlsld(%rip), %rdi
3660 call __tls_get_addr@PLT
3661 For 64bit, we change it into:
3662 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3663 For 32bit, we change it into:
3664 nopl 0x0(%rax); movl %fs:0, %eax
3666 leaq foo@tlsld(%rip), %rdi;
3667 call *__tls_get_addr@GOTPCREL(%rip)
3668 which may be converted to
3669 addr32 call __tls_get_addr
3670 For 64bit, we change it into:
3671 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3672 For 32bit, we change it into:
3673 nopw 0x0(%rax); movl %fs:0, %eax
3674 For largepic, change:
3675 leaq foo@tlsgd(%rip), %rdi
3676 movabsq $__tls_get_addr@pltoff, %rax
3680 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3683 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3684 if (ABI_64_P (output_bfd))
3686 if (contents[rel->r_offset + 5] == 0xb8)
3687 memcpy (contents + rel->r_offset - 3,
3688 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3689 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3690 else if (contents[rel->r_offset + 4] == 0xff
3691 || contents[rel->r_offset + 4] == 0x67)
3692 memcpy (contents + rel->r_offset - 3,
3693 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3696 memcpy (contents + rel->r_offset - 3,
3697 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3701 if (contents[rel->r_offset + 4] == 0xff)
3702 memcpy (contents + rel->r_offset - 3,
3703 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3706 memcpy (contents + rel->r_offset - 3,
3707 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3709 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3710 and R_X86_64_PLTOFF64. */
/* Still LD: use the single shared tls_ld_or_ldm GOT entry (module id).  */
3716 if (htab->elf.sgot == NULL)
3719 off = htab->tls_ld_or_ldm_got.offset;
3724 Elf_Internal_Rela outrel;
3726 if (htab->elf.srelgot == NULL)
3729 outrel.r_offset = (htab->elf.sgot->output_section->vma
3730 + htab->elf.sgot->output_offset + off);
3732 bfd_put_64 (output_bfd, 0,
3733 htab->elf.sgot->contents + off);
3734 bfd_put_64 (output_bfd, 0,
3735 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3736 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3737 outrel.r_addend = 0;
3738 elf_append_rela (output_bfd, htab->elf.srelgot,
3740 htab->tls_ld_or_ldm_got.offset |= 1;
3742 relocation = htab->elf.sgot->output_section->vma
3743 + htab->elf.sgot->output_offset + off;
3744 unresolved_reloc = FALSE;
3747 case R_X86_64_DTPOFF32:
3748 if (!bfd_link_executable (info)
3749 || (input_section->flags & SEC_CODE) == 0)
3750 relocation -= _bfd_x86_elf_dtpoff_base (info);
3752 relocation = elf_x86_64_tpoff (info, relocation);
3755 case R_X86_64_TPOFF32:
3756 case R_X86_64_TPOFF64:
3757 BFD_ASSERT (bfd_link_executable (info));
3758 relocation = elf_x86_64_tpoff (info, relocation);
3761 case R_X86_64_DTPOFF64:
3762 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3763 relocation -= _bfd_x86_elf_dtpoff_base (info);
3770 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3771 because such sections are not SEC_ALLOC and thus ld.so will
3772 not process them. */
3773 if (unresolved_reloc
3774 && !((input_section->flags & SEC_DEBUGGING) != 0
3776 && _bfd_elf_section_offset (output_bfd, info, input_section,
3777 rel->r_offset) != (bfd_vma) -1)
3782 sec = h->root.u.def.section;
3783 if ((info->nocopyreloc
3784 || (eh->def_protected
3785 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3786 && !(h->root.u.def.section->flags & SEC_CODE))
3787 return elf_x86_64_need_pic (info, input_bfd, input_section,
3788 h, NULL, NULL, howto);
3793 /* xgettext:c-format */
3794 (_("%B(%A+%#Lx): unresolvable %s relocation against symbol `%s'"),
3799 h->root.root.string);
/* Finally apply the computed value through the howto.  */
3805 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3806 contents, rel->r_offset,
3807 relocation, rel->r_addend);
3809 check_relocation_error:
3810 if (r != bfd_reloc_ok)
3815 name = h->root.root.string;
3818 name = bfd_elf_string_from_elf_section (input_bfd,
3819 symtab_hdr->sh_link,
3824 name = bfd_section_name (input_bfd, sec);
3827 if (r == bfd_reloc_overflow)
3829 if (converted_reloc)
/* An overflow on a relax-converted GOTPCREL is fatal: the user must relink
   with --no-relax.  */
3831 info->callbacks->einfo
3832 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3835 (*info->callbacks->reloc_overflow)
3836 (info, (h ? &h->root : NULL), name, howto->name,
3837 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3842 /* xgettext:c-format */
3843 (_("%B(%A+%#Lx): reloc against `%s': error %d"),
3844 input_bfd, input_section,
3845 rel->r_offset, name, (int) r);
/* WREL fell behind REL, i.e. some relocations were deleted during the walk:
   shrink both the output and input reloc section headers and the input
   section's reloc_count to match.  */
3856 Elf_Internal_Shdr *rel_hdr;
3857 size_t deleted = rel - wrel;
3859 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3860 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3861 if (rel_hdr->sh_size == 0)
3863 /* It is too late to remove an empty reloc section. Leave
3865 ??? What is wrong with an empty section??? */
3866 rel_hdr->sh_size = rel_hdr->sh_entsize;
3869 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3870 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3871 input_section->reloc_count -= deleted;
3877 /* Finish up dynamic symbol handling. We set the contents of various
3878 dynamic sections here. */
/* For each dynamic symbol H this fills its PLT slot (lazy, second or
   GOT-based), writes the matching .got/.got.plt words, and appends the
   dynamic relocations (JUMP_SLOT/IRELATIVE, GLOB_DAT/RELATIVE, COPY).
   NOTE(review): this extract is non-contiguous (source line numbers
   skip); missing else-branches/braces are not shown here.  */
3881 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3882 struct bfd_link_info *info,
3883 struct elf_link_hash_entry *h,
3884 Elf_Internal_Sym *sym)
3886 struct elf_x86_link_hash_table *htab;
3887 bfd_boolean use_plt_second;
3888 struct elf_x86_link_hash_entry *eh;
3889 bfd_boolean local_undefweak;
3891 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3895 /* Use the second PLT section only if there is .plt section. */
3896 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3898 eh = (struct elf_x86_link_hash_entry *) h;
3899 if (eh->no_finish_dynamic_symbol)
3902 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3903 resolved undefined weak symbols in executable so that their
3904 references have value 0 at run-time. */
3905 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
/* Case 1: the symbol has a regular PLT entry.  */
3907 if (h->plt.offset != (bfd_vma) -1)
3910 bfd_vma got_offset, plt_offset;
3911 Elf_Internal_Rela rela;
3913 asection *plt, *gotplt, *relplt, *resolved_plt;
3914 const struct elf_backend_data *bed;
3915 bfd_vma plt_got_pcrel_offset;
3917 /* When building a static executable, use .iplt, .igot.plt and
3918 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3919 if (htab->elf.splt != NULL)
3921 plt = htab->elf.splt;
3922 gotplt = htab->elf.sgotplt;
3923 relplt = htab->elf.srelplt;
3927 plt = htab->elf.iplt;
3928 gotplt = htab->elf.igotplt;
3929 relplt = htab->elf.irelplt;
3932 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3934 /* Get the index in the procedure linkage table which
3935 corresponds to this symbol. This is the index of this symbol
3936 in all the symbols for which we are making plt entries. The
3937 first entry in the procedure linkage table is reserved.
3939 Get the offset into the .got table of the entry that
3940 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3941 bytes. The first three are reserved for the dynamic linker.
3943 For static executables, we don't reserve anything. */
3945 if (plt == htab->elf.splt)
3947 got_offset = (h->plt.offset / htab->plt.plt_entry_size
3948 - htab->plt.has_plt0);
3949 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
3953 got_offset = h->plt.offset / htab->plt.plt_entry_size;
3954 got_offset = got_offset * GOT_ENTRY_SIZE;
3957 /* Fill in the entry in the procedure linkage table. */
3958 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
3959 htab->plt.plt_entry_size);
3962 memcpy (htab->plt_second->contents + eh->plt_second.offset,
3963 htab->non_lazy_plt->plt_entry,
3964 htab->non_lazy_plt->plt_entry_size);
3966 resolved_plt = htab->plt_second;
3967 plt_offset = eh->plt_second.offset;
3972 plt_offset = h->plt.offset;
3975 /* Insert the relocation positions of the plt section. */
3977 /* Put offset the PC-relative instruction referring to the GOT entry,
3978 subtracting the size of that instruction. */
3979 plt_got_pcrel_offset = (gotplt->output_section->vma
3980 + gotplt->output_offset
3982 - resolved_plt->output_section->vma
3983 - resolved_plt->output_offset
3985 - htab->plt.plt_got_insn_size);
3987 /* Check PC-relative offset overflow in PLT entry. */
3988 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
3989 /* xgettext:c-format */
3990 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
3991 output_bfd, h->root.root.string);
3993 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
3994 (resolved_plt->contents + plt_offset
3995 + htab->plt.plt_got_offset));
3997 /* Fill in the entry in the global offset table, initially this
3998 points to the second part of the PLT entry. Leave the entry
3999 as zero for undefined weak symbol in PIE. No PLT relocation
4000 against undefined weak symbol in PIE. */
4001 if (!local_undefweak)
4003 if (htab->plt.has_plt0)
4004 bfd_put_64 (output_bfd, (plt->output_section->vma
4005 + plt->output_offset
4007 + htab->lazy_plt->plt_lazy_offset),
4008 gotplt->contents + got_offset);
4010 /* Fill in the entry in the .rela.plt section. */
4011 rela.r_offset = (gotplt->output_section->vma
4012 + gotplt->output_offset
4014 if (PLT_LOCAL_IFUNC_P (info, h))
4016 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
4017 h->root.root.string,
4018 h->root.u.def.section->owner);
4020 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4021 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4022 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4023 rela.r_addend = (h->root.u.def.value
4024 + h->root.u.def.section->output_section->vma
4025 + h->root.u.def.section->output_offset);
4026 /* R_X86_64_IRELATIVE comes last. */
4027 plt_index = htab->next_irelative_index--;
4031 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4033 plt_index = htab->next_jump_slot_index++;
4036 /* Don't fill the second and third slots in PLT entry for
4037 static executables nor without PLT0. */
4038 if (plt == htab->elf.splt && htab->plt.has_plt0)
4041 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4043 /* Put relocation index. */
4044 bfd_put_32 (output_bfd, plt_index,
4045 (plt->contents + h->plt.offset
4046 + htab->lazy_plt->plt_reloc_offset));
4048 /* Put offset for jmp .PLT0 and check for overflow. We don't
4049 check relocation index for overflow since branch displacement
4050 will overflow first. */
4051 if (plt0_offset > 0x80000000)
4052 /* xgettext:c-format */
4053 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
4054 output_bfd, h->root.root.string);
4055 bfd_put_32 (output_bfd, - plt0_offset,
4056 (plt->contents + h->plt.offset
4057 + htab->lazy_plt->plt_plt_offset));
4060 bed = get_elf_backend_data (output_bfd);
4061 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4062 bed->s->swap_reloca_out (output_bfd, &rela, loc);
/* Case 2: the symbol only has a GOT-based PLT entry (.plt.got).  */
4065 else if (eh->plt_got.offset != (bfd_vma) -1)
4067 bfd_vma got_offset, plt_offset;
4068 asection *plt, *got;
4069 bfd_boolean got_after_plt;
4070 int32_t got_pcrel_offset;
4072 /* Set the entry in the GOT procedure linkage table. */
4073 plt = htab->plt_got;
4074 got = htab->elf.sgot;
4075 got_offset = h->got.offset;
4077 if (got_offset == (bfd_vma) -1
4078 || (h->type == STT_GNU_IFUNC && h->def_regular)
4083 /* Use the non-lazy PLT entry template for the GOT PLT since they
4084 are the identical. */
4085 /* Fill in the entry in the GOT procedure linkage table. */
4086 plt_offset = eh->plt_got.offset;
4087 memcpy (plt->contents + plt_offset,
4088 htab->non_lazy_plt->plt_entry,
4089 htab->non_lazy_plt->plt_entry_size);
4091 /* Put offset the PC-relative instruction referring to the GOT
4092 entry, subtracting the size of that instruction. */
4093 got_pcrel_offset = (got->output_section->vma
4094 + got->output_offset
4096 - plt->output_section->vma
4097 - plt->output_offset
4099 - htab->non_lazy_plt->plt_got_insn_size);
4101 /* Check PC-relative offset overflow in GOT PLT entry. */
4102 got_after_plt = got->output_section->vma > plt->output_section->vma;
4103 if ((got_after_plt && got_pcrel_offset < 0)
4104 || (!got_after_plt && got_pcrel_offset > 0))
4105 /* xgettext:c-format */
4106 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4107 output_bfd, h->root.root.string);
4109 bfd_put_32 (output_bfd, got_pcrel_offset,
4110 (plt->contents + plt_offset
4111 + htab->non_lazy_plt->plt_got_offset));
/* A symbol with a PLT entry is reported to the dynamic linker as
   undefined unless pointer equality is required.  */
4114 if (!local_undefweak
4116 && (h->plt.offset != (bfd_vma) -1
4117 || eh->plt_got.offset != (bfd_vma) -1))
4119 /* Mark the symbol as undefined, rather than as defined in
4120 the .plt section. Leave the value if there were any
4121 relocations where pointer equality matters (this is a clue
4122 for the dynamic linker, to make function pointer
4123 comparisons work between an application and shared
4124 library), otherwise set it to zero. If a function is only
4125 called from a binary, there is no need to slow down
4126 shared libraries because of that. */
4127 sym->st_shndx = SHN_UNDEF;
4128 if (!h->pointer_equality_needed)
4132 /* Don't generate dynamic GOT relocation against undefined weak
4133 symbol in executable. */
4134 if (h->got.offset != (bfd_vma) -1
4135 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4136 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4137 && !local_undefweak)
4139 Elf_Internal_Rela rela;
4140 asection *relgot = htab->elf.srelgot;
4142 /* This symbol has an entry in the global offset table. Set it
4144 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4147 rela.r_offset = (htab->elf.sgot->output_section->vma
4148 + htab->elf.sgot->output_offset
4149 + (h->got.offset &~ (bfd_vma) 1));
4151 /* If this is a static link, or it is a -Bsymbolic link and the
4152 symbol is defined locally or was forced to be local because
4153 of a version file, we just want to emit a RELATIVE reloc.
4154 The entry in the global offset table will already have been
4155 initialized in the relocate_section function. */
4157 && h->type == STT_GNU_IFUNC)
4159 if (h->plt.offset == (bfd_vma) -1)
4161 /* STT_GNU_IFUNC is referenced without PLT. */
4162 if (htab->elf.splt == NULL)
4164 /* use .rel[a].iplt section to store .got relocations
4165 in static executable. */
4166 relgot = htab->elf.irelplt;
4168 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4170 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
4171 h->root.root.string,
4172 h->root.u.def.section->owner);
4174 rela.r_info = htab->r_info (0,
4175 R_X86_64_IRELATIVE);
4176 rela.r_addend = (h->root.u.def.value
4177 + h->root.u.def.section->output_section->vma
4178 + h->root.u.def.section->output_offset);
4183 else if (bfd_link_pic (info))
4185 /* Generate R_X86_64_GLOB_DAT. */
4193 if (!h->pointer_equality_needed)
4196 /* For non-shared object, we can't use .got.plt, which
4197 contains the real function addres if we need pointer
4198 equality. We load the GOT entry with the PLT entry. */
4199 if (htab->plt_second != NULL)
4201 plt = htab->plt_second;
4202 plt_offset = eh->plt_second.offset;
4206 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4207 plt_offset = h->plt.offset;
4209 bfd_put_64 (output_bfd, (plt->output_section->vma
4210 + plt->output_offset
4212 htab->elf.sgot->contents + h->got.offset);
4216 else if (bfd_link_pic (info)
4217 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4219 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
/* Bit 0 of got.offset is used as a "GOT entry already written"
   flag; it must be masked off before use as an offset.  */
4221 BFD_ASSERT((h->got.offset & 1) != 0);
4222 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4223 rela.r_addend = (h->root.u.def.value
4224 + h->root.u.def.section->output_section->vma
4225 + h->root.u.def.section->output_offset);
4229 BFD_ASSERT((h->got.offset & 1) == 0);
4231 bfd_put_64 (output_bfd, (bfd_vma) 0,
4232 htab->elf.sgot->contents + h->got.offset);
4233 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4237 elf_append_rela (output_bfd, relgot, &rela);
4242 Elf_Internal_Rela rela;
4245 /* This symbol needs a copy reloc. Set it up. */
4246 VERIFY_COPY_RELOC (h, htab)
4248 rela.r_offset = (h->root.u.def.value
4249 + h->root.u.def.section->output_section->vma
4250 + h->root.u.def.section->output_offset);
4251 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
/* Read-only copy-reloc'ed data lives in .data.rel.ro (sreldynrelro),
   writable data in .bss (srelbss).  */
4253 if (h->root.u.def.section == htab->elf.sdynrelro)
4254 s = htab->elf.sreldynrelro;
4256 s = htab->elf.srelbss;
4257 elf_append_rela (output_bfd, s, &rela);
4263 /* Finish up local dynamic symbol handling. We set the contents of
4264 various dynamic sections here. */
/* htab_traverse callback over htab->loc_hash_table: SLOT holds a
   local STT_GNU_IFUNC hash entry, INF the bfd_link_info.  Delegates
   to elf_x86_64_finish_dynamic_symbol.  */
4267 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4269 struct elf_link_hash_entry *h
4270 = (struct elf_link_hash_entry *) *slot;
4271 struct bfd_link_info *info
4272 = (struct bfd_link_info *) inf;
4274 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4278 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4279 here since undefined weak symbol may not be dynamic and may not be
4280 called for elf_x86_64_finish_dynamic_symbol. */
/* bfd_hash_traverse callback: skips entries that are not undefined
   weak or that are dynamic (dynindx != -1), then finishes the rest.  */
4283 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4286 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4287 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4289 if (h->root.type != bfd_link_hash_undefweak
4290 || h->dynindx != -1)
4293 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4297 /* Used to decide how to sort relocs in an optimal manner for the
4298 dynamic linker, before writing them out. */
4300 static enum elf_reloc_type_class
4301 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4302 const asection *rel_sec ATTRIBUTE_UNUSED,
4303 const Elf_Internal_Rela *rela)
4305 bfd *abfd = info->output_bfd;
4306 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4307 struct elf_x86_link_hash_table *htab
4308 = elf_x86_hash_table (info, X86_64_ELF_DATA);
/* If the dynamic symbol table is available, classify any reloc
   against an STT_GNU_IFUNC symbol as ifunc regardless of its type.  */
4310 if (htab->elf.dynsym != NULL
4311 && htab->elf.dynsym->contents != NULL)
4313 /* Check relocation against STT_GNU_IFUNC symbol if there are
4315 unsigned long r_symndx = htab->r_sym (rela->r_info);
4316 if (r_symndx != STN_UNDEF)
4318 Elf_Internal_Sym sym;
4319 if (!bed->s->swap_symbol_in (abfd,
4320 (htab->elf.dynsym->contents
4321 + r_symndx * bed->s->sizeof_sym),
4325 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4326 return reloc_class_ifunc;
/* Otherwise classify purely by relocation type.  */
4330 switch ((int) ELF32_R_TYPE (rela->r_info))
4332 case R_X86_64_IRELATIVE:
4333 return reloc_class_ifunc;
4334 case R_X86_64_RELATIVE:
4335 case R_X86_64_RELATIVE64:
4336 return reloc_class_relative;
4337 case R_X86_64_JUMP_SLOT:
4338 return reloc_class_plt;
4340 return reloc_class_copy;
4342 return reloc_class_normal;
4346 /* Finish up the dynamic sections. */
/* Writes PLT0 (the special first PLT entry), the TLSDESC trampoline
   if used, and finally fills PLT entries for undefined weak symbols
   when producing a PIE.  Shared work is done by
   _bfd_x86_elf_finish_dynamic_sections first.  */
4349 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4350 struct bfd_link_info *info)
4352 struct elf_x86_link_hash_table *htab;
4354 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4358 if (! htab->elf.dynamic_sections_created)
4361 if (htab->elf.splt && htab->elf.splt->size > 0)
4363 elf_section_data (htab->elf.splt->output_section)
4364 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4366 if (htab->plt.has_plt0)
4368 /* Fill in the special first entry in the procedure linkage
4370 memcpy (htab->elf.splt->contents,
4371 htab->lazy_plt->plt0_entry,
4372 htab->lazy_plt->plt0_entry_size);
4373 /* Add offset for pushq GOT+8(%rip), since the instruction
4374 uses 6 bytes subtract this value. */
4375 bfd_put_32 (output_bfd,
4376 (htab->elf.sgotplt->output_section->vma
4377 + htab->elf.sgotplt->output_offset
4379 - htab->elf.splt->output_section->vma
4380 - htab->elf.splt->output_offset
4382 (htab->elf.splt->contents
4383 + htab->lazy_plt->plt0_got1_offset));
4384 /* Add offset for the PC-relative instruction accessing
4385 GOT+16, subtracting the offset to the end of that
4387 bfd_put_32 (output_bfd,
4388 (htab->elf.sgotplt->output_section->vma
4389 + htab->elf.sgotplt->output_offset
4391 - htab->elf.splt->output_section->vma
4392 - htab->elf.splt->output_offset
4393 - htab->lazy_plt->plt0_got2_insn_end),
4394 (htab->elf.splt->contents
4395 + htab->lazy_plt->plt0_got2_offset));
/* Emit the TLS descriptor trampoline, modelled on PLT0, and clear
   its reserved GOT slot.  */
4398 if (htab->tlsdesc_plt)
4400 bfd_put_64 (output_bfd, (bfd_vma) 0,
4401 htab->elf.sgot->contents + htab->tlsdesc_got);
4403 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4404 htab->lazy_plt->plt0_entry,
4405 htab->lazy_plt->plt0_entry_size);
4407 /* Add offset for pushq GOT+8(%rip), since the
4408 instruction uses 6 bytes subtract this value. */
4409 bfd_put_32 (output_bfd,
4410 (htab->elf.sgotplt->output_section->vma
4411 + htab->elf.sgotplt->output_offset
4413 - htab->elf.splt->output_section->vma
4414 - htab->elf.splt->output_offset
4417 (htab->elf.splt->contents
4419 + htab->lazy_plt->plt0_got1_offset));
4420 /* Add offset for the PC-relative instruction accessing
4421 GOT+TDG, where TDG stands for htab->tlsdesc_got,
4422 subtracting the offset to the end of that
4424 bfd_put_32 (output_bfd,
4425 (htab->elf.sgot->output_section->vma
4426 + htab->elf.sgot->output_offset
4428 - htab->elf.splt->output_section->vma
4429 - htab->elf.splt->output_offset
4431 - htab->lazy_plt->plt0_got2_insn_end),
4432 (htab->elf.splt->contents
4434 + htab->lazy_plt->plt0_got2_offset));
4438 /* Fill PLT entries for undefined weak symbols in PIE. */
4439 if (bfd_link_pie (info))
4440 bfd_hash_traverse (&info->hash->table,
4441 elf_x86_64_pie_finish_undefweak_symbol,
4447 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4448 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4449 It has to be done before elf_link_sort_relocs is called so that
4450 dynamic relocations are properly sorted. */
/* Backend output_arch_local_syms hook; FUNC is unused here — all the
   work is the loc_hash_table traversal below.  */
4453 elf_x86_64_output_arch_local_syms
4454 (bfd *output_bfd ATTRIBUTE_UNUSED,
4455 struct bfd_link_info *info,
4456 void *flaginfo ATTRIBUTE_UNUSED,
4457 int (*func) (void *, const char *,
4460 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4462 struct elf_x86_link_hash_table *htab
4463 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4467 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4468 htab_traverse (htab->loc_hash_table,
4469 elf_x86_64_finish_local_dynamic_symbol,
4475 /* Forward declaration. */
4476 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4478 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4479 dynamic relocations. */
/* Scans the known PLT-style sections (.plt, .plt.got, .plt.sec,
   .plt.bnd), sniffs which PLT layout each one uses by comparing its
   contents against the known entry templates, and hands the results
   to _bfd_x86_elf_get_synthetic_symtab.  */
4482 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4483 long symcount ATTRIBUTE_UNUSED,
4484 asymbol **syms ATTRIBUTE_UNUSED,
4491 bfd_byte *plt_contents;
4493 const struct elf_x86_lazy_plt_layout *lazy_plt;
4494 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4495 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4496 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4497 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4498 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4500 enum elf_x86_plt_type plt_type;
4501 struct elf_x86_plt plts[] =
4503 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4504 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4505 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4506 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4507 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
4512 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4515 if (dynsymcount <= 0)
4518 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
/* Pick the candidate PLT templates for this target OS and ABI.  */
4522 if (get_elf_x86_backend_data (abfd)->target_os == is_normal)
4524 lazy_plt = &elf_x86_64_lazy_plt;
4525 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4526 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4527 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4528 if (ABI_64_P (abfd))
4530 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4531 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4535 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4536 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4541 lazy_plt = &elf_x86_64_nacl_plt;
4542 non_lazy_plt = NULL;
4543 lazy_bnd_plt = NULL;
4544 non_lazy_bnd_plt = NULL;
4545 lazy_ibt_plt = NULL;
4546 non_lazy_ibt_plt = NULL;
4550 for (j = 0; plts[j].name != NULL; j++)
4552 plt = bfd_get_section_by_name (abfd, plts[j].name);
4553 if (plt == NULL || plt->size == 0)
4556 /* Get the PLT section contents. */
4557 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4558 if (plt_contents == NULL)
4560 if (!bfd_get_section_contents (abfd, (asection *) plt,
4561 plt_contents, 0, plt->size))
4563 free (plt_contents);
4567 /* Check what kind of PLT it is. */
4568 plt_type = plt_unknown;
4569 if (plts[j].type == plt_unknown
4570 && (plt->size >= (lazy_plt->plt_entry_size
4571 + lazy_plt->plt_entry_size)))
4573 /* Match lazy PLT first. Need to check the first two
4575 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4576 lazy_plt->plt0_got1_offset) == 0)
4577 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4579 plt_type = plt_lazy;
4580 else if (lazy_bnd_plt != NULL
4581 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4582 lazy_bnd_plt->plt0_got1_offset) == 0)
4583 && (memcmp (plt_contents + 6,
4584 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4586 plt_type = plt_lazy | plt_second;
4587 /* The fist entry in the lazy IBT PLT is the same as the
4589 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4590 lazy_ibt_plt->plt_entry,
4591 lazy_ibt_plt->plt_got_offset) == 0))
4592 lazy_plt = lazy_ibt_plt;
4594 lazy_plt = lazy_bnd_plt;
4598 if (non_lazy_plt != NULL
4599 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4600 && plt->size >= non_lazy_plt->plt_entry_size)
4602 /* Match non-lazy PLT. */
4603 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4604 non_lazy_plt->plt_got_offset) == 0)
4605 plt_type = plt_non_lazy;
4608 if (plt_type == plt_unknown || plt_type == plt_second)
4610 if (non_lazy_bnd_plt != NULL
4611 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4612 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4613 non_lazy_bnd_plt->plt_got_offset) == 0))
4615 /* Match BND PLT. */
4616 plt_type = plt_second;
4617 non_lazy_plt = non_lazy_bnd_plt;
4619 else if (non_lazy_ibt_plt != NULL
4620 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4621 && (memcmp (plt_contents,
4622 non_lazy_ibt_plt->plt_entry,
4623 non_lazy_ibt_plt->plt_got_offset) == 0))
4625 /* Match IBT PLT. */
4626 plt_type = plt_second;
4627 non_lazy_plt = non_lazy_ibt_plt;
4631 if (plt_type == plt_unknown)
4633 free (plt_contents);
/* Record the detected layout's parameters for the generic walker.  */
4638 plts[j].type = plt_type;
4640 if ((plt_type & plt_lazy))
4642 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4643 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4644 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4645 /* Skip PLT0 in lazy PLT. */
4650 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4651 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4652 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4656 /* Skip lazy PLT when the second PLT is used. */
4657 if (plt_type == (plt_lazy | plt_second))
4661 n = plt->size / plts[j].plt_entry_size;
4666 plts[j].contents = plt_contents;
4669 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4670 (bfd_vma) 0, plts, dynsyms,
4674 /* Handle an x86-64 specific section when reading an object file. This
4675 is called when elfcode.h finds a section with an unknown type. */
/* Only SHT_X86_64_UNWIND is accepted; the section is then created
   via the generic _bfd_elf_make_section_from_shdr.  */
4678 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4679 const char *name, int shindex)
4681 if (hdr->sh_type != SHT_X86_64_UNWIND)
4684 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4690 /* Hook called by the linker routine which adds symbols from an object
4691 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
/* Symbols in SHN_X86_64_LCOMMON are redirected to a linker-created
   "LARGE_COMMON" section flagged SHF_X86_64_LARGE; the symbol value
   is set to its size, as is usual for common symbols.  */
4695 elf_x86_64_add_symbol_hook (bfd *abfd,
4696 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4697 Elf_Internal_Sym *sym,
4698 const char **namep ATTRIBUTE_UNUSED,
4699 flagword *flagsp ATTRIBUTE_UNUSED,
4705 switch (sym->st_shndx)
4707 case SHN_X86_64_LCOMMON:
4708 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4711 lcomm = bfd_make_section_with_flags (abfd,
4715 | SEC_LINKER_CREATED));
4718 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4721 *valp = sym->st_size;
4729 /* Given a BFD section, try to locate the corresponding ELF section
/* Maps the internal large-common section back to the special index
   SHN_X86_64_LCOMMON when writing out symbols.  */
4733 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4734 asection *sec, int *index_return)
4736 if (sec == &_bfd_elf_large_com_section)
4738 *index_return = SHN_X86_64_LCOMMON;
4744 /* Process a symbol. */
/* Rewrites SHN_X86_64_LCOMMON symbols so generic BFD code sees them
   as members of the large-common section, with the common-symbol
   convention of value == size.  */
4747 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4750 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4752 switch (elfsym->internal_elf_sym.st_shndx)
4754 case SHN_X86_64_LCOMMON:
4755 asym->section = &_bfd_elf_large_com_section;
4756 asym->value = elfsym->internal_elf_sym.st_size;
4757 /* Common symbol doesn't set BSF_GLOBAL. */
4758 asym->flags &= ~BSF_GLOBAL;
/* Return whether SYM is a common definition: either ordinary
   SHN_COMMON or the x86-64 large-common index.  */
4764 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4766 return (sym->st_shndx == SHN_COMMON
4767 || sym->st_shndx == SHN_X86_64_LCOMMON);
/* Pick the common-section index for SEC: large sections map to
   SHN_X86_64_LCOMMON (plain SHN_COMMON is handled in the elided
   branch of this extract).  */
4771 elf_x86_64_common_section_index (asection *sec)
4773 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4776 return SHN_X86_64_LCOMMON;
/* Pick the BFD common section for SEC: ordinary sections use the
   generic common section, SHF_X86_64_LARGE ones the large-common
   section.  */
4780 elf_x86_64_common_section (asection *sec)
4782 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4783 return bfd_com_section_ptr;
4785 return &_bfd_elf_large_com_section;
/* Symbol-merge hook: when an ordinary common symbol meets a large
   common symbol, demote the large one so the result is an ordinary
   common symbol.  */
4789 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4790 const Elf_Internal_Sym *sym,
4795 const asection *oldsec)
4797 /* A normal common symbol and a large common symbol result in a
4798 normal common symbol. We turn the large common symbol into a
4801 && h->root.type == bfd_link_hash_common
4803 && bfd_is_com_section (*psec)
4806 if (sym->st_shndx == SHN_COMMON
4807 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
/* Old definition was large-common; replace its section with an
   ordinary "COMMON" section.  */
4809 h->root.u.c.p->section
4810 = bfd_make_section_old_way (oldbfd, "COMMON")
4811 h->root.u.c.p->section->flags = SEC_ALLOC;
4813 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4814 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4815 *psec = bfd_com_section_ptr;
/* Report how many extra program headers are needed for the large
   data/rodata sections (.lrodata, .ldata).  */
4822 elf_x86_64_additional_program_headers (bfd *abfd,
4823 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4828 /* Check to see if we need a large readonly segment. */
4829 s = bfd_get_section_by_name (abfd, ".lrodata");
4830 if (s && (s->flags & SEC_LOAD))
4833 /* Check to see if we need a large data segment. Since .lbss sections
4834 is placed right after the .bss section, there should be no need for
4835 a large data segment just because of .lbss. */
4836 s = bfd_get_section_by_name (abfd, ".ldata");
4837 if (s && (s->flags & SEC_LOAD))
4843 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
/* Requires matching ELF class (so x32 and 64-bit objects do not mix)
   on top of the generic compatibility check.  */
4846 elf_x86_64_relocs_compatible (const bfd_target *input,
4847 const bfd_target *output)
4849 return ((xvec_get_elf_backend_data (input)->s->elfclass
4850 == xvec_get_elf_backend_data (output)->s->elfclass)
4851 && _bfd_elf_relocs_compatible (input, output));
4854 extern const bfd_target x86_64_elf64_sol2_vec;
4856 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4857 with GNU properties if found. Otherwise, return NULL. */
/* Fills an elf_x86_init_table with the PLT templates and r_info/r_sym
   accessors appropriate for the output (64-bit vs x32, target OS),
   then delegates to the shared x86 implementation.  */
4860 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4862 struct elf_x86_init_table init_table;
/* Compile-time sanity check that the converted-reloc marker bit does
   not collide with real relocation numbers.  */
4864 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4865 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4866 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4867 != (int) R_X86_64_GNU_VTINHERIT)
4868 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4869 != (int) R_X86_64_GNU_VTENTRY))
4872 /* This is unused for x86-64. */
4873 init_table.plt0_pad_byte = 0x90;
4875 if (get_elf_x86_backend_data (info->output_bfd)->target_os
4880 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4881 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4885 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4886 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4889 if (ABI_64_P (info->output_bfd))
4891 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4892 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4896 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4897 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4902 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4903 init_table.non_lazy_plt = NULL;
4904 init_table.lazy_ibt_plt = NULL;
4905 init_table.non_lazy_ibt_plt = NULL;
4908 if (ABI_64_P (info->output_bfd))
4910 init_table.r_info = elf64_r_info;
4911 init_table.r_sym = elf64_r_sym;
4915 init_table.r_info = elf32_r_info;
4916 init_table.r_sym = elf32_r_sym;
/* Solaris 2 requires _GLOBAL_OFFSET_TABLE_ to always exist.  */
4919 init_table.need_global_offset_table
4920 = info->output_bfd->xvec == &x86_64_elf64_sol2_vec;
4922 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
4925 static const struct bfd_elf_special_section
/* Table of x86-64 "large model" sections and their required types and
   flags; terminated by a NULL entry.  */
4926 elf_x86_64_special_sections[]=
4928 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4929 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4930 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
4931 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4932 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4933 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4934 { NULL, 0, 0, 0, 0 }
/* Backend definitions for the default 64-bit x86-64 ELF target
   (vector x86_64_elf64_vec, target name "elf64-x86-64").  The macros
   below parameterize the generic linker code pulled in by
   "elf64-target.h" at the end of this group.  */
4937 #define TARGET_LITTLE_SYM		    x86_64_elf64_vec
4938 #define TARGET_LITTLE_NAME		    "elf64-x86-64"
4939 #define ELF_ARCH			    bfd_arch_i386
4940 #define ELF_TARGET_ID			    X86_64_ELF_DATA
4941 #define ELF_MACHINE_CODE		    EM_X86_64
/* Page sizes: 2 MiB maximum, 4 KiB minimum and common.  */
4942 #define ELF_MAXPAGESIZE			    0x200000
4943 #define ELF_MINPAGESIZE			    0x1000
4944 #define ELF_COMMONPAGESIZE		    0x1000
/* Backend capability and layout knobs consumed by the generic ELF
   code: section GC, GOT refcounting, a read-only PLT with a separate
   .got.plt, no PLT symbols, and three reserved GOT header entries.  */
4946 #define elf_backend_can_gc_sections	    1
4947 #define elf_backend_can_refcount	    1
4948 #define elf_backend_want_got_plt	    1
4949 #define elf_backend_plt_readonly	    1
4950 #define elf_backend_want_plt_sym	    0
4951 #define elf_backend_got_header_size	    (GOT_ENTRY_SIZE*3)
4952 #define elf_backend_rela_normal		    1
/* PLT alignment is a log2 value: 2^4 = 16 bytes.  */
4953 #define elf_backend_plt_alignment	    4
4954 #define elf_backend_extern_protected_data   1
4955 #define elf_backend_caches_rawsize	    1
4956 #define elf_backend_dtrel_excludes_plt	    1
4957 #define elf_backend_want_dynrelro	    1
/* Hook the x86-64-specific implementations into the generic ELF
   backend vector.  */
4959 #define elf_info_to_howto		    elf_x86_64_info_to_howto
4961 #define bfd_elf64_bfd_reloc_type_lookup	    elf_x86_64_reloc_type_lookup
4962 #define bfd_elf64_bfd_reloc_name_lookup \
4963 elf_x86_64_reloc_name_lookup
4965 #define elf_backend_relocs_compatible	    elf_x86_64_relocs_compatible
4966 #define elf_backend_check_relocs	    elf_x86_64_check_relocs
4967 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
4968 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
4969 #define elf_backend_finish_dynamic_symbol   elf_x86_64_finish_dynamic_symbol
4970 #define elf_backend_output_arch_local_syms  elf_x86_64_output_arch_local_syms
/* Core-file (prstatus/psinfo) parsing and writing hooks.  */
4971 #define elf_backend_grok_prstatus	    elf_x86_64_grok_prstatus
4972 #define elf_backend_grok_psinfo		    elf_x86_64_grok_psinfo
4974 #define elf_backend_write_core_note	    elf_x86_64_write_core_note
4976 #define elf_backend_reloc_type_class	    elf_x86_64_reloc_type_class
4977 #define elf_backend_relocate_section	    elf_x86_64_relocate_section
4978 #define elf_backend_init_index_section	    _bfd_elf_init_1_index_section
4979 #define elf_backend_object_p		    elf64_x86_64_elf_object_p
4980 #define bfd_elf64_get_synthetic_symtab	    elf_x86_64_get_synthetic_symtab
4982 #define elf_backend_section_from_shdr \
4983 elf_x86_64_section_from_shdr
4985 #define elf_backend_section_from_bfd_section \
4986 elf_x86_64_elf_section_from_bfd_section
4987 #define elf_backend_add_symbol_hook \
4988 elf_x86_64_add_symbol_hook
4989 #define elf_backend_symbol_processing \
4990 elf_x86_64_symbol_processing
4991 #define elf_backend_common_section_index \
4992 elf_x86_64_common_section_index
4993 #define elf_backend_common_section \
4994 elf_x86_64_common_section
4995 #define elf_backend_common_definition \
4996 elf_x86_64_common_definition
4997 #define elf_backend_merge_symbol \
4998 elf_x86_64_merge_symbol
4999 #define elf_backend_special_sections \
5000 elf_x86_64_special_sections
5001 #define elf_backend_additional_program_headers \
5002 elf_x86_64_additional_program_headers
5003 #define elf_backend_setup_gnu_properties \
5004 elf_x86_64_link_setup_gnu_properties
5005 #define elf_backend_hide_symbol \
5006 _bfd_x86_elf_hide_symbol
/* Instantiate the target vector from the macros above.  */
5008 #include "elf64-target.h"
5010 /* CloudABI support.  */
/* Re-instantiate the target under a new vector/name with the CloudABI
   OSABI; "elf64-target.h" is included once per target variant.  */
5012 #undef  TARGET_LITTLE_SYM
5013 #define TARGET_LITTLE_SYM		    x86_64_elf64_cloudabi_vec
5014 #undef  TARGET_LITTLE_NAME
5015 #define TARGET_LITTLE_NAME		    "elf64-x86-64-cloudabi"
5018 #define	ELF_OSABI			    ELFOSABI_CLOUDABI
/* Each variant needs its own backend-data structure name.  */
5021 #define	elf64_bed			    elf64_x86_64_cloudabi_bed
5023 #include "elf64-target.h"
5025 /* FreeBSD support.  */
/* Same defaults as the plain target, but branded with the FreeBSD
   OSABI and its own vector/name/bed.  */
5027 #undef  TARGET_LITTLE_SYM
5028 #define TARGET_LITTLE_SYM		    x86_64_elf64_fbsd_vec
5029 #undef  TARGET_LITTLE_NAME
5030 #define TARGET_LITTLE_NAME		    "elf64-x86-64-freebsd"
5033 #define	ELF_OSABI			    ELFOSABI_FREEBSD
5036 #define	elf64_bed			    elf64_x86_64_fbsd_bed
5038 #include "elf64-target.h"
5040 /* Solaris 2 support.  */
5042 #undef  TARGET_LITTLE_SYM
5043 #define TARGET_LITTLE_SYM		    x86_64_elf64_sol2_vec
5044 #undef  TARGET_LITTLE_NAME
5045 #define TARGET_LITTLE_NAME		    "elf64-x86-64-sol2"
5047 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5048    objects won't be recognized.  */
5052 #define elf64_bed			    elf64_x86_64_sol2_bed
5054 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
   boundary.  */
5056 #undef  elf_backend_static_tls_alignment
5057 #define elf_backend_static_tls_alignment    16
5059 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5061    Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
   File.  */
5063 #undef  elf_backend_want_plt_sym
5064 #define elf_backend_want_plt_sym	    1
5066 #undef	elf_backend_strtab_flags
5067 #define elf_backend_strtab_flags	SHF_STRINGS
/* Hook: copy Solaris-specific section header fields (sh_info/sh_link)
   from an input section header to the corresponding output header.
   Currently a stub — see the PR 19938 FIXME below.  */
5070 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5071 bfd *obfd ATTRIBUTE_UNUSED,
5072 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5073 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5075 /* PR 19938: FIXME: Need to add code for setting the sh_info
5076    and sh_link fields of Solaris specific section types.  */
5080 #undef  elf_backend_copy_special_section_fields
5081 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5083 #include "elf64-target.h"
5085 /* Native Client support.  */
/* object_p hook: tag recognized NaCl objects with the
   bfd_mach_x86_64_nacl machine variant.  */
5088 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5090 /* Set the right machine number for a NaCl x86-64 ELF64 file.  */
5091 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5095 #undef	TARGET_LITTLE_SYM
5096 #define	TARGET_LITTLE_SYM		x86_64_elf64_nacl_vec
5097 #undef	TARGET_LITTLE_NAME
5098 #define	TARGET_LITTLE_NAME		"elf64-x86-64-nacl"
5100 #define	elf64_bed			elf64_x86_64_nacl_bed
/* NaCl uses a uniform 64 KiB page size.  */
5102 #undef	ELF_MAXPAGESIZE
5103 #undef	ELF_MINPAGESIZE
5104 #undef	ELF_COMMONPAGESIZE
5105 #define ELF_MAXPAGESIZE			0x10000
5106 #define ELF_MINPAGESIZE			0x10000
5107 #define ELF_COMMONPAGESIZE		0x10000
5109 /* Restore defaults.  */
5111 #undef	elf_backend_static_tls_alignment
5112 #undef elf_backend_want_plt_sym
5113 #define elf_backend_want_plt_sym	0
5114 #undef  elf_backend_strtab_flags
5115 #undef  elf_backend_copy_special_section_fields
5117 /* NaCl uses substantially different PLT entries for the same effects.  */
/* PLT alignment is log2: 2^5 = 32-byte bundle alignment; every PLT
   entry is a fixed 64 bytes.  */
5119 #undef	elf_backend_plt_alignment
5120 #define elf_backend_plt_alignment	5
5121 #define NACL_PLT_ENTRY_SIZE		64
5122 #define	NACLMASK			0xe0 /* 32-byte alignment mask.  */
/* Initial (resolver) PLT entry for NaCl.  It pushes GOT+8, loads
   GOT+16 into %r11, masks %r11d down to a 32-byte bundle boundary
   (NACLMASK), re-adds the sandbox base from %r15, and jumps —
   an indirect jump that satisfies NaCl's bundle-alignment rules.
   The remainder is nop padding up to NACL_PLT_ENTRY_SIZE (64).  */
5124 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5126 0xff, 0x35, 8, 0, 0, 0,             /* pushq GOT+8(%rip) 	    */
5127 0x4c, 0x8b, 0x1d, 16, 0, 0, 0,	  /* mov GOT+16(%rip), %r11	    */
5128 0x41, 0x83, 0xe3, NACLMASK,         /* and $-32, %r11d		    */
5129 0x4d, 0x01, 0xfb,             	  /* add %r15, %r11		    */
5130 0x41, 0xff, 0xe3,             	  /* jmpq *%r11			    */
5132 /* 9-byte nop sequence to pad out to the next 32-byte boundary.  */
5133 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1)	    */
5135 /* 32 bytes of nop to pad out to the standard size.  */
5136 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes	    */
5137 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1)  */
5138 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes	    */
5139 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1)  */
5140 0x66,                                   /* excess data16 prefix	    */
/* Per-symbol NaCl PLT entry (64 bytes).  First half: load the GOT
   slot into %r11, mask to a 32-byte bundle boundary, add the %r15
   sandbox base, and jump.  Second half (32-byte aligned, targeted by
   lazy GOT entries): push the relocation index and jump back to
   .plt entry 0; the trailing bytes are nop padding.  */
5144 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5146 0x4c, 0x8b, 0x1d, 0, 0, 0, 0,	  /* mov name@GOTPCREL(%rip),%r11   */
5147 0x41, 0x83, 0xe3, NACLMASK,         /* and $-32, %r11d		    */
5148 0x4d, 0x01, 0xfb,                   /* add %r15, %r11		    */
5149 0x41, 0xff, 0xe3,                   /* jmpq *%r11			    */
5151 /* 15-byte nop sequence to pad out to the next 32-byte boundary.  */
5152 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes	    */
5153 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1)  */
5155 /* Lazy GOT entries point here (32-byte aligned).  */
5156 0x68,                 /* pushq immediate */
5157 0, 0, 0, 0,           /* replaced with index into relocation table.  */
5158 0xe9,                 /* jmp relative */
5159 0, 0, 0, 0,           /* replaced with offset to start of .plt0.  */
5161 /* 22 bytes of nop to pad out to the standard size.  */
5162 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes	    */
5163 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1)  */
5164 0x0f, 0x1f, 0x80, 0, 0, 0, 0,       /* nopl 0x0(%rax)		    */
5167 /* .eh_frame covering the .plt section.  */
/* Prebuilt CIE + FDE bytes describing CFA recovery inside the NaCl
   PLT.  The preprocessor check below guards that the hard-coded
   offsets still match the PLT_* parameters this table is built
   against; if they drift, the build fails with #error.  */
5169 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5171 #if (PLT_CIE_LENGTH != 20                               \
5172      || PLT_FDE_LENGTH != 36                             \
5173      || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8   \
5174      || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5175 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5177 PLT_CIE_LENGTH, 0, 0, 0,		/* CIE length */
5178 0, 0, 0, 0,			/* CIE ID */
5179 1,				/* CIE version */
5180 'z', 'R', 0,			/* Augmentation string */
5181 1,				/* Code alignment factor */
5182 0x78,				/* Data alignment factor */
5183 16,				/* Return address column */
5184 1,				/* Augmentation size */
5185 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5186 DW_CFA_def_cfa, 7, 8,		/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5187 DW_CFA_offset + 16, 1,		/* DW_CFA_offset: r16 (rip) at cfa-8 */
5188 DW_CFA_nop, DW_CFA_nop,
5190 PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
5191 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5192 0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
5193 0, 0, 0, 0,			/* .plt size goes here */
5194 0,				/* Augmentation size */
5195 DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
5196 DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
5197 DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
5198 DW_CFA_advance_loc + 58,	/* DW_CFA_advance_loc: 58 to __PLT__+64 */
/* CFA expression: rsp+8 normally, but rsp+16 while inside the
   push/jmp tail of a lazy PLT entry (selected via rip % 64).  */
5199 DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
5200 13,				/* Block length */
5201 DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
5202 DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
5203 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5204 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5205 DW_CFA_nop, DW_CFA_nop
/* Lazy-PLT layout description handed to the shared x86 backend code:
   both the initial and per-symbol NaCl PLT entries are
   NACL_PLT_ENTRY_SIZE (64) bytes; the offsets below index into the
   byte templates defined above.  The PIC variants reuse the same
   templates.  */
5208 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5210 elf_x86_64_nacl_plt0_entry,              /* plt0_entry */
5211 NACL_PLT_ENTRY_SIZE,                     /* plt0_entry_size */
5212 elf_x86_64_nacl_plt_entry,               /* plt_entry */
5213 NACL_PLT_ENTRY_SIZE,                     /* plt_entry_size */
5214 2,                                       /* plt0_got1_offset */
5215 9,                                       /* plt0_got2_offset */
5216 13,                                      /* plt0_got2_insn_end */
5217 3,                                       /* plt_got_offset */
5218 33,                                      /* plt_reloc_offset */
5219 38,                                      /* plt_plt_offset */
5220 7,                                       /* plt_got_insn_size */
5221 42,                                      /* plt_plt_insn_end */
5222 32,                                      /* plt_lazy_offset */
5223 elf_x86_64_nacl_plt0_entry,              /* pic_plt0_entry */
5224 elf_x86_64_nacl_plt_entry,               /* pic_plt_entry */
5225 elf_x86_64_nacl_eh_frame_plt,            /* eh_frame_plt */
5226 sizeof (elf_x86_64_nacl_eh_frame_plt)    /* eh_frame_plt_size */
/* NaCl-specific backend data, plus hook overrides: segment-map,
   program-header, and final-write processing are replaced by the
   generic nacl_* implementations, and object_p by the NaCl variant
   above.  */
5229 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5234 #undef	elf_backend_arch_data
5235 #define	elf_backend_arch_data	&elf_x86_64_nacl_arch_bed
5237 #undef	elf_backend_object_p
5238 #define elf_backend_object_p			elf64_x86_64_nacl_elf_object_p
5239 #undef	elf_backend_modify_segment_map
5240 #define	elf_backend_modify_segment_map		nacl_modify_segment_map
5241 #undef	elf_backend_modify_program_headers
5242 #define	elf_backend_modify_program_headers	nacl_modify_program_headers
5243 #undef	elf_backend_final_write_processing
5244 #define elf_backend_final_write_processing	nacl_final_write_processing
5246 #include "elf64-target.h"
5248 /* Native Client x32 support.  */
/* object_p hook for the ELFCLASS32 NaCl variant: tags objects with
   bfd_mach_x64_32_nacl.  */
5251 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5253 /* Set the right machine number for a NaCl x86-64 ELF32 file.  */
5254 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5258 #undef  TARGET_LITTLE_SYM
5259 #define TARGET_LITTLE_SYM		x86_64_elf32_nacl_vec
5260 #undef  TARGET_LITTLE_NAME
5261 #define TARGET_LITTLE_NAME		"elf32-x86-64-nacl"
5263 #define elf32_bed			elf32_x86_64_nacl_bed
/* The 32-bit target reuses the shared x86-64 reloc lookup and
   synthetic-symtab implementations under the bfd_elf32_* names.  */
5265 #define bfd_elf32_bfd_reloc_type_lookup	\
5266 elf_x86_64_reloc_type_lookup
5267 #define bfd_elf32_bfd_reloc_name_lookup \
5268 elf_x86_64_reloc_name_lookup
5269 #define bfd_elf32_get_synthetic_symtab \
5270 elf_x86_64_get_synthetic_symtab
5272 #undef elf_backend_object_p
5273 #define elf_backend_object_p \
5274 elf32_x86_64_nacl_elf_object_p
/* ELFCLASS32 needs the 32-bit remote-memory reader and size info.  */
5276 #undef elf_backend_bfd_from_remote_memory
5277 #define elf_backend_bfd_from_remote_memory \
5278 _bfd_elf32_bfd_from_remote_memory
5280 #undef elf_backend_size_info
5281 #define elf_backend_size_info \
5282 _bfd_elf32_size_info
5284 #include "elf32-target.h"
5286 /* Restore defaults.  */
/* Undo every hook the NaCl targets overrode so the following target
   variants start from the plain 64-bit configuration.  */
5287 #undef	elf_backend_object_p
5288 #define elf_backend_object_p		    elf64_x86_64_elf_object_p
5289 #undef elf_backend_bfd_from_remote_memory
5290 #undef elf_backend_size_info
5291 #undef	elf_backend_modify_segment_map
5292 #undef	elf_backend_modify_program_headers
5293 #undef	elf_backend_final_write_processing
5295 /* Intel L1OM support.  */
/* object_p hook: tag recognized objects as bfd_arch_l1om.  */
5298 elf64_l1om_elf_object_p (bfd *abfd)
5300 /* Set the right machine number for an L1OM elf64 file.  */
5301 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5305 #undef  TARGET_LITTLE_SYM
5306 #define TARGET_LITTLE_SYM		    l1om_elf64_vec
5307 #undef  TARGET_LITTLE_NAME
5308 #define TARGET_LITTLE_NAME		    "elf64-l1om"
5310 #define ELF_ARCH			    bfd_arch_l1om
5312 #undef	ELF_MACHINE_CODE
5313 #define ELF_MACHINE_CODE		    EM_L1OM
5318 #define elf64_bed			    elf64_l1om_bed
5320 #undef	elf_backend_object_p
5321 #define elf_backend_object_p		    elf64_l1om_elf_object_p
5323 /* Restore defaults.  */
/* Back to 2 MiB max / 4 KiB min+common pages and 16-byte (2^4) PLT
   alignment, with the default x86-64 backend data.  */
5324 #undef	ELF_MAXPAGESIZE
5325 #undef	ELF_MINPAGESIZE
5326 #undef	ELF_COMMONPAGESIZE
5327 #define ELF_MAXPAGESIZE			0x200000
5328 #define ELF_MINPAGESIZE			0x1000
5329 #define ELF_COMMONPAGESIZE		0x1000
5330 #undef	elf_backend_plt_alignment
5331 #define elf_backend_plt_alignment	4
5332 #undef	elf_backend_arch_data
5333 #define	elf_backend_arch_data		&elf_x86_64_arch_bed
5335 #include "elf64-target.h"
5337 /* FreeBSD L1OM support.  */
/* L1OM with the FreeBSD OSABI under its own vector/name/bed.  */
5339 #undef  TARGET_LITTLE_SYM
5340 #define TARGET_LITTLE_SYM		    l1om_elf64_fbsd_vec
5341 #undef  TARGET_LITTLE_NAME
5342 #define TARGET_LITTLE_NAME		    "elf64-l1om-freebsd"
5345 #define	ELF_OSABI			    ELFOSABI_FREEBSD
5348 #define elf64_bed			    elf64_l1om_fbsd_bed
5350 #include "elf64-target.h"
5352 /* Intel K1OM support.  */
/* object_p hook: tag recognized objects as bfd_arch_k1om.  */
5355 elf64_k1om_elf_object_p (bfd *abfd)
5357 /* Set the right machine number for a K1OM elf64 file.  */
5358 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5362 #undef  TARGET_LITTLE_SYM
5363 #define TARGET_LITTLE_SYM		    k1om_elf64_vec
5364 #undef  TARGET_LITTLE_NAME
5365 #define TARGET_LITTLE_NAME		    "elf64-k1om"
5367 #define ELF_ARCH			    bfd_arch_k1om
5369 #undef	ELF_MACHINE_CODE
5370 #define ELF_MACHINE_CODE		    EM_K1OM
5375 #define elf64_bed			    elf64_k1om_bed
5377 #undef	elf_backend_object_p
5378 #define elf_backend_object_p		    elf64_k1om_elf_object_p
/* Drop the Solaris TLS alignment override and PLT symbols.  */
5380 #undef  elf_backend_static_tls_alignment
5382 #undef elf_backend_want_plt_sym
5383 #define elf_backend_want_plt_sym	    0
5385 #include "elf64-target.h"
5387 /* FreeBSD K1OM support.  */
/* K1OM with the FreeBSD OSABI under its own vector/name/bed.  */
5389 #undef  TARGET_LITTLE_SYM
5390 #define TARGET_LITTLE_SYM		    k1om_elf64_fbsd_vec
5391 #undef  TARGET_LITTLE_NAME
5392 #define TARGET_LITTLE_NAME		    "elf64-k1om-freebsd"
5395 #define	ELF_OSABI			    ELFOSABI_FREEBSD
5398 #define elf64_bed			    elf64_k1om_fbsd_bed
5400 #include "elf64-target.h"
5402 /* 32bit x86-64 support.  */
/* The x32 ABI: ELFCLASS32 objects with the EM_X86_64 machine code.
   Reuses the shared implementations but swaps in the 32-bit size
   info and remote-memory reader, then instantiates through
   "elf32-target.h".  */
5404 #undef  TARGET_LITTLE_SYM
5405 #define TARGET_LITTLE_SYM		    x86_64_elf32_vec
5406 #undef  TARGET_LITTLE_NAME
5407 #define TARGET_LITTLE_NAME		    "elf32-x86-64"
5411 #define ELF_ARCH			    bfd_arch_i386
5413 #undef	ELF_MACHINE_CODE
5414 #define ELF_MACHINE_CODE		    EM_X86_64
5418 #undef	elf_backend_object_p
5419 #define elf_backend_object_p \
5420 elf32_x86_64_elf_object_p
5422 #undef elf_backend_bfd_from_remote_memory
5423 #define elf_backend_bfd_from_remote_memory \
5424 _bfd_elf32_bfd_from_remote_memory
5426 #undef elf_backend_size_info
5427 #define elf_backend_size_info \
5428 _bfd_elf32_size_info
5430 #include "elf32-target.h"