1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2018 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
22 #include "elfxx-x86.h"
25 #include "libiberty.h"
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
/* In case we're on a 32-bit machine, construct a 64-bit "-1" value.
   Casting -1 to the (unsigned) bfd_vma type yields an all-ones bit
   pattern, identical to ~(bfd_vma) 0.  */
#define MINUS_ONE ((bfd_vma) -1)
38 /* Since both 32-bit and 64-bit x86-64 encode relocation type in the
39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
40 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
/* Relocation "howto" table for x86-64, indexed by R_X86_64_* reloc
   type (with the vtable relocs appended after a numbering gap — see
   the R_X86_64_vt_offset note below).  A duplicate R_X86_64_32 entry
   at the very end carries the x32 overflow check.
   NOTE(review): this chunk looks like a partial extraction — the
   table braces and the trailing "pcrel_offset" field of many HOWTO
   entries are not visible here; compare against the upstream file
   before editing the entries themselves.  */
46 static reloc_howto_type x86_64_elf_howto_table[] =
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
/* TLS relocations follow.  */
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32.  This last
   entry is selected by elf_x86_64_rtype_to_howto for non-64-bit ABIs.  */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
/* This flag bit is OR'ed into the r_type and stripped again in
   elf_x86_64_info_to_howto before the howto lookup.  */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
/* Nonzero if TYPE is one of the plain PC-relative x86-64 relocations
   (PC8/PC16/PC32/PC32_BND/PC64).  */
#define X86_PCREL_TYPE_P(TYPE)		\
  ((TYPE) == R_X86_64_PC8		\
   || (TYPE) == R_X86_64_PC16		\
   || (TYPE) == R_X86_64_PC32		\
   || (TYPE) == R_X86_64_PC32_BND	\
   || (TYPE) == R_X86_64_PC64)
/* Nonzero if TYPE is one of the symbol-size relocations.  */
#define X86_SIZE_TYPE_P(TYPE)			\
  (((TYPE) == R_X86_64_SIZE32)			\
   || ((TYPE) == R_X86_64_SIZE64))
212 /* Map BFD relocs to the x86_64 elf relocs. */
/* NOTE(review): the "struct elf_reloc_map {" header line is not
   visible in this extraction; these are its two members.  */
/* The generic BFD relocation code.  */
215 bfd_reloc_code_real_type bfd_reloc_val;
/* The corresponding R_X86_64_* value (fits in a byte).  */
216 unsigned char elf_reloc_val;
/* Translation table from BFD_RELOC_* codes to R_X86_64_* reloc types,
   searched linearly by elf_x86_64_reloc_type_lookup.  */
219 static const struct elf_reloc_map x86_64_reloc_map[] =
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
/* Map an R_X86_64_* relocation type to its howto table entry.
   R_X86_64_32 is special-cased to the final (x32, bitfield-overflow)
   table entry; out-of-range types are diagnosed and degraded to
   R_X86_64_NONE; vtable relocs are rebased by R_X86_64_vt_offset.
   NOTE(review): several lines (braces, the declaration of `i`, the
   ABI_64_P condition around the R_X86_64_32 special case) are missing
   from this extraction.  */
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
272 if (r_type == (unsigned int) R_X86_64_32)
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
282 if (r_type >= (unsigned int) R_X86_64_standard)
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: invalid relocation type %d"),
287 r_type = R_X86_64_NONE;
/* Vtable relocs sit after a numbering gap; rebase into the table.  */
292 i = r_type - (unsigned int) R_X86_64_vt_offset;
293 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
294 return &x86_64_elf_howto_table[i];
297 /* Given a BFD reloc type, return a HOWTO structure. */
/* Linear search of x86_64_reloc_map; returns NULL (outside this view)
   when CODE has no x86-64 mapping.  */
298 static reloc_howto_type *
299 elf_x86_64_reloc_type_lookup (bfd *abfd,
300 bfd_reloc_code_real_type code)
304 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
307 if (x86_64_reloc_map[i].bfd_reloc_val == code)
308 return elf_x86_64_rtype_to_howto (abfd,
309 x86_64_reloc_map[i].elf_reloc_val);
/* Given a relocation name, return its HOWTO structure.  For non-LP64
   (x32) BFDs, "R_X86_64_32" resolves to the special trailing table
   entry with the bitfield overflow check.  */
314 static reloc_howto_type *
315 elf_x86_64_reloc_name_lookup (bfd *abfd,
320 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 /* Get x32 R_X86_64_32. */
323 reloc_howto_type *reloc
324 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
325 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
/* Otherwise, match the name case-insensitively against the table.  */
329 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
330 if (x86_64_elf_howto_table[i].name != NULL
331 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
332 return &x86_64_elf_howto_table[i];
337 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
/* Strips the internal R_X86_64_converted_reloc_bit (except for the
   vtable relocs, whose numbers would collide with it) before the
   howto lookup.  */
340 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
341 Elf_Internal_Rela *dst)
/* 32-bit and 64-bit x86-64 encode r_type identically, so ELF32_R_TYPE
   works for both (see the comment near the top of the file).  */
345 r_type = ELF32_R_TYPE (dst->r_info);
346 if (r_type != (unsigned int) R_X86_64_GNU_VTINHERIT
347 && r_type != (unsigned int) R_X86_64_GNU_VTENTRY)
348 r_type &= ~R_X86_64_converted_reloc_bit;
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type)
351 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
354 /* Support for core dump NOTE sections. */
/* Parse an NT_PRSTATUS core note, dispatching on descsz to tell x32
   from x86-64 layouts, and create the ".reg/<pid>" pseudosection.
   NOTE(review): the lines setting `size` and `offset` are missing
   from this extraction.  */
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
361 switch (note->descsz)
366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
/* Parse an NT_PRPSINFO core note, again sized for x32 vs x86-64, and
   record pid, program name and command line in the BFD core tdata.  */
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
403 switch (note->descsz)
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
426 /* Note that for some reason, a spurious space is tacked
427 onto the end of the args in some (at least one anyway)
428 implementations, so strip it off if it exists. */
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
/* Write a core note of the requested type into BUF.  Variadic
   arguments depend on note_type: for psinfo notes, (fname, psargs);
   for prstatus notes, (pid, cursig, gregs).  Three layouts are
   handled: ia32 compat, x32 (ELFCLASS32 + EM_X86_64) and x86-64.
   NOTE(review): the switch statement, local struct declarations and
   va_end calls are missing from this extraction.  */
443 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
446 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
448 const char *fname, *psargs;
459 va_start (ap, note_type);
460 fname = va_arg (ap, const char *);
461 psargs = va_arg (ap, const char *);
464 if (bed->s->elfclass == ELFCLASS32)
467 memset (&data, 0, sizeof (data));
468 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
469 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
470 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
471 &data, sizeof (data));
476 memset (&data, 0, sizeof (data));
477 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
478 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
479 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
480 &data, sizeof (data));
485 va_start (ap, note_type);
486 pid = va_arg (ap, long);
487 cursig = va_arg (ap, int);
488 gregs = va_arg (ap, const void *);
491 if (bed->s->elfclass == ELFCLASS32)
493 if (bed->elf_machine_code == EM_X86_64)
495 prstatusx32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
506 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_cursig = cursig;
509 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
510 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
511 &prstat, sizeof (prstat));
517 memset (&prstat, 0, sizeof (prstat));
519 prstat.pr_cursig = cursig;
520 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
521 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
522 &prstat, sizeof (prstat));
529 /* Functions for the x86-64 ELF linker. */
531 /* The size in bytes of an entry in the global offset table. */
533 #define GOT_ENTRY_SIZE 8
535 /* The size in bytes of an entry in the lazy procedure linkage table. */
537 #define LAZY_PLT_ENTRY_SIZE 16
539 /* The size in bytes of an entry in the non-lazy procedure linkage
542 #define NON_LAZY_PLT_ENTRY_SIZE 8
544 /* The first entry in a lazy procedure linkage table looks like this.
545 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
/* Byte templates for the PLT variants (plain lazy, BND, IBT, x32 IBT
   and their non-lazy counterparts).  The zero bytes marked in the
   per-line comments are patched at link time.  */
548 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
550 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
551 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
552 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
555 /* Subsequent entries in a lazy procedure linkage table look like this. */
557 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
559 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
560 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
561 0x68, /* pushq immediate */
562 0, 0, 0, 0, /* replaced with index into relocation table. */
563 0xe9, /* jmp relative */
564 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
567 /* The first entry in a lazy procedure linkage table with BND prefix
570 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
572 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
573 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
574 0x0f, 0x1f, 0 /* nopl (%rax) */
577 /* Subsequent entries for branches with BND prefix in a lazy procedure
578 linkage table look like this. */
580 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
582 0x68, 0, 0, 0, 0, /* pushq immediate */
583 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
584 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
587 /* The first entry in the IBT-enabled lazy procedure linkage table is
588 the same as the lazy PLT with BND prefix so that bound registers are
589 preserved when control is passed to dynamic linker. Subsequent
590 entries for a IBT-enabled lazy procedure linkage table look like
593 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
595 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
596 0x68, 0, 0, 0, 0, /* pushq immediate */
597 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
601 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
602 is the same as the normal lazy PLT. Subsequent entries for an
603 x32 IBT-enabled lazy procedure linkage table look like this. */
605 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
607 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
608 0x68, 0, 0, 0, 0, /* pushq immediate */
609 0xe9, 0, 0, 0, 0, /* jmpq relative */
610 0x66, 0x90 /* xchg %ax,%ax */
613 /* Entries in the non-lazy procedure linkage table look like this. */
615 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
617 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x66, 0x90 /* xchg %ax,%ax */
622 /* Entries for branches with BND prefix in the non-lazy procedure
623 linkage table look like this. */
625 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
627 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
628 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
632 /* Entries for branches with IBT-enabled in the non-lazy procedure
633 linkage table look like this. They have the same size as the lazy
636 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
638 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
639 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
640 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
641 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
644 /* Entries for branches with IBT-enabled in the x32 non-lazy procedure
645 linkage table look like this. They have the same size as the lazy
648 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
650 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
651 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
652 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
653 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
656 /* .eh_frame covering the lazy .plt section. */
/* Each of the following arrays is a complete CIE+FDE pair; the four
   variants differ only in the DW_OP_lit* constant used by the CFA
   expression to distinguish the first half of a PLT entry from the
   second (the push of the reloc index changes the CFA offset).  */
658 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
660 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
661 0, 0, 0, 0, /* CIE ID */
663 'z', 'R', 0, /* Augmentation string */
664 1, /* Code alignment factor */
665 0x78, /* Data alignment factor */
666 16, /* Return address column */
667 1, /* Augmentation size */
668 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
669 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
670 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
671 DW_CFA_nop, DW_CFA_nop,
673 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
674 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
675 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
676 0, 0, 0, 0, /* .plt size goes here */
677 0, /* Augmentation size */
678 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
679 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
680 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
681 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
682 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
683 11, /* Block length */
684 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
685 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
686 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
687 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
688 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
691 /* .eh_frame covering the lazy BND .plt section. */
693 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
695 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
696 0, 0, 0, 0, /* CIE ID */
698 'z', 'R', 0, /* Augmentation string */
699 1, /* Code alignment factor */
700 0x78, /* Data alignment factor */
701 16, /* Return address column */
702 1, /* Augmentation size */
703 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
704 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
705 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
706 DW_CFA_nop, DW_CFA_nop,
708 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
709 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
710 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
711 0, 0, 0, 0, /* .plt size goes here */
712 0, /* Augmentation size */
713 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
714 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
715 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
716 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
717 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
718 11, /* Block length */
719 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
720 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
721 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
722 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
723 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
726 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
728 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
730 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
731 0, 0, 0, 0, /* CIE ID */
733 'z', 'R', 0, /* Augmentation string */
734 1, /* Code alignment factor */
735 0x78, /* Data alignment factor */
736 16, /* Return address column */
737 1, /* Augmentation size */
738 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
739 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
740 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
741 DW_CFA_nop, DW_CFA_nop,
743 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
744 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
745 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
746 0, 0, 0, 0, /* .plt size goes here */
747 0, /* Augmentation size */
748 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
749 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
750 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
751 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
752 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
753 11, /* Block length */
754 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
755 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
756 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
757 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
758 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
761 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
763 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
765 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
766 0, 0, 0, 0, /* CIE ID */
768 'z', 'R', 0, /* Augmentation string */
769 1, /* Code alignment factor */
770 0x78, /* Data alignment factor */
771 16, /* Return address column */
772 1, /* Augmentation size */
773 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
774 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
775 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
776 DW_CFA_nop, DW_CFA_nop,
778 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
779 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
780 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
781 0, 0, 0, 0, /* .plt size goes here */
782 0, /* Augmentation size */
783 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
784 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
785 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
786 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
787 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
788 11, /* Block length */
789 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
790 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
791 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
792 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
793 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
796 /* .eh_frame covering the non-lazy .plt section. */
/* No CFA expression needed: non-lazy PLT entries never push.  */
798 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
800 #define PLT_GOT_FDE_LENGTH 20
801 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
802 0, 0, 0, 0, /* CIE ID */
804 'z', 'R', 0, /* Augmentation string */
805 1, /* Code alignment factor */
806 0x78, /* Data alignment factor */
807 16, /* Return address column */
808 1, /* Augmentation size */
809 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
810 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
811 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
812 DW_CFA_nop, DW_CFA_nop,
814 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
815 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
816 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
817 0, 0, 0, 0, /* non-lazy .plt size goes here */
818 0, /* Augmentation size */
819 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
820 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
823 /* These are the standard parameters. */
824 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
826 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
827 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
828 elf_x86_64_lazy_plt_entry, /* plt_entry */
829 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
830 2, /* plt0_got1_offset */
831 8, /* plt0_got2_offset */
832 12, /* plt0_got2_insn_end */
833 2, /* plt_got_offset */
834 7, /* plt_reloc_offset */
835 12, /* plt_plt_offset */
836 6, /* plt_got_insn_size */
837 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
838 6, /* plt_lazy_offset */
839 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
840 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
841 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
842 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
845 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
847 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
848 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
849 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
850 2, /* plt_got_offset */
851 6, /* plt_got_insn_size */
852 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
853 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
856 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
858 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
859 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
860 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
861 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
862 2, /* plt0_got1_offset */
863 1+8, /* plt0_got2_offset */
864 1+12, /* plt0_got2_insn_end */
865 1+2, /* plt_got_offset */
866 1, /* plt_reloc_offset */
867 7, /* plt_plt_offset */
868 1+6, /* plt_got_insn_size */
869 11, /* plt_plt_insn_end */
870 0, /* plt_lazy_offset */
871 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
872 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
873 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
877 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
879 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
880 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
881 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
882 1+2, /* plt_got_offset */
883 1+6, /* plt_got_insn_size */
884 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
885 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
888 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
890 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
891 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
892 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
893 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
894 2, /* plt0_got1_offset */
895 1+8, /* plt0_got2_offset */
896 1+12, /* plt0_got2_insn_end */
897 4+1+2, /* plt_got_offset */
898 4+1, /* plt_reloc_offset */
899 4+1+6, /* plt_plt_offset */
900 4+1+6, /* plt_got_insn_size */
901 4+1+5+5, /* plt_plt_insn_end */
902 0, /* plt_lazy_offset */
903 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
904 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
905 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
906 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
/* Lazy IBT PLT layout for the x32 (ILP32) ABI.  Same shape as the LP64
   IBT layout above, but the offsets lack the "+1" — presumably because
   the x32 entries carry no extra one-byte prefix (confirm against the
   entry template).  NOTE(review): initializer braces are elided in this
   excerpt.  */
909 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
911     elf_x86_64_lazy_plt0_entry,          /* plt0_entry */
912     LAZY_PLT_ENTRY_SIZE,                 /* plt0_entry_size */
913     elf_x32_lazy_ibt_plt_entry,          /* plt_entry */
914     LAZY_PLT_ENTRY_SIZE,                 /* plt_entry_size */
915     2,                                   /* plt0_got1_offset */
916     8,                                   /* plt0_got2_offset */
917     12,                                  /* plt0_got2_insn_end */
918     4+2,                                 /* plt_got_offset */
919     4+1,                                 /* plt_reloc_offset */
920     4+6,                                 /* plt_plt_offset */
921     4+6,                                 /* plt_got_insn_size */
922     4+5+5,                               /* plt_plt_insn_end */
923     0,                                   /* plt_lazy_offset */
924     elf_x86_64_lazy_plt0_entry,          /* pic_plt0_entry */
925     elf_x32_lazy_ibt_plt_entry,          /* pic_plt_entry */
926     elf_x32_eh_frame_lazy_ibt_plt,       /* eh_frame_plt */
927     sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
/* Non-lazy (BIND_NOW) IBT PLT layout for LP64; PIC and non-PIC share the
   RIP-relative entry.  Offsets start at "4+" to step over the 4-byte
   leading instruction (presumably endbr64 — confirm).
   NOTE(review): initializer braces are elided in this excerpt.  */
930 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
932     elf_x86_64_non_lazy_ibt_plt_entry,   /* plt_entry */
933     elf_x86_64_non_lazy_ibt_plt_entry,   /* pic_plt_entry */
934     LAZY_PLT_ENTRY_SIZE,                 /* plt_entry_size */
935     4+1+2,                               /* plt_got_offset */
936     4+1+6,                               /* plt_got_insn_size */
937     elf_x86_64_eh_frame_non_lazy_plt,    /* eh_frame_plt */
938     sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
/* Non-lazy IBT PLT layout for x32; like the LP64 variant but without the
   extra "+1" prefix byte in the offsets.  Shares the 64-bit non-lazy
   .eh_frame data.  NOTE(review): initializer braces are elided in this
   excerpt.  */
941 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
943     elf_x32_non_lazy_ibt_plt_entry,      /* plt_entry */
944     elf_x32_non_lazy_ibt_plt_entry,      /* pic_plt_entry */
945     LAZY_PLT_ENTRY_SIZE,                 /* plt_entry_size */
946     4+2,                                 /* plt_got_offset */
947     4+6,                                 /* plt_got_insn_size */
948     elf_x86_64_eh_frame_non_lazy_plt,    /* eh_frame_plt */
949     sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
/* Target-specific backend data, hooked into the generic x86 ELF code via
   the elf_backend_arch_data macro below.
   NOTE(review): the initializer body is elided in this excerpt.  */
952 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
957 #define elf_backend_arch_data &elf_x86_64_arch_bed
/* object_p hook: tag 64-bit (LP64) x86-64 input files with the
   bfd_mach_x86_64 machine number.  NOTE(review): the return type,
   braces and return statement are elided in this excerpt.  */
960 elf64_x86_64_elf_object_p (bfd *abfd)
962   /* Set the right machine number for an x86-64 elf64 file.  */
963   bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
/* object_p hook: tag x32 (ILP32) x86-64 input files with the
   bfd_mach_x64_32 machine number.  NOTE(review): the return type,
   braces and return statement are elided in this excerpt.  */
968 elf32_x86_64_elf_object_p (bfd *abfd)
970   /* Set the right machine number for an x86-64 elf32 file.  */
971   bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
975 /* Return TRUE if the TLS access code sequence support transition
   from R_TYPE.  Pattern-matches the raw instruction bytes around the
   relocation at REL->r_offset in SEC's CONTENTS to verify that the
   code really is one of the recognized GD/LD/IE/GDesc sequences before
   the linker rewrites it.  REL/RELEND bracket the relocation list so
   the following relocation (against __tls_get_addr) can be inspected.
   NOTE(review): the enclosing "switch (r_type)", several braces and a
   number of condition arms are elided in this excerpt; comments below
   describe only the visible code.  */
979 elf_x86_64_check_tls_transition (bfd *abfd,
980                                  struct bfd_link_info *info,
983                                  Elf_Internal_Shdr *symtab_hdr,
984                                  struct elf_link_hash_entry **sym_hashes,
986                                  const Elf_Internal_Rela *rel,
987                                  const Elf_Internal_Rela *relend)
990   unsigned long r_symndx;
991   bfd_boolean largepic = FALSE;
992   struct elf_link_hash_entry *h;
994   struct elf_x86_link_hash_table *htab;
996   bfd_boolean indirect_call;
998   htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
999   offset = rel->r_offset;
1002     case R_X86_64_TLSGD:
1003     case R_X86_64_TLSLD:
        /* Both GD and LD need a following reloc on the __tls_get_addr
           call; bail out if this is the last relocation.  */
1004       if ((rel + 1) >= relend)
1007       if (r_type == R_X86_64_TLSGD)
1009         /* Check transition from GD access model.  For 64bit, only
1010                 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1011                 .word 0x6666; rex64; call __tls_get_addr@PLT
1013                 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1015                 call *__tls_get_addr@GOTPCREL(%rip)
1016                 which may be converted to
1017                 addr32 call __tls_get_addr
1018            can transit to different access model.  For 32bit, only
1019                 leaq foo@tlsgd(%rip), %rdi
1020                 .word 0x6666; rex64; call __tls_get_addr@PLT
1022                 leaq foo@tlsgd(%rip), %rdi
1024                 call *__tls_get_addr@GOTPCREL(%rip)
1025                 which may be converted to
1026                 addr32 call __tls_get_addr
1027            can transit to different access model.  For largepic,
1029                 leaq foo@tlsgd(%rip), %rdi
1030                 movabsq $__tls_get_addr@pltoff, %rax
1034                 leaq foo@tlsgd(%rip), %rdi
1035                 movabsq $__tls_get_addr@pltoff, %rax  */
        /* Encoding of "data16 lea foo@tlsgd(%rip), %rdi".  */
1039         static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1041         if ((offset + 12) > sec->size)
1044         call = contents + offset + 4;
1046             || !((call[1] == 0x48
1054                   && call[3] == 0xe8)))
          /* Not a recognized small-model call: try the large-model
             (movabsq $__tls_get_addr@pltoff) form, 64-bit only.  */
1056             if (!ABI_64_P (abfd)
1057                 || (offset + 19) > sec->size
1059                 || memcmp (call - 7, leaq + 1, 3) != 0
1060                 || memcmp (call, "\x48\xb8", 2) != 0
1064                 || !((call[10] == 0x48 && call[12] == 0xd8)
1065                      || (call[10] == 0x4c && call[12] == 0xf8)))
1069         else if (ABI_64_P (abfd))
1072             || memcmp (contents + offset - 4, leaq, 4) != 0)
1078             || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
        /* 0xff marks an indirect "call *...(%rip)" form.  */
1081         indirect_call = call[2] == 0xff;
1085         /* Check transition from LD access model.  Only
1086                 leaq foo@tlsld(%rip), %rdi;
1087                 call __tls_get_addr@PLT
1089                 leaq foo@tlsld(%rip), %rdi;
1090                 call *__tls_get_addr@GOTPCREL(%rip)
1091                 which may be converted to
1092                 addr32 call __tls_get_addr
1093            can transit to different access model.  For largepic
1095                 leaq foo@tlsld(%rip), %rdi
1096                 movabsq $__tls_get_addr@pltoff, %rax
1100                 leaq foo@tlsld(%rip), %rdi
1101                 movabsq $__tls_get_addr@pltoff, %rax  */
        /* Encoding of "lea foo@tlsld(%rip), %rdi" (no data16 prefix).  */
1105         static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1107         if (offset < 3 || (offset + 9) > sec->size)
1110         if (memcmp (contents + offset - 3, lea, 3) != 0)
1113         call = contents + offset + 4;
1114         if (!(call[0] == 0xe8
1115               || (call[0] == 0xff && call[1] == 0x15)
1116               || (call[0] == 0x67 && call[1] == 0xe8)))
1118             if (!ABI_64_P (abfd)
1119                 || (offset + 19) > sec->size
1120                 || memcmp (call, "\x48\xb8", 2) != 0
1124                 || !((call[10] == 0x48 && call[12] == 0xd8)
1125                      || (call[10] == 0x4c && call[12] == 0xf8)))
1129         indirect_call = call[0] == 0xff;
      /* The next relocation must reference __tls_get_addr; look up its
         hash entry and check the tls_get_addr flag set elsewhere.  */
1132       r_symndx = htab->r_sym (rel[1].r_info);
1133       if (r_symndx < symtab_hdr->sh_info)
1136       h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1138           || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
      /* Strip the internal "already converted" marker bit before
         comparing the follow-up relocation type.  */
1142       r_type = (ELF32_R_TYPE (rel[1].r_info)
1143                 & ~R_X86_64_converted_reloc_bit);
1145         return r_type == R_X86_64_PLTOFF64;
1146       else if (indirect_call)
1147         return r_type == R_X86_64_GOTPCRELX;
1149         return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1152     case R_X86_64_GOTTPOFF:
1153       /* Check transition from IE access model:
1154                 mov foo@gottpoff(%rip), %reg
1155                 add foo@gottpoff(%rip), %reg  */
1158       /* Check REX prefix first.  */
1159       if (offset >= 3 && (offset + 4) <= sec->size)
1161           val = bfd_get_8 (abfd, contents + offset - 3);
1162           if (val != 0x48 && val != 0x4c)
1164               /* X32 may have 0x44 REX prefix or no REX prefix.  */
1165               if (ABI_64_P (abfd))
1171           /* X32 may not have any REX prefix.  */
1172           if (ABI_64_P (abfd))
1174           if (offset < 2 || (offset + 3) > sec->size)
      /* Opcode must be mov (0x8b) or add (0x03)...  */
1178       val = bfd_get_8 (abfd, contents + offset - 2);
1179       if (val != 0x8b && val != 0x03)
      /* ...with a RIP-relative ModRM byte (mod=00, r/m=101).  */
1182       val = bfd_get_8 (abfd, contents + offset - 1);
1183       return (val & 0xc7) == 5;
1185     case R_X86_64_GOTPC32_TLSDESC:
1186       /* Check transition from GDesc access model:
1187                 leaq x@tlsdesc(%rip), %rax
1189          Make sure it's a leaq adding rip to a 32-bit offset
1190          into any register, although it's probably almost always
1193       if (offset < 3 || (offset + 4) > sec->size)
      /* REX.W with any R bit (0x48/0x4c), then 0x8d = lea.  */
1196       val = bfd_get_8 (abfd, contents + offset - 3);
1197       if ((val & 0xfb) != 0x48)
1200       if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1203       val = bfd_get_8 (abfd, contents + offset - 1);
1204       return (val & 0xc7) == 0x05;
1206     case R_X86_64_TLSDESC_CALL:
1207       /* Check transition from GDesc access model:
1208                 call *x@tlsdesc(%rax)  */
1210       if (offset + 2 <= sec->size)
1212           /* Make sure that it's a call *x@tlsdesc(%rax).  */
1213           call = contents + offset;
1214           return call[0] == 0xff && call[1] == 0x10;
1224 /* Return TRUE if the TLS access transition is OK or no transition
1225    will be performed.  Update R_TYPE if there is a transition.
   Decides the target TLS model (GD/LD -> IE/LE) based on link type and,
   when called from relocate_section, on the per-symbol TLS_TYPE; then
   validates the instruction sequence via
   elf_x86_64_check_tls_transition and reports an error on mismatch.
   NOTE(review): several braces, else-arms and return statements are
   elided in this excerpt.  */
1228 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1229                            asection *sec, bfd_byte *contents,
1230                            Elf_Internal_Shdr *symtab_hdr,
1231                            struct elf_link_hash_entry **sym_hashes,
1232                            unsigned int *r_type, int tls_type,
1233                            const Elf_Internal_Rela *rel,
1234                            const Elf_Internal_Rela *relend,
1235                            struct elf_link_hash_entry *h,
1236                            unsigned long r_symndx,
1237                            bfd_boolean from_relocate_section)
1239   unsigned int from_type = *r_type;
1240   unsigned int to_type = from_type;
1241   bfd_boolean check = TRUE;
1243   /* Skip TLS transition for functions.  */
1245       && (h->type == STT_FUNC
1246           || h->type == STT_GNU_IFUNC))
1251     case R_X86_64_TLSGD:
1252     case R_X86_64_GOTPC32_TLSDESC:
1253     case R_X86_64_TLSDESC_CALL:
1254     case R_X86_64_GOTTPOFF:
      /* Executable links can use the static LE model (TPOFF32).  */
1255       if (bfd_link_executable (info))
1258             to_type = R_X86_64_TPOFF32;
1260             to_type = R_X86_64_GOTTPOFF;
1263       /* When we are called from elf_x86_64_relocate_section, there may
1264          be additional transitions based on TLS_TYPE.  */
1265       if (from_relocate_section)
1267           unsigned int new_to_type = to_type;
1269           if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1270             new_to_type = R_X86_64_TPOFF32;
1272           if (to_type == R_X86_64_TLSGD
1273               || to_type == R_X86_64_GOTPC32_TLSDESC
1274               || to_type == R_X86_64_TLSDESC_CALL)
1276               if (tls_type == GOT_TLS_IE)
1277                 new_to_type = R_X86_64_GOTTPOFF;
1280           /* We checked the transition before when we were called from
1281              elf_x86_64_check_relocs.  We only want to check the new
1282              transition which hasn't been checked before.  */
1283           check = new_to_type != to_type && from_type == to_type;
1284           to_type = new_to_type;
1289     case R_X86_64_TLSLD:
1290       if (bfd_link_executable (info))
1291         to_type = R_X86_64_TPOFF32;
1298   /* Return TRUE if there is no transition.  */
1299   if (from_type == to_type)
1302   /* Check if the transition can be performed.  */
1304       && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1305                                             symtab_hdr, sym_hashes,
1306                                             from_type, rel, relend))
1308       reloc_howto_type *from, *to;
1311       from = elf_x86_64_rtype_to_howto (abfd, from_type);
1312       to = elf_x86_64_rtype_to_howto (abfd, to_type);
      /* Pick a symbol name for the diagnostic: global hash entry if
         available, otherwise look up the local symbol.  */
1315         name = h->root.root.string;
1318           struct elf_x86_link_hash_table *htab;
1320           htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1325               Elf_Internal_Sym *isym;
1327               isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1329               name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1334         /* xgettext:c-format */
1335         (_("%pB: TLS transition from %s to %s against `%s' at %#Lx "
1336            "in section `%pA' failed"),
1337          abfd, from->name, to->name, name, rel->r_offset, sec);
1338       bfd_set_error (bfd_error_bad_value);
1346 /* Rename some of the generic section flags to better document how they
   are used by this backend: sec_flg0 records that check_relocs hit an
   error on the section, so later passes (e.g. relocate_section) skip it.  */
1348 #define check_relocs_failed sec_flg0
/* Report a relocation that cannot be used in the current output type
   (shared object / PIE / PDE), suggest -fPIC where appropriate, mark the
   section as failed and set bfd_error_bad_value.
   NOTE(review): the return type, braces, switch header and some arms are
   elided in this excerpt.  */
1351 elf_x86_64_need_pic (struct bfd_link_info *info,
1352                      bfd *input_bfd, asection *sec,
1353                      struct elf_link_hash_entry *h,
1354                      Elf_Internal_Shdr *symtab_hdr,
1355                      Elf_Internal_Sym *isym,
1356                      reloc_howto_type *howto)
1359   const char *und = "";
1360   const char *pic = "";
  /* Global symbol: describe its visibility in the message.  */
1366       name = h->root.root.string;
1367       switch (ELF_ST_VISIBILITY (h->other))
1370           v = _("hidden symbol ");
1373           v = _("internal symbol ");
1376           v = _("protected symbol ");
1379           if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1380             v = _("protected symbol ");
1383             pic = _("; recompile with -fPIC");
1387       if (!h->def_regular && !h->def_dynamic)
1388         und = _("undefined ");
  /* Local symbol: resolve its name from the symbol table.  */
1392       name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1393       pic = _("; recompile with -fPIC");
1396   if (bfd_link_dll (info))
1397     object = _("a shared object");
1398   else if (bfd_link_pie (info))
1399     object = _("a PIE object");
1401     object = _("a PDE object");
1403   /* xgettext:c-format */
1404   _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1405                         "not be used when making %s%s"),
1406                       input_bfd, howto->name, und, v, name,
1408   bfd_set_error (bfd_error_bad_value);
1409   sec->check_relocs_failed = 1;
1413 /* With the local symbol, foo, we convert
1414    mov foo@GOTPCREL(%rip), %reg
1418    call/jmp *foo@GOTPCREL(%rip)
1420    nop call foo/jmp foo nop
1421    When PIC is false, convert
1422    test %reg, foo@GOTPCREL(%rip)
1426    binop foo@GOTPCREL(%rip), %reg
1429    where binop is one of adc, add, and, cmp, or, sbb, sub, xor
   Rewrites GOT-indirect loads into direct forms when the target symbol
   binds locally, patching the instruction bytes in CONTENTS and
   replacing the relocation with R_X86_64_PC32/32/32S (tagged with
   R_X86_64_converted_reloc_bit).  *CONVERTED is set when a rewrite
   happened.  NOTE(review): many branches, braces and early returns are
   elided in this excerpt; comments describe only visible code.  */
1433 elf_x86_64_convert_load_reloc (bfd *abfd,
1435                                unsigned int *r_type_p,
1436                                Elf_Internal_Rela *irel,
1437                                struct elf_link_hash_entry *h,
1438                                bfd_boolean *converted,
1439                                struct bfd_link_info *link_info)
1441   struct elf_x86_link_hash_table *htab;
1443   bfd_boolean no_overflow;
1445   bfd_boolean to_reloc_pc32;
1447   bfd_signed_vma raddend;
1448   unsigned int opcode;
1450   unsigned int r_type = *r_type_p;
1451   unsigned int r_symndx;
1452   bfd_vma roff = irel->r_offset;
  /* Need room before the reloc for opcode (+ REX byte for the REX
     flavor); otherwise nothing to rewrite.  */
1454   if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1457   raddend = irel->r_addend;
1458   /* Addend for 32-bit PC-relative relocation must be -4.  */
1462   htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1463   is_pic = bfd_link_pic (link_info);
1465   relocx = (r_type == R_X86_64_GOTPCRELX
1466             || r_type == R_X86_64_REX_GOTPCRELX);
1468   /* TRUE if --no-relax is used.  */
1469   no_overflow = link_info->disable_target_specific_optimizations > 1;
1471   r_symndx = htab->r_sym (irel->r_info);
1473   opcode = bfd_get_8 (abfd, contents + roff - 2);
1475   /* Convert mov to lea since it has been done for a while.  */
1478   /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1479      for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1480      test, xor instructions.  */
1485   /* We convert only to R_X86_64_PC32:
1487      2. R_X86_64_GOTPCREL since we can't modify REX byte.
1488      3. no_overflow is true.
  /* 0xff = call/jmp group: those must stay PC-relative.  */
1491   to_reloc_pc32 = (opcode == 0xff
1496   /* Get the symbol referred to by the reloc.  */
1499       Elf_Internal_Sym *isym
1500         = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1502       /* Skip relocation against undefined symbols.  */
1503       if (isym->st_shndx == SHN_UNDEF)
      /* Map special section indices to their pseudo-sections.  */
1506       if (isym->st_shndx == SHN_ABS)
1507         tsec = bfd_abs_section_ptr;
1508       else if (isym->st_shndx == SHN_COMMON)
1509         tsec = bfd_com_section_ptr;
1510       else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1511         tsec = &_bfd_elf_large_com_section;
1513         tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1517       /* Undefined weak symbol is only bound locally in executable
1518          and its reference is resolved as 0 without relocation
1519          overflow.  We can only perform this optimization for
1520          GOTPCRELX relocations since we need to modify REX byte.
1521          It is OK convert mov with R_X86_64_GOTPCREL to
1523       bfd_boolean local_ref;
1524       struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1526       /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P.  */
1527       local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1528       if ((relocx || opcode == 0x8b)
1529           && (h->root.type == bfd_link_hash_undefweak
1535           /* Skip for branch instructions since R_X86_64_PC32
1542               /* For non-branch instructions, we can convert to
1543                  R_X86_64_32/R_X86_64_32S since we know if there
1545               to_reloc_pc32 = FALSE;
1548       /* Since we don't know the current PC when PIC is true,
1549          we can't convert to R_X86_64_PC32.  */
1550       else if (to_reloc_pc32 && is_pic)
1555       /* Avoid optimizing GOTPCREL relocations againt _DYNAMIC since
1556          ld.so may use its link-time address.  */
1557       else if (h->start_stop
1560                || h->root.type == bfd_link_hash_defined
1561                || h->root.type == bfd_link_hash_defweak)
1562               && h != htab->elf.hdynamic
1565           /* bfd_link_hash_new or bfd_link_hash_undefined is
1566              set by an assignment in a linker script in
1567              bfd_elf_record_link_assignment.  start_stop is set
1568              on __start_SECNAME/__stop_SECNAME which mark section
1573               && (h->root.type == bfd_link_hash_new
1574                   || h->root.type == bfd_link_hash_undefined
1575                   || ((h->root.type == bfd_link_hash_defined
1576                        || h->root.type == bfd_link_hash_defweak)
1577                       && h->root.u.def.section == bfd_und_section_ptr))))
1579               /* Skip since R_X86_64_32/R_X86_64_32S may overflow.  */
1584             tsec = h->root.u.def.section;
1590   /* Don't convert GOTPCREL relocation against large section.  */
1591   if (elf_section_data (tsec) != NULL
1592       && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1595   /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow.  */
1602       /* We have "call/jmp *foo@GOTPCREL(%rip)".  */
1607       /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1609       modrm = bfd_get_8 (abfd, contents + roff - 1);
1612           /* Convert to "jmp foo nop".  */
      /* Shift the displacement down one byte; the freed trailing byte
         becomes the nop.  */
1615           nop_offset = irel->r_offset + 3;
1616           disp = bfd_get_32 (abfd, contents + irel->r_offset);
1617           irel->r_offset -= 1;
1618           bfd_put_32 (abfd, disp, contents + irel->r_offset);
1622           struct elf_x86_link_hash_entry *eh
1623             = (struct elf_x86_link_hash_entry *) h;
1625           /* Convert to "nop call foo".  ADDR_PREFIX_OPCODE
1628           /* To support TLS optimization, always use addr32 prefix for
1629              "call *__tls_get_addr@GOTPCREL(%rip)".  */
1630           if (eh && eh->tls_get_addr)
1633               nop_offset = irel->r_offset - 2;
1637               nop = link_info->call_nop_byte;
1638               if (link_info->call_nop_as_suffix)
1640                   nop_offset = irel->r_offset + 3;
1641                   disp = bfd_get_32 (abfd, contents + irel->r_offset);
1642                   irel->r_offset -= 1;
1643                   bfd_put_32 (abfd, disp, contents + irel->r_offset);
1646                 nop_offset = irel->r_offset - 2;
1649       bfd_put_8 (abfd, nop, contents + nop_offset);
1650       bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1651       r_type = R_X86_64_PC32;
1656       unsigned int rex_mask = REX_R;
1658       if (r_type == R_X86_64_REX_GOTPCRELX)
1659         rex = bfd_get_8 (abfd, contents + roff - 3);
1667           /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1668              "lea foo(%rip), %reg".  */
1670           r_type = R_X86_64_PC32;
1674           /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1675              "mov $foo, %reg".  */
          /* Turn the RIP-relative ModRM into register-direct form.  */
1677           modrm = bfd_get_8 (abfd, contents + roff - 1);
1678           modrm = 0xc0 | (modrm & 0x38) >> 3;
1679           if ((rex & REX_W) != 0
1680               && ABI_64_P (link_info->output_bfd))
1682               /* Keep the REX_W bit in REX byte for LP64.  */
1683               r_type = R_X86_64_32S;
1684               goto rewrite_modrm_rex;
1688               /* If the REX_W bit in REX byte isn't needed,
1689                  use R_X86_64_32 and clear the W bit to avoid
1690                  sign-extend imm32 to imm64.  */
1691               r_type = R_X86_64_32;
1692               /* Clear the W bit in REX byte.  */
1694               goto rewrite_modrm_rex;
1700           /* R_X86_64_PC32 isn't supported.  */
1704           modrm = bfd_get_8 (abfd, contents + roff - 1);
1707               /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1708                  "test $foo, %reg".  */
1709               modrm = 0xc0 | (modrm & 0x38) >> 3;
1714               /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1715                  "binop $foo, %reg".  */
1716               modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1720           /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1721              overflow when sign-extending imm32 to imm64.  */
1722           r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1725           bfd_put_8 (abfd, modrm, contents + roff - 1);
1729           /* Move the R bit to the B bit in REX byte.  */
1730           rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1731           bfd_put_8 (abfd, rex, contents + roff - 3);
1734           /* No addend for R_X86_64_32/R_X86_64_32S relocations.  */
1738       bfd_put_8 (abfd, opcode, contents + roff - 2);
  /* Record the rewritten type with the "converted" marker bit so the
     later passes can tell this reloc was synthesized here.  */
1742   irel->r_info = htab->r_info (r_symndx,
1743                                r_type | R_X86_64_converted_reloc_bit);
1750 /* Look through the relocs for a section during the first phase, and
1751    calculate needed space in the global offset table, procedure
1752    linkage table, and dynamic reloc sections.
   Also performs GOTPCREL load conversion, TLS transition checking,
   x32 validity checks and dynamic-reloc bookkeeping per relocation.
   NOTE(review): many lines (braces, else-arms, labels, error paths) are
   elided in this excerpt; comments describe only the visible code.  */
1755 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1757                          const Elf_Internal_Rela *relocs)
1759   struct elf_x86_link_hash_table *htab;
1760   Elf_Internal_Shdr *symtab_hdr;
1761   struct elf_link_hash_entry **sym_hashes;
1762   const Elf_Internal_Rela *rel;
1763   const Elf_Internal_Rela *rel_end;
1766   bfd_boolean converted;
1768   if (bfd_link_relocatable (info))
1771   /* Don't do anything special with non-loaded, non-alloced sections.
1772      In particular, any relocs in such sections should not affect GOT
1773      and PLT reference counting (ie. we don't allow them to create GOT
1774      or PLT entries), there's no possibility or desire to optimize TLS
1775      relocs, and there's not much point in propagating relocs to shared
1776      libs that the dynamic linker won't relocate.  */
1777   if ((sec->flags & SEC_ALLOC) == 0)
1780   htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1783       sec->check_relocs_failed = 1;
1787   BFD_ASSERT (is_x86_elf (abfd, htab));
1789   /* Get the section contents.  */
1790   if (elf_section_data (sec)->this_hdr.contents != NULL)
1791     contents = elf_section_data (sec)->this_hdr.contents;
1792   else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1794       sec->check_relocs_failed = 1;
1798   symtab_hdr = &elf_symtab_hdr (abfd);
1799   sym_hashes = elf_sym_hashes (abfd);
1805   rel_end = relocs + sec->reloc_count;
1806   for (rel = relocs; rel < rel_end; rel++)
1808       unsigned int r_type;
1809       unsigned int r_symndx;
1810       struct elf_link_hash_entry *h;
1811       struct elf_x86_link_hash_entry *eh;
1812       Elf_Internal_Sym *isym;
1814       bfd_boolean size_reloc;
1815       bfd_boolean converted_reloc;
1817       r_symndx = htab->r_sym (rel->r_info);
1818       r_type = ELF32_R_TYPE (rel->r_info);
1820       if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1822           /* xgettext:c-format */
1823           _bfd_error_handler (_("%pB: bad symbol index: %d"),
1828       if (r_symndx < symtab_hdr->sh_info)
1830           /* A local symbol.  */
1831           isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1836           /* Check relocation against local STT_GNU_IFUNC symbol.  */
1837           if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1839               h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1844               /* Fake a STT_GNU_IFUNC symbol.  */
1845               h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1847               h->type = STT_GNU_IFUNC;
1850               h->forced_local = 1;
1851               h->root.type = bfd_link_hash_defined;
1859           h = sym_hashes[r_symndx - symtab_hdr->sh_info];
          /* Follow indirect/warning links to the real symbol.  */
1860           while (h->root.type == bfd_link_hash_indirect
1861                  || h->root.type == bfd_link_hash_warning)
1862             h = (struct elf_link_hash_entry *) h->root.u.i.link;
1865       /* Check invalid x32 relocations.  */
1866       if (!ABI_64_P (abfd))
1872             case R_X86_64_DTPOFF64:
1873             case R_X86_64_TPOFF64:
1875             case R_X86_64_GOTOFF64:
1876             case R_X86_64_GOT64:
1877             case R_X86_64_GOTPCREL64:
1878             case R_X86_64_GOTPC64:
1879             case R_X86_64_GOTPLT64:
1880             case R_X86_64_PLTOFF64:
1883                   name = h->root.root.string;
1885                   name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1888                   /* xgettext:c-format */
1889                   (_("%pB: relocation %s against symbol `%s' isn't "
1890                      "supported in x32 mode"), abfd,
1891                    x86_64_elf_howto_table[r_type].name, name);
1892                 bfd_set_error (bfd_error_bad_value);
1900           /* It is referenced by a non-shared object.  */
1903           if (h->type == STT_GNU_IFUNC)
1904             elf_tdata (info->output_bfd)->has_gnu_symbols
1905               |= elf_gnu_symbol_ifunc;
      /* Try to rewrite GOT-indirect loads into direct forms before
         accounting for GOT space.  */
1908       converted_reloc = FALSE;
1909       if ((r_type == R_X86_64_GOTPCREL
1910            || r_type == R_X86_64_GOTPCRELX
1911            || r_type == R_X86_64_REX_GOTPCRELX)
1912           && (h == NULL || h->type != STT_GNU_IFUNC))
1914           Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1915           if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1916                                               irel, h, &converted_reloc,
1920           if (converted_reloc)
1924       if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1925                                        symtab_hdr, sym_hashes,
1926                                        &r_type, GOT_UNKNOWN,
1927                                        rel, rel_end, h, r_symndx, FALSE))
1930       /* Check if _GLOBAL_OFFSET_TABLE_ is referenced.  */
1931       if (h == htab->elf.hgot)
1932         htab->got_referenced = TRUE;
1934       eh = (struct elf_x86_link_hash_entry *) h;
1937         case R_X86_64_TLSLD:
1938           htab->tls_ld_or_ldm_got.refcount = 1;
1941         case R_X86_64_TPOFF32:
          /* LE-model reloc: only valid in (64-bit) executables.  */
1942           if (!bfd_link_executable (info) && ABI_64_P (abfd))
1943             return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1944                                         &x86_64_elf_howto_table[r_type]);
1946             eh->zero_undefweak &= 0x2;
1949         case R_X86_64_GOTTPOFF:
1950           if (!bfd_link_executable (info))
1951             info->flags |= DF_STATIC_TLS;
1954         case R_X86_64_GOT32:
1955         case R_X86_64_GOTPCREL:
1956         case R_X86_64_GOTPCRELX:
1957         case R_X86_64_REX_GOTPCRELX:
1958         case R_X86_64_TLSGD:
1959         case R_X86_64_GOT64:
1960         case R_X86_64_GOTPCREL64:
1961         case R_X86_64_GOTPLT64:
1962         case R_X86_64_GOTPC32_TLSDESC:
1963         case R_X86_64_TLSDESC_CALL:
1964           /* This symbol requires a global offset table entry.  */
1966             int tls_type, old_tls_type;
1970               default: tls_type = GOT_NORMAL; break;
1971               case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1972               case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1973               case R_X86_64_GOTPC32_TLSDESC:
1974               case R_X86_64_TLSDESC_CALL:
1975                 tls_type = GOT_TLS_GDESC; break;
1980                 h->got.refcount = 1;
1981                 old_tls_type = eh->tls_type;
1985                 bfd_signed_vma *local_got_refcounts;
1987                 /* This is a global offset table entry for a local symbol.  */
1988                 local_got_refcounts = elf_local_got_refcounts (abfd);
1989                 if (local_got_refcounts == NULL)
                /* Lazily allocate one combined array holding refcounts,
                   tlsdesc GOT offsets and TLS types for all locals.  */
1993                     size = symtab_hdr->sh_info;
1994                     size *= sizeof (bfd_signed_vma)
1995                       + sizeof (bfd_vma) + sizeof (char);
1996                     local_got_refcounts = ((bfd_signed_vma *)
1997                                            bfd_zalloc (abfd, size));
1998                     if (local_got_refcounts == NULL)
2000                     elf_local_got_refcounts (abfd) = local_got_refcounts;
2001                     elf_x86_local_tlsdesc_gotent (abfd)
2002                       = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2003                     elf_x86_local_got_tls_type (abfd)
2004                       = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2006                 local_got_refcounts[r_symndx] = 1;
2008                   = elf_x86_local_got_tls_type (abfd) [r_symndx];
2011             /* If a TLS symbol is accessed using IE at least once,
2012                there is no point to use dynamic model for it.  */
2013             if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2014                 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2015                     || tls_type != GOT_TLS_IE))
2017                 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2018                   tls_type = old_tls_type;
2019                 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2020                          && GOT_TLS_GD_ANY_P (tls_type))
2021                   tls_type |= old_tls_type;
2025                       name = h->root.root.string;
2027                       name = bfd_elf_sym_name (abfd, symtab_hdr,
2030                       /* xgettext:c-format */
2031                       (_("%pB: '%s' accessed both as normal and"
2032                          " thread local symbol"),
2034                     bfd_set_error (bfd_error_bad_value);
2039             if (old_tls_type != tls_type)
2042                   eh->tls_type = tls_type;
2044                   elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2049         case R_X86_64_GOTOFF64:
2050         case R_X86_64_GOTPC32:
2051         case R_X86_64_GOTPC64:
2054             eh->zero_undefweak &= 0x2;
2057         case R_X86_64_PLT32:
2058         case R_X86_64_PLT32_BND:
2059           /* This symbol requires a procedure linkage table entry.  We
2060              actually build the entry in adjust_dynamic_symbol,
2061              because this might be a case of linking PIC code which is
2062              never referenced by a dynamic object, in which case we
2063              don't need to generate a procedure linkage table entry
2066           /* If this is a local symbol, we resolve it directly without
2067              creating a procedure linkage table entry.  */
2071           eh->zero_undefweak &= 0x2;
2073           h->plt.refcount = 1;
2076         case R_X86_64_PLTOFF64:
2077           /* This tries to form the 'address' of a function relative
2078              to GOT.  For global symbols we need a PLT entry.  */
2082               h->plt.refcount = 1;
2086         case R_X86_64_SIZE32:
2087         case R_X86_64_SIZE64:
2092           if (!ABI_64_P (abfd))
2098           /* Check relocation overflow as these relocs may lead to
2099              run-time relocation overflow.  Don't error out for
2100              sections we don't care about, such as debug sections or
2101              when relocation overflow check is disabled.  */
2102           if (!info->no_reloc_overflow_check
2104               && (bfd_link_pic (info)
2105                   || (bfd_link_executable (info)
2109                       && (sec->flags & SEC_READONLY) == 0)))
2110             return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2111                                         &x86_64_elf_howto_table[r_type]);
2117         case R_X86_64_PC32_BND:
2121           if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2122             eh->zero_undefweak |= 0x2;
2123           /* We are called after all symbols have been resolved.  Only
2124              relocation against STT_GNU_IFUNC symbol must go through
2127               && (bfd_link_executable (info)
2128                   || h->type == STT_GNU_IFUNC))
2130               bfd_boolean func_pointer_ref = FALSE;
2132               if (r_type == R_X86_64_PC32)
2134                   /* Since something like ".long foo - ." may be used
2135                      as pointer, make sure that PLT is used if foo is
2136                      a function defined in a shared library.  */
2137                   if ((sec->flags & SEC_CODE) == 0)
2139                       h->pointer_equality_needed = 1;
2140                       if (bfd_link_pie (info)
2141                           && h->type == STT_FUNC
2146                           h->plt.refcount = 1;
2150               else if (r_type != R_X86_64_PC32_BND
2151                        && r_type != R_X86_64_PC64)
2153                   h->pointer_equality_needed = 1;
2154                   /* At run-time, R_X86_64_64 can be resolved for both
2155                      x86-64 and x32.  But R_X86_64_32 and R_X86_64_32S
2156                      can only be resolved for x32.  */
2157                   if ((sec->flags & SEC_READONLY) == 0
2158                       && (r_type == R_X86_64_64
2159                           || (!ABI_64_P (abfd)
2160                               && (r_type == R_X86_64_32
2161                                   || r_type == R_X86_64_32S))))
2162                     func_pointer_ref = TRUE;
2165               if (!func_pointer_ref)
2167                   /* If this reloc is in a read-only section, we might
2168                      need a copy reloc.  We can't check reliably at this
2169                      stage whether the section is read-only, as input
2170                      sections have not yet been mapped to output sections.
2171                      Tentatively set the flag for now, and correct in
2172                      adjust_dynamic_symbol.  */
2175                   /* We may need a .plt entry if the symbol is a function
2176                      defined in a shared lib or is a function referenced
2177                      from the code or read-only section.  */
2179                       || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2180                     h->plt.refcount = 1;
2186           if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2187                                          htab->pointer_r_type))
2189               struct elf_dyn_relocs *p;
2190               struct elf_dyn_relocs **head;
2192               /* We must copy these reloc types into the output file.
2193                  Create a reloc section in dynobj and make room for
2197                   sreloc = _bfd_elf_make_dynamic_reloc_section
2198                     (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2199                      abfd, /*rela?*/ TRUE);
2205               /* If this is a global symbol, we count the number of
2206                  relocations we need for this symbol.  */
2208                 head = &eh->dyn_relocs;
2211                   /* Track dynamic relocs needed for local syms too.
2212                      We really need local syms available to do this
2217                   isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2222                   s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2226                   /* Beware of type punned pointers vs strict aliasing
2228                   vpp = &(elf_section_data (s)->local_dynrel);
2229                   head = (struct elf_dyn_relocs **)vpp;
2233               if (p == NULL || p->sec != sec)
2235                   bfd_size_type amt = sizeof *p;
2237                   p = ((struct elf_dyn_relocs *)
2238                        bfd_alloc (htab->elf.dynobj, amt));
2249               /* Count size relocation as PC-relative relocation.  */
2250               if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2255           /* This relocation describes the C++ object vtable hierarchy.
2256              Reconstruct it for later use during GC.  */
2257         case R_X86_64_GNU_VTINHERIT:
2258           if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2262           /* This relocation describes which C++ vtable entries are actually
2263              used.  Record for later use during GC.  */
2264         case R_X86_64_GNU_VTENTRY:
2265           BFD_ASSERT (h != NULL);
2267               && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
  /* Decide whether to cache or free the section contents we loaded.  */
2276   if (elf_section_data (sec)->this_hdr.contents != contents)
2278       if (!converted && !info->keep_memory)
2282           /* Cache the section contents for elf_link_input_bfd if any
2283              load is converted or --no-keep-memory isn't used.  */
2284           elf_section_data (sec)->this_hdr.contents = contents;
2288   /* Cache relocations if any load is converted.  */
2289   if (elf_section_data (sec)->relocs != relocs && converted)
2290     elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2295   if (elf_section_data (sec)->this_hdr.contents != contents)
2297   sec->check_relocs_failed = 1;
2301 /* Return the relocation value for @tpoff relocation
2302    if STT_TLS virtual address is ADDRESS.
   The value is negative: it is ADDRESS's offset below the thread
   pointer, computed from the TLS segment base and the aligned static
   TLS block size.  NOTE(review): the opening brace and the early
   "return 0" body are elided in this excerpt.  */
2305 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2307   struct elf_link_hash_table *htab = elf_hash_table (info);
2308   const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2309   bfd_vma static_tls_size;
2311   /* If tls_segment is NULL, we should have signalled an error already.  */
2312   if (htab->tls_sec == NULL)
2315   /* Consider special static TLS alignment requirements.  */
2316   static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2317   return address - static_tls_size - htab->tls_sec->vma;
2320 /* Relocate an x86_64 ELF section. */
2323 elf_x86_64_relocate_section (bfd *output_bfd,
2324 struct bfd_link_info *info,
2326 asection *input_section,
2328 Elf_Internal_Rela *relocs,
2329 Elf_Internal_Sym *local_syms,
2330 asection **local_sections)
2332 struct elf_x86_link_hash_table *htab;
2333 Elf_Internal_Shdr *symtab_hdr;
2334 struct elf_link_hash_entry **sym_hashes;
2335 bfd_vma *local_got_offsets;
2336 bfd_vma *local_tlsdesc_gotents;
2337 Elf_Internal_Rela *rel;
2338 Elf_Internal_Rela *wrel;
2339 Elf_Internal_Rela *relend;
2340 unsigned int plt_entry_size;
2342 /* Skip if check_relocs failed. */
2343 if (input_section->check_relocs_failed)
2346 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2350 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2352 plt_entry_size = htab->plt.plt_entry_size;
2353 symtab_hdr = &elf_symtab_hdr (input_bfd);
2354 sym_hashes = elf_sym_hashes (input_bfd);
2355 local_got_offsets = elf_local_got_offsets (input_bfd);
2356 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2358 _bfd_x86_elf_set_tls_module_base (info);
2360 rel = wrel = relocs;
2361 relend = relocs + input_section->reloc_count;
2362 for (; rel < relend; wrel++, rel++)
2364 unsigned int r_type, r_type_tls;
2365 reloc_howto_type *howto;
2366 unsigned long r_symndx;
2367 struct elf_link_hash_entry *h;
2368 struct elf_x86_link_hash_entry *eh;
2369 Elf_Internal_Sym *sym;
2371 bfd_vma off, offplt, plt_offset;
2373 bfd_boolean unresolved_reloc;
2374 bfd_reloc_status_type r;
2376 asection *base_got, *resolved_plt;
2378 bfd_boolean resolved_to_zero;
2379 bfd_boolean relative_reloc;
2380 bfd_boolean converted_reloc;
2381 bfd_boolean need_copy_reloc_in_pie;
2383 r_type = ELF32_R_TYPE (rel->r_info);
2384 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2385 || r_type == (int) R_X86_64_GNU_VTENTRY)
2392 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2393 r_type &= ~R_X86_64_converted_reloc_bit;
2395 if (r_type >= (int) R_X86_64_standard)
2396 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2398 if (r_type != (int) R_X86_64_32
2399 || ABI_64_P (output_bfd))
2400 howto = x86_64_elf_howto_table + r_type;
2402 howto = (x86_64_elf_howto_table
2403 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2404 r_symndx = htab->r_sym (rel->r_info);
2408 unresolved_reloc = FALSE;
2409 if (r_symndx < symtab_hdr->sh_info)
2411 sym = local_syms + r_symndx;
2412 sec = local_sections[r_symndx];
2414 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2416 st_size = sym->st_size;
2418 /* Relocate against local STT_GNU_IFUNC symbol. */
2419 if (!bfd_link_relocatable (info)
2420 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2422 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2427 /* Set STT_GNU_IFUNC symbol value. */
2428 h->root.u.def.value = sym->st_value;
2429 h->root.u.def.section = sec;
2434 bfd_boolean warned ATTRIBUTE_UNUSED;
2435 bfd_boolean ignored ATTRIBUTE_UNUSED;
2437 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2438 r_symndx, symtab_hdr, sym_hashes,
2440 unresolved_reloc, warned, ignored);
2444 if (sec != NULL && discarded_section (sec))
2446 _bfd_clear_contents (howto, input_bfd, input_section,
2447 contents + rel->r_offset);
2448 wrel->r_offset = rel->r_offset;
2452 /* For ld -r, remove relocations in debug sections against
2453 sections defined in discarded sections. Not done for
2454 eh_frame editing code expects to be present. */
2455 if (bfd_link_relocatable (info)
2456 && (input_section->flags & SEC_DEBUGGING))
2462 if (bfd_link_relocatable (info))
2469 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2471 if (r_type == R_X86_64_64)
2473 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2474 zero-extend it to 64bit if addend is zero. */
2475 r_type = R_X86_64_32;
2476 memset (contents + rel->r_offset + 4, 0, 4);
2478 else if (r_type == R_X86_64_SIZE64)
2480 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2481 zero-extend it to 64bit if addend is zero. */
2482 r_type = R_X86_64_SIZE32;
2483 memset (contents + rel->r_offset + 4, 0, 4);
2487 eh = (struct elf_x86_link_hash_entry *) h;
2489 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2490 it here if it is defined in a non-shared object. */
2492 && h->type == STT_GNU_IFUNC
2498 if ((input_section->flags & SEC_ALLOC) == 0)
2500 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2501 sections because such sections are not SEC_ALLOC and
2502 thus ld.so will not process them. */
2503 if ((input_section->flags & SEC_DEBUGGING) != 0)
2513 case R_X86_64_GOTPCREL:
2514 case R_X86_64_GOTPCRELX:
2515 case R_X86_64_REX_GOTPCRELX:
2516 case R_X86_64_GOTPCREL64:
2517 base_got = htab->elf.sgot;
2518 off = h->got.offset;
2520 if (base_got == NULL)
2523 if (off == (bfd_vma) -1)
2525 /* We can't use h->got.offset here to save state, or
2526 even just remember the offset, as finish_dynamic_symbol
2527 would use that as offset into .got. */
2529 if (h->plt.offset == (bfd_vma) -1)
2532 if (htab->elf.splt != NULL)
2534 plt_index = (h->plt.offset / plt_entry_size
2535 - htab->plt.has_plt0);
2536 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2537 base_got = htab->elf.sgotplt;
2541 plt_index = h->plt.offset / plt_entry_size;
2542 off = plt_index * GOT_ENTRY_SIZE;
2543 base_got = htab->elf.igotplt;
2546 if (h->dynindx == -1
2550 /* This references the local defitionion. We must
2551 initialize this entry in the global offset table.
2552 Since the offset must always be a multiple of 8,
2553 we use the least significant bit to record
2554 whether we have initialized it already.
2556 When doing a dynamic link, we create a .rela.got
2557 relocation entry to initialize the value. This
2558 is done in the finish_dynamic_symbol routine. */
2563 bfd_put_64 (output_bfd, relocation,
2564 base_got->contents + off);
2565 /* Note that this is harmless for the GOTPLT64
2566 case, as -1 | 1 still is -1. */
2572 relocation = (base_got->output_section->vma
2573 + base_got->output_offset + off);
2578 if (h->plt.offset == (bfd_vma) -1)
2580 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2581 if (r_type == htab->pointer_r_type
2582 && (input_section->flags & SEC_CODE) == 0)
2583 goto do_ifunc_pointer;
2584 goto bad_ifunc_reloc;
2587 /* STT_GNU_IFUNC symbol must go through PLT. */
2588 if (htab->elf.splt != NULL)
2590 if (htab->plt_second != NULL)
2592 resolved_plt = htab->plt_second;
2593 plt_offset = eh->plt_second.offset;
2597 resolved_plt = htab->elf.splt;
2598 plt_offset = h->plt.offset;
2603 resolved_plt = htab->elf.iplt;
2604 plt_offset = h->plt.offset;
2607 relocation = (resolved_plt->output_section->vma
2608 + resolved_plt->output_offset + plt_offset);
2614 if (h->root.root.string)
2615 name = h->root.root.string;
2617 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2620 /* xgettext:c-format */
2621 (_("%pB: relocation %s against STT_GNU_IFUNC "
2622 "symbol `%s' isn't supported"), input_bfd,
2624 bfd_set_error (bfd_error_bad_value);
2628 if (bfd_link_pic (info))
2633 if (ABI_64_P (output_bfd))
2638 if (rel->r_addend != 0)
2640 if (h->root.root.string)
2641 name = h->root.root.string;
2643 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2646 /* xgettext:c-format */
2647 (_("%pB: relocation %s against STT_GNU_IFUNC "
2648 "symbol `%s' has non-zero addend: %Ld"),
2649 input_bfd, howto->name, name, rel->r_addend);
2650 bfd_set_error (bfd_error_bad_value);
2654 /* Generate dynamic relcoation only when there is a
2655 non-GOT reference in a shared object or there is no
2657 if ((bfd_link_pic (info) && h->non_got_ref)
2658 || h->plt.offset == (bfd_vma) -1)
2660 Elf_Internal_Rela outrel;
2663 /* Need a dynamic relocation to get the real function
2665 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2669 if (outrel.r_offset == (bfd_vma) -1
2670 || outrel.r_offset == (bfd_vma) -2)
2673 outrel.r_offset += (input_section->output_section->vma
2674 + input_section->output_offset);
2676 if (POINTER_LOCAL_IFUNC_P (info, h))
2678 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2679 h->root.root.string,
2680 h->root.u.def.section->owner);
2682 /* This symbol is resolved locally. */
2683 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2684 outrel.r_addend = (h->root.u.def.value
2685 + h->root.u.def.section->output_section->vma
2686 + h->root.u.def.section->output_offset);
2690 outrel.r_info = htab->r_info (h->dynindx, r_type);
2691 outrel.r_addend = 0;
2694 /* Dynamic relocations are stored in
2695 1. .rela.ifunc section in PIC object.
2696 2. .rela.got section in dynamic executable.
2697 3. .rela.iplt section in static executable. */
2698 if (bfd_link_pic (info))
2699 sreloc = htab->elf.irelifunc;
2700 else if (htab->elf.splt != NULL)
2701 sreloc = htab->elf.srelgot;
2703 sreloc = htab->elf.irelplt;
2704 elf_append_rela (output_bfd, sreloc, &outrel);
2706 /* If this reloc is against an external symbol, we
2707 do not want to fiddle with the addend. Otherwise,
2708 we need to include the symbol value so that it
2709 becomes an addend for the dynamic reloc. For an
2710 internal symbol, we have updated addend. */
2715 case R_X86_64_PC32_BND:
2717 case R_X86_64_PLT32:
2718 case R_X86_64_PLT32_BND:
2723 resolved_to_zero = (eh != NULL
2724 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2726 /* When generating a shared object, the relocations handled here are
2727 copied into the output file to be resolved at run time. */
2730 case R_X86_64_GOT32:
2731 case R_X86_64_GOT64:
2732 /* Relocation is to the entry for this symbol in the global
2734 case R_X86_64_GOTPCREL:
2735 case R_X86_64_GOTPCRELX:
2736 case R_X86_64_REX_GOTPCRELX:
2737 case R_X86_64_GOTPCREL64:
2738 /* Use global offset table entry as symbol value. */
2739 case R_X86_64_GOTPLT64:
2740 /* This is obsolete and treated the same as GOT64. */
2741 base_got = htab->elf.sgot;
2743 if (htab->elf.sgot == NULL)
2746 relative_reloc = FALSE;
2749 off = h->got.offset;
2751 && h->plt.offset != (bfd_vma)-1
2752 && off == (bfd_vma)-1)
2754 /* We can't use h->got.offset here to save
2755 state, or even just remember the offset, as
2756 finish_dynamic_symbol would use that as offset into
2758 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2759 - htab->plt.has_plt0);
2760 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2761 base_got = htab->elf.sgotplt;
2764 if (RESOLVED_LOCALLY_P (info, h, htab))
2766 /* We must initialize this entry in the global offset
2767 table. Since the offset must always be a multiple
2768 of 8, we use the least significant bit to record
2769 whether we have initialized it already.
2771 When doing a dynamic link, we create a .rela.got
2772 relocation entry to initialize the value. This is
2773 done in the finish_dynamic_symbol routine. */
2778 bfd_put_64 (output_bfd, relocation,
2779 base_got->contents + off);
2780 /* Note that this is harmless for the GOTPLT64 case,
2781 as -1 | 1 still is -1. */
2784 if (GENERATE_RELATIVE_RELOC_P (info, h))
2786 /* If this symbol isn't dynamic in PIC,
2787 generate R_X86_64_RELATIVE here. */
2788 eh->no_finish_dynamic_symbol = 1;
2789 relative_reloc = TRUE;
2794 unresolved_reloc = FALSE;
2798 if (local_got_offsets == NULL)
2801 off = local_got_offsets[r_symndx];
2803 /* The offset must always be a multiple of 8. We use
2804 the least significant bit to record whether we have
2805 already generated the necessary reloc. */
2810 bfd_put_64 (output_bfd, relocation,
2811 base_got->contents + off);
2812 local_got_offsets[r_symndx] |= 1;
2814 if (bfd_link_pic (info))
2815 relative_reloc = TRUE;
2822 Elf_Internal_Rela outrel;
2824 /* We need to generate a R_X86_64_RELATIVE reloc
2825 for the dynamic linker. */
2826 s = htab->elf.srelgot;
2830 outrel.r_offset = (base_got->output_section->vma
2831 + base_got->output_offset
2833 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2834 outrel.r_addend = relocation;
2835 elf_append_rela (output_bfd, s, &outrel);
2838 if (off >= (bfd_vma) -2)
2841 relocation = base_got->output_section->vma
2842 + base_got->output_offset + off;
2843 if (r_type != R_X86_64_GOTPCREL
2844 && r_type != R_X86_64_GOTPCRELX
2845 && r_type != R_X86_64_REX_GOTPCRELX
2846 && r_type != R_X86_64_GOTPCREL64)
2847 relocation -= htab->elf.sgotplt->output_section->vma
2848 - htab->elf.sgotplt->output_offset;
2852 case R_X86_64_GOTOFF64:
2853 /* Relocation is relative to the start of the global offset
2856 /* Check to make sure it isn't a protected function or data
2857 symbol for shared library since it may not be local when
2858 used as function address or with copy relocation. We also
2859 need to make sure that a symbol is referenced locally. */
2860 if (bfd_link_pic (info) && h)
2862 if (!h->def_regular)
2866 switch (ELF_ST_VISIBILITY (h->other))
2869 v = _("hidden symbol");
2872 v = _("internal symbol");
2875 v = _("protected symbol");
2883 /* xgettext:c-format */
2884 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
2885 " `%s' can not be used when making a shared object"),
2886 input_bfd, v, h->root.root.string);
2887 bfd_set_error (bfd_error_bad_value);
2890 else if (!bfd_link_executable (info)
2891 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2892 && (h->type == STT_FUNC
2893 || h->type == STT_OBJECT)
2894 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2897 /* xgettext:c-format */
2898 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
2899 " `%s' can not be used when making a shared object"),
2901 h->type == STT_FUNC ? "function" : "data",
2902 h->root.root.string);
2903 bfd_set_error (bfd_error_bad_value);
2908 /* Note that sgot is not involved in this
2909 calculation. We always want the start of .got.plt. If we
2910 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2911 permitted by the ABI, we might have to change this
2913 relocation -= htab->elf.sgotplt->output_section->vma
2914 + htab->elf.sgotplt->output_offset;
2917 case R_X86_64_GOTPC32:
2918 case R_X86_64_GOTPC64:
2919 /* Use global offset table as symbol value. */
2920 relocation = htab->elf.sgotplt->output_section->vma
2921 + htab->elf.sgotplt->output_offset;
2922 unresolved_reloc = FALSE;
2925 case R_X86_64_PLTOFF64:
2926 /* Relocation is PLT entry relative to GOT. For local
2927 symbols it's the symbol itself relative to GOT. */
2929 /* See PLT32 handling. */
2930 && (h->plt.offset != (bfd_vma) -1
2931 || eh->plt_got.offset != (bfd_vma) -1)
2932 && htab->elf.splt != NULL)
2934 if (eh->plt_got.offset != (bfd_vma) -1)
2936 /* Use the GOT PLT. */
2937 resolved_plt = htab->plt_got;
2938 plt_offset = eh->plt_got.offset;
2940 else if (htab->plt_second != NULL)
2942 resolved_plt = htab->plt_second;
2943 plt_offset = eh->plt_second.offset;
2947 resolved_plt = htab->elf.splt;
2948 plt_offset = h->plt.offset;
2951 relocation = (resolved_plt->output_section->vma
2952 + resolved_plt->output_offset
2954 unresolved_reloc = FALSE;
2957 relocation -= htab->elf.sgotplt->output_section->vma
2958 + htab->elf.sgotplt->output_offset;
2961 case R_X86_64_PLT32:
2962 case R_X86_64_PLT32_BND:
2963 /* Relocation is to the entry for this symbol in the
2964 procedure linkage table. */
2966 /* Resolve a PLT32 reloc against a local symbol directly,
2967 without using the procedure linkage table. */
2971 if ((h->plt.offset == (bfd_vma) -1
2972 && eh->plt_got.offset == (bfd_vma) -1)
2973 || htab->elf.splt == NULL)
2975 /* We didn't make a PLT entry for this symbol. This
2976 happens when statically linking PIC code, or when
2977 using -Bsymbolic. */
2982 if (h->plt.offset != (bfd_vma) -1)
2984 if (htab->plt_second != NULL)
2986 resolved_plt = htab->plt_second;
2987 plt_offset = eh->plt_second.offset;
2991 resolved_plt = htab->elf.splt;
2992 plt_offset = h->plt.offset;
2997 /* Use the GOT PLT. */
2998 resolved_plt = htab->plt_got;
2999 plt_offset = eh->plt_got.offset;
3002 relocation = (resolved_plt->output_section->vma
3003 + resolved_plt->output_offset
3005 unresolved_reloc = FALSE;
3008 case R_X86_64_SIZE32:
3009 case R_X86_64_SIZE64:
3010 /* Set to symbol size. */
3011 relocation = st_size;
3017 case R_X86_64_PC32_BND:
3018 /* Don't complain about -fPIC if the symbol is undefined when
3019 building executable unless it is unresolved weak symbol,
3020 references a dynamic definition in PIE or -z nocopyreloc
3022 if ((input_section->flags & SEC_ALLOC) != 0
3023 && (input_section->flags & SEC_READONLY) != 0
3025 && ((bfd_link_executable (info)
3026 && ((h->root.type == bfd_link_hash_undefweak
3027 && !resolved_to_zero)
3028 || (bfd_link_pie (info)
3031 || ((info->nocopyreloc
3032 || (eh->def_protected
3033 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3035 && !(h->root.u.def.section->flags & SEC_CODE))))
3036 || bfd_link_dll (info)))
3038 bfd_boolean fail = FALSE;
3039 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3041 /* Symbol is referenced locally. Make sure it is
3043 fail = !(h->def_regular || ELF_COMMON_DEF_P (h));
3045 else if (!(bfd_link_pie (info)
3046 && (h->needs_copy || eh->needs_copy)))
3048 /* Symbol doesn't need copy reloc and isn't referenced
3049 locally. Address of protected function may not be
3050 reachable at run-time. */
3051 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3052 || (ELF_ST_VISIBILITY (h->other) == STV_PROTECTED
3053 && h->type == STT_FUNC));
3057 return elf_x86_64_need_pic (info, input_bfd, input_section,
3058 h, NULL, NULL, howto);
3060 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3061 as function address. */
3063 && (input_section->flags & SEC_CODE) == 0
3064 && bfd_link_pie (info)
3065 && h->type == STT_FUNC
3076 /* FIXME: The ABI says the linker should make sure the value is
3077 the same when it's zeroextended to 64 bit. */
3080 if ((input_section->flags & SEC_ALLOC) == 0)
3083 need_copy_reloc_in_pie = (bfd_link_pie (info)
3088 == bfd_link_hash_undefined))
3089 && (X86_PCREL_TYPE_P (r_type)
3090 || X86_SIZE_TYPE_P (r_type)));
3092 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3093 need_copy_reloc_in_pie,
3094 resolved_to_zero, FALSE))
3096 Elf_Internal_Rela outrel;
3097 bfd_boolean skip, relocate;
3100 /* When generating a shared object, these relocations
3101 are copied into the output file to be resolved at run
3107 _bfd_elf_section_offset (output_bfd, info, input_section,
3109 if (outrel.r_offset == (bfd_vma) -1)
3111 else if (outrel.r_offset == (bfd_vma) -2)
3112 skip = TRUE, relocate = TRUE;
3114 outrel.r_offset += (input_section->output_section->vma
3115 + input_section->output_offset);
3118 memset (&outrel, 0, sizeof outrel);
3120 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3122 outrel.r_info = htab->r_info (h->dynindx, r_type);
3123 outrel.r_addend = rel->r_addend;
3127 /* This symbol is local, or marked to become local.
3128 When relocation overflow check is disabled, we
3129 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3130 if (r_type == htab->pointer_r_type
3131 || (r_type == R_X86_64_32
3132 && info->no_reloc_overflow_check))
3135 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3136 outrel.r_addend = relocation + rel->r_addend;
3138 else if (r_type == R_X86_64_64
3139 && !ABI_64_P (output_bfd))
3142 outrel.r_info = htab->r_info (0,
3143 R_X86_64_RELATIVE64);
3144 outrel.r_addend = relocation + rel->r_addend;
3145 /* Check addend overflow. */
3146 if ((outrel.r_addend & 0x80000000)
3147 != (rel->r_addend & 0x80000000))
3150 int addend = rel->r_addend;
3151 if (h && h->root.root.string)
3152 name = h->root.root.string;
3154 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3157 /* xgettext:c-format */
3158 (_("%pB: addend %s%#x in relocation %s against "
3159 "symbol `%s' at %#Lx in section `%pA' is "
3161 input_bfd, addend < 0 ? "-" : "", addend,
3162 howto->name, name, rel->r_offset, input_section);
3163 bfd_set_error (bfd_error_bad_value);
3171 if (bfd_is_abs_section (sec))
3173 else if (sec == NULL || sec->owner == NULL)
3175 bfd_set_error (bfd_error_bad_value);
3182 /* We are turning this relocation into one
3183 against a section symbol. It would be
3184 proper to subtract the symbol's value,
3185 osec->vma, from the emitted reloc addend,
3186 but ld.so expects buggy relocs. */
3187 osec = sec->output_section;
3188 sindx = elf_section_data (osec)->dynindx;
3191 asection *oi = htab->elf.text_index_section;
3192 sindx = elf_section_data (oi)->dynindx;
3194 BFD_ASSERT (sindx != 0);
3197 outrel.r_info = htab->r_info (sindx, r_type);
3198 outrel.r_addend = relocation + rel->r_addend;
3202 sreloc = elf_section_data (input_section)->sreloc;
3204 if (sreloc == NULL || sreloc->contents == NULL)
3206 r = bfd_reloc_notsupported;
3207 goto check_relocation_error;
3210 elf_append_rela (output_bfd, sreloc, &outrel);
3212 /* If this reloc is against an external symbol, we do
3213 not want to fiddle with the addend. Otherwise, we
3214 need to include the symbol value so that it becomes
3215 an addend for the dynamic reloc. */
3222 case R_X86_64_TLSGD:
3223 case R_X86_64_GOTPC32_TLSDESC:
3224 case R_X86_64_TLSDESC_CALL:
3225 case R_X86_64_GOTTPOFF:
3226 tls_type = GOT_UNKNOWN;
3227 if (h == NULL && local_got_offsets)
3228 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3230 tls_type = elf_x86_hash_entry (h)->tls_type;
3232 r_type_tls = r_type;
3233 if (! elf_x86_64_tls_transition (info, input_bfd,
3234 input_section, contents,
3235 symtab_hdr, sym_hashes,
3236 &r_type_tls, tls_type, rel,
3237 relend, h, r_symndx, TRUE))
3240 if (r_type_tls == R_X86_64_TPOFF32)
3242 bfd_vma roff = rel->r_offset;
3244 BFD_ASSERT (! unresolved_reloc);
3246 if (r_type == R_X86_64_TLSGD)
3248 /* GD->LE transition. For 64bit, change
3249 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3250 .word 0x6666; rex64; call __tls_get_addr@PLT
3252 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3254 call *__tls_get_addr@GOTPCREL(%rip)
3255 which may be converted to
3256 addr32 call __tls_get_addr
3259 leaq foo@tpoff(%rax), %rax
3261 leaq foo@tlsgd(%rip), %rdi
3262 .word 0x6666; rex64; call __tls_get_addr@PLT
3264 leaq foo@tlsgd(%rip), %rdi
3266 call *__tls_get_addr@GOTPCREL(%rip)
3267 which may be converted to
3268 addr32 call __tls_get_addr
3271 leaq foo@tpoff(%rax), %rax
3272 For largepic, change:
3273 leaq foo@tlsgd(%rip), %rdi
3274 movabsq $__tls_get_addr@pltoff, %rax
3279 leaq foo@tpoff(%rax), %rax
3280 nopw 0x0(%rax,%rax,1) */
3282 if (ABI_64_P (output_bfd))
3284 if (contents[roff + 5] == 0xb8)
3286 memcpy (contents + roff - 3,
3287 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3288 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3292 memcpy (contents + roff - 4,
3293 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3297 memcpy (contents + roff - 3,
3298 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3300 bfd_put_32 (output_bfd,
3301 elf_x86_64_tpoff (info, relocation),
3302 contents + roff + 8 + largepic);
3303 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3304 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3309 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3311 /* GDesc -> LE transition.
3312 It's originally something like:
3313 leaq x@tlsdesc(%rip), %rax
3316 movl $x@tpoff, %rax. */
3318 unsigned int val, type;
3320 type = bfd_get_8 (input_bfd, contents + roff - 3);
3321 val = bfd_get_8 (input_bfd, contents + roff - 1);
3322 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3323 contents + roff - 3);
3324 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3325 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3326 contents + roff - 1);
3327 bfd_put_32 (output_bfd,
3328 elf_x86_64_tpoff (info, relocation),
3332 else if (r_type == R_X86_64_TLSDESC_CALL)
3334 /* GDesc -> LE transition.
3339 bfd_put_8 (output_bfd, 0x66, contents + roff);
3340 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3343 else if (r_type == R_X86_64_GOTTPOFF)
3345 /* IE->LE transition:
3346 For 64bit, originally it can be one of:
3347 movq foo@gottpoff(%rip), %reg
3348 addq foo@gottpoff(%rip), %reg
3351 leaq foo(%reg), %reg
3353 For 32bit, originally it can be one of:
3354 movq foo@gottpoff(%rip), %reg
3355 addl foo@gottpoff(%rip), %reg
3358 leal foo(%reg), %reg
3361 unsigned int val, type, reg;
3364 val = bfd_get_8 (input_bfd, contents + roff - 3);
3367 type = bfd_get_8 (input_bfd, contents + roff - 2);
3368 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3374 bfd_put_8 (output_bfd, 0x49,
3375 contents + roff - 3);
3376 else if (!ABI_64_P (output_bfd) && val == 0x44)
3377 bfd_put_8 (output_bfd, 0x41,
3378 contents + roff - 3);
3379 bfd_put_8 (output_bfd, 0xc7,
3380 contents + roff - 2);
3381 bfd_put_8 (output_bfd, 0xc0 | reg,
3382 contents + roff - 1);
3386 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3389 bfd_put_8 (output_bfd, 0x49,
3390 contents + roff - 3);
3391 else if (!ABI_64_P (output_bfd) && val == 0x44)
3392 bfd_put_8 (output_bfd, 0x41,
3393 contents + roff - 3);
3394 bfd_put_8 (output_bfd, 0x81,
3395 contents + roff - 2);
3396 bfd_put_8 (output_bfd, 0xc0 | reg,
3397 contents + roff - 1);
3401 /* addq/addl -> leaq/leal */
3403 bfd_put_8 (output_bfd, 0x4d,
3404 contents + roff - 3);
3405 else if (!ABI_64_P (output_bfd) && val == 0x44)
3406 bfd_put_8 (output_bfd, 0x45,
3407 contents + roff - 3);
3408 bfd_put_8 (output_bfd, 0x8d,
3409 contents + roff - 2);
3410 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3411 contents + roff - 1);
3413 bfd_put_32 (output_bfd,
3414 elf_x86_64_tpoff (info, relocation),
3422 if (htab->elf.sgot == NULL)
3427 off = h->got.offset;
3428 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3432 if (local_got_offsets == NULL)
3435 off = local_got_offsets[r_symndx];
3436 offplt = local_tlsdesc_gotents[r_symndx];
3443 Elf_Internal_Rela outrel;
3447 if (htab->elf.srelgot == NULL)
3450 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3452 if (GOT_TLS_GDESC_P (tls_type))
3454 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3455 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3456 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3457 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3458 + htab->elf.sgotplt->output_offset
3460 + htab->sgotplt_jump_table_size);
3461 sreloc = htab->elf.srelplt;
3463 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3465 outrel.r_addend = 0;
3466 elf_append_rela (output_bfd, sreloc, &outrel);
3469 sreloc = htab->elf.srelgot;
3471 outrel.r_offset = (htab->elf.sgot->output_section->vma
3472 + htab->elf.sgot->output_offset + off);
3474 if (GOT_TLS_GD_P (tls_type))
3475 dr_type = R_X86_64_DTPMOD64;
3476 else if (GOT_TLS_GDESC_P (tls_type))
3479 dr_type = R_X86_64_TPOFF64;
3481 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3482 outrel.r_addend = 0;
3483 if ((dr_type == R_X86_64_TPOFF64
3484 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3485 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3486 outrel.r_info = htab->r_info (indx, dr_type);
3488 elf_append_rela (output_bfd, sreloc, &outrel);
3490 if (GOT_TLS_GD_P (tls_type))
3494 BFD_ASSERT (! unresolved_reloc);
3495 bfd_put_64 (output_bfd,
3496 relocation - _bfd_x86_elf_dtpoff_base (info),
3497 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3501 bfd_put_64 (output_bfd, 0,
3502 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3503 outrel.r_info = htab->r_info (indx,
3505 outrel.r_offset += GOT_ENTRY_SIZE;
3506 elf_append_rela (output_bfd, sreloc,
3515 local_got_offsets[r_symndx] |= 1;
3518 if (off >= (bfd_vma) -2
3519 && ! GOT_TLS_GDESC_P (tls_type))
3521 if (r_type_tls == r_type)
3523 if (r_type == R_X86_64_GOTPC32_TLSDESC
3524 || r_type == R_X86_64_TLSDESC_CALL)
3525 relocation = htab->elf.sgotplt->output_section->vma
3526 + htab->elf.sgotplt->output_offset
3527 + offplt + htab->sgotplt_jump_table_size;
3529 relocation = htab->elf.sgot->output_section->vma
3530 + htab->elf.sgot->output_offset + off;
3531 unresolved_reloc = FALSE;
3535 bfd_vma roff = rel->r_offset;
3537 if (r_type == R_X86_64_TLSGD)
3539 /* GD->IE transition. For 64bit, change
3540 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3541 .word 0x6666; rex64; call __tls_get_addr@PLT
3543 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3545 call *__tls_get_addr@GOTPCREL(%rip
3546 which may be converted to
3547 addr32 call __tls_get_addr
3550 addq foo@gottpoff(%rip), %rax
3552 leaq foo@tlsgd(%rip), %rdi
3553 .word 0x6666; rex64; call __tls_get_addr@PLT
3555 leaq foo@tlsgd(%rip), %rdi
3557 call *__tls_get_addr@GOTPCREL(%rip)
3558 which may be converted to
3559 addr32 call __tls_get_addr
3562 addq foo@gottpoff(%rip), %rax
3563 For largepic, change:
3564 leaq foo@tlsgd(%rip), %rdi
3565 movabsq $__tls_get_addr@pltoff, %rax
3570 addq foo@gottpoff(%rax), %rax
3571 nopw 0x0(%rax,%rax,1) */
3573 if (ABI_64_P (output_bfd))
3575 if (contents[roff + 5] == 0xb8)
3577 memcpy (contents + roff - 3,
3578 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3579 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3583 memcpy (contents + roff - 4,
3584 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3588 memcpy (contents + roff - 3,
3589 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3592 relocation = (htab->elf.sgot->output_section->vma
3593 + htab->elf.sgot->output_offset + off
3596 - input_section->output_section->vma
3597 - input_section->output_offset
3599 bfd_put_32 (output_bfd, relocation,
3600 contents + roff + 8 + largepic);
3601 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3606 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3608 /* GDesc -> IE transition.
3609 It's originally something like:
3610 leaq x@tlsdesc(%rip), %rax
3613 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3615 /* Now modify the instruction as appropriate. To
3616 turn a leaq into a movq in the form we use it, it
3617 suffices to change the second byte from 0x8d to
3619 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3621 bfd_put_32 (output_bfd,
3622 htab->elf.sgot->output_section->vma
3623 + htab->elf.sgot->output_offset + off
3625 - input_section->output_section->vma
3626 - input_section->output_offset
3631 else if (r_type == R_X86_64_TLSDESC_CALL)
3633 /* GDesc -> IE transition.
3640 bfd_put_8 (output_bfd, 0x66, contents + roff);
3641 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3649 case R_X86_64_TLSLD:
3650 if (! elf_x86_64_tls_transition (info, input_bfd,
3651 input_section, contents,
3652 symtab_hdr, sym_hashes,
3653 &r_type, GOT_UNKNOWN, rel,
3654 relend, h, r_symndx, TRUE))
3657 if (r_type != R_X86_64_TLSLD)
3659 /* LD->LE transition:
3660 leaq foo@tlsld(%rip), %rdi
3661 call __tls_get_addr@PLT
3662 For 64bit, we change it into:
3663 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3664 For 32bit, we change it into:
3665 nopl 0x0(%rax); movl %fs:0, %eax
3667 leaq foo@tlsld(%rip), %rdi;
3668 call *__tls_get_addr@GOTPCREL(%rip)
3669 which may be converted to
3670 addr32 call __tls_get_addr
3671 For 64bit, we change it into:
3672 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3673 For 32bit, we change it into:
3674 nopw 0x0(%rax); movl %fs:0, %eax
3675 For largepic, change:
3676 leaq foo@tlsgd(%rip), %rdi
3677 movabsq $__tls_get_addr@pltoff, %rax
3681 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3684 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3685 if (ABI_64_P (output_bfd))
3687 if (contents[rel->r_offset + 5] == 0xb8)
3688 memcpy (contents + rel->r_offset - 3,
3689 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3690 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3691 else if (contents[rel->r_offset + 4] == 0xff
3692 || contents[rel->r_offset + 4] == 0x67)
3693 memcpy (contents + rel->r_offset - 3,
3694 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3697 memcpy (contents + rel->r_offset - 3,
3698 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3702 if (contents[rel->r_offset + 4] == 0xff)
3703 memcpy (contents + rel->r_offset - 3,
3704 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3707 memcpy (contents + rel->r_offset - 3,
3708 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3710 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3711 and R_X86_64_PLTOFF64. */
3717 if (htab->elf.sgot == NULL)
3720 off = htab->tls_ld_or_ldm_got.offset;
3725 Elf_Internal_Rela outrel;
3727 if (htab->elf.srelgot == NULL)
3730 outrel.r_offset = (htab->elf.sgot->output_section->vma
3731 + htab->elf.sgot->output_offset + off);
3733 bfd_put_64 (output_bfd, 0,
3734 htab->elf.sgot->contents + off);
3735 bfd_put_64 (output_bfd, 0,
3736 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3737 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3738 outrel.r_addend = 0;
3739 elf_append_rela (output_bfd, htab->elf.srelgot,
3741 htab->tls_ld_or_ldm_got.offset |= 1;
3743 relocation = htab->elf.sgot->output_section->vma
3744 + htab->elf.sgot->output_offset + off;
3745 unresolved_reloc = FALSE;
3748 case R_X86_64_DTPOFF32:
3749 if (!bfd_link_executable (info)
3750 || (input_section->flags & SEC_CODE) == 0)
3751 relocation -= _bfd_x86_elf_dtpoff_base (info);
3753 relocation = elf_x86_64_tpoff (info, relocation);
3756 case R_X86_64_TPOFF32:
3757 case R_X86_64_TPOFF64:
3758 BFD_ASSERT (bfd_link_executable (info));
3759 relocation = elf_x86_64_tpoff (info, relocation);
3762 case R_X86_64_DTPOFF64:
3763 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3764 relocation -= _bfd_x86_elf_dtpoff_base (info);
3771 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3772 because such sections are not SEC_ALLOC and thus ld.so will
3773 not process them. */
3774 if (unresolved_reloc
3775 && !((input_section->flags & SEC_DEBUGGING) != 0
3777 && _bfd_elf_section_offset (output_bfd, info, input_section,
3778 rel->r_offset) != (bfd_vma) -1)
3783 sec = h->root.u.def.section;
3784 if ((info->nocopyreloc
3785 || (eh->def_protected
3786 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3787 && !(h->root.u.def.section->flags & SEC_CODE))
3788 return elf_x86_64_need_pic (info, input_bfd, input_section,
3789 h, NULL, NULL, howto);
3794 /* xgettext:c-format */
3795 (_("%pB(%pA+%#Lx): unresolvable %s relocation against symbol `%s'"),
3800 h->root.root.string);
3806 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3807 contents, rel->r_offset,
3808 relocation, rel->r_addend);
3810 check_relocation_error:
3811 if (r != bfd_reloc_ok)
3816 name = h->root.root.string;
3819 name = bfd_elf_string_from_elf_section (input_bfd,
3820 symtab_hdr->sh_link,
3825 name = bfd_section_name (input_bfd, sec);
3828 if (r == bfd_reloc_overflow)
3830 if (converted_reloc)
3832 info->callbacks->einfo
3833 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3836 (*info->callbacks->reloc_overflow)
3837 (info, (h ? &h->root : NULL), name, howto->name,
3838 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3843 /* xgettext:c-format */
3844 (_("%pB(%pA+%#Lx): reloc against `%s': error %d"),
3845 input_bfd, input_section,
3846 rel->r_offset, name, (int) r);
3857 Elf_Internal_Shdr *rel_hdr;
3858 size_t deleted = rel - wrel;
3860 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3861 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3862 if (rel_hdr->sh_size == 0)
3864 /* It is too late to remove an empty reloc section. Leave
3866 ??? What is wrong with an empty section??? */
3867 rel_hdr->sh_size = rel_hdr->sh_entsize;
3870 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3871 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3872 input_section->reloc_count -= deleted;
3878 /* Finish up dynamic symbol handling. We set the contents of various
3879 dynamic sections here. */
3882 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3883 struct bfd_link_info *info,
3884 struct elf_link_hash_entry *h,
3885 Elf_Internal_Sym *sym)
3887 struct elf_x86_link_hash_table *htab;
3888 bfd_boolean use_plt_second;
3889 struct elf_x86_link_hash_entry *eh;
3890 bfd_boolean local_undefweak;
3892 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3896 /* Use the second PLT section only if there is .plt section. */
3897 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3899 eh = (struct elf_x86_link_hash_entry *) h;
3900 if (eh->no_finish_dynamic_symbol)
3903 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3904 resolved undefined weak symbols in executable so that their
3905 references have value 0 at run-time. */
3906 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3908 if (h->plt.offset != (bfd_vma) -1)
3911 bfd_vma got_offset, plt_offset;
3912 Elf_Internal_Rela rela;
3914 asection *plt, *gotplt, *relplt, *resolved_plt;
3915 const struct elf_backend_data *bed;
3916 bfd_vma plt_got_pcrel_offset;
3918 /* When building a static executable, use .iplt, .igot.plt and
3919 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3920 if (htab->elf.splt != NULL)
3922 plt = htab->elf.splt;
3923 gotplt = htab->elf.sgotplt;
3924 relplt = htab->elf.srelplt;
3928 plt = htab->elf.iplt;
3929 gotplt = htab->elf.igotplt;
3930 relplt = htab->elf.irelplt;
3933 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3935 /* Get the index in the procedure linkage table which
3936 corresponds to this symbol. This is the index of this symbol
3937 in all the symbols for which we are making plt entries. The
3938 first entry in the procedure linkage table is reserved.
3940 Get the offset into the .got table of the entry that
3941 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3942 bytes. The first three are reserved for the dynamic linker.
3944 For static executables, we don't reserve anything. */
3946 if (plt == htab->elf.splt)
3948 got_offset = (h->plt.offset / htab->plt.plt_entry_size
3949 - htab->plt.has_plt0);
3950 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
3954 got_offset = h->plt.offset / htab->plt.plt_entry_size;
3955 got_offset = got_offset * GOT_ENTRY_SIZE;
3958 /* Fill in the entry in the procedure linkage table. */
3959 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
3960 htab->plt.plt_entry_size);
3963 memcpy (htab->plt_second->contents + eh->plt_second.offset,
3964 htab->non_lazy_plt->plt_entry,
3965 htab->non_lazy_plt->plt_entry_size);
3967 resolved_plt = htab->plt_second;
3968 plt_offset = eh->plt_second.offset;
3973 plt_offset = h->plt.offset;
3976 /* Insert the relocation positions of the plt section. */
3978 /* Put offset the PC-relative instruction referring to the GOT entry,
3979 subtracting the size of that instruction. */
3980 plt_got_pcrel_offset = (gotplt->output_section->vma
3981 + gotplt->output_offset
3983 - resolved_plt->output_section->vma
3984 - resolved_plt->output_offset
3986 - htab->plt.plt_got_insn_size);
3988 /* Check PC-relative offset overflow in PLT entry. */
3989 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
3990 /* xgettext:c-format */
3991 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
3992 output_bfd, h->root.root.string);
3994 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
3995 (resolved_plt->contents + plt_offset
3996 + htab->plt.plt_got_offset));
3998 /* Fill in the entry in the global offset table, initially this
3999 points to the second part of the PLT entry. Leave the entry
4000 as zero for undefined weak symbol in PIE. No PLT relocation
4001 against undefined weak symbol in PIE. */
4002 if (!local_undefweak)
4004 if (htab->plt.has_plt0)
4005 bfd_put_64 (output_bfd, (plt->output_section->vma
4006 + plt->output_offset
4008 + htab->lazy_plt->plt_lazy_offset),
4009 gotplt->contents + got_offset);
4011 /* Fill in the entry in the .rela.plt section. */
4012 rela.r_offset = (gotplt->output_section->vma
4013 + gotplt->output_offset
4015 if (PLT_LOCAL_IFUNC_P (info, h))
4017 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4018 h->root.root.string,
4019 h->root.u.def.section->owner);
4021 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4022 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4023 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4024 rela.r_addend = (h->root.u.def.value
4025 + h->root.u.def.section->output_section->vma
4026 + h->root.u.def.section->output_offset);
4027 /* R_X86_64_IRELATIVE comes last. */
4028 plt_index = htab->next_irelative_index--;
4032 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4034 plt_index = htab->next_jump_slot_index++;
4037 /* Don't fill the second and third slots in PLT entry for
4038 static executables nor without PLT0. */
4039 if (plt == htab->elf.splt && htab->plt.has_plt0)
4042 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4044 /* Put relocation index. */
4045 bfd_put_32 (output_bfd, plt_index,
4046 (plt->contents + h->plt.offset
4047 + htab->lazy_plt->plt_reloc_offset));
4049 /* Put offset for jmp .PLT0 and check for overflow. We don't
4050 check relocation index for overflow since branch displacement
4051 will overflow first. */
4052 if (plt0_offset > 0x80000000)
4053 /* xgettext:c-format */
4054 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4055 output_bfd, h->root.root.string);
4056 bfd_put_32 (output_bfd, - plt0_offset,
4057 (plt->contents + h->plt.offset
4058 + htab->lazy_plt->plt_plt_offset));
4061 bed = get_elf_backend_data (output_bfd);
4062 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4063 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4066 else if (eh->plt_got.offset != (bfd_vma) -1)
4068 bfd_vma got_offset, plt_offset;
4069 asection *plt, *got;
4070 bfd_boolean got_after_plt;
4071 int32_t got_pcrel_offset;
4073 /* Set the entry in the GOT procedure linkage table. */
4074 plt = htab->plt_got;
4075 got = htab->elf.sgot;
4076 got_offset = h->got.offset;
4078 if (got_offset == (bfd_vma) -1
4079 || (h->type == STT_GNU_IFUNC && h->def_regular)
4084 /* Use the non-lazy PLT entry template for the GOT PLT since they
4085 are the identical. */
4086 /* Fill in the entry in the GOT procedure linkage table. */
4087 plt_offset = eh->plt_got.offset;
4088 memcpy (plt->contents + plt_offset,
4089 htab->non_lazy_plt->plt_entry,
4090 htab->non_lazy_plt->plt_entry_size);
4092 /* Put offset the PC-relative instruction referring to the GOT
4093 entry, subtracting the size of that instruction. */
4094 got_pcrel_offset = (got->output_section->vma
4095 + got->output_offset
4097 - plt->output_section->vma
4098 - plt->output_offset
4100 - htab->non_lazy_plt->plt_got_insn_size);
4102 /* Check PC-relative offset overflow in GOT PLT entry. */
4103 got_after_plt = got->output_section->vma > plt->output_section->vma;
4104 if ((got_after_plt && got_pcrel_offset < 0)
4105 || (!got_after_plt && got_pcrel_offset > 0))
4106 /* xgettext:c-format */
4107 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4108 output_bfd, h->root.root.string);
4110 bfd_put_32 (output_bfd, got_pcrel_offset,
4111 (plt->contents + plt_offset
4112 + htab->non_lazy_plt->plt_got_offset));
4115 if (!local_undefweak
4117 && (h->plt.offset != (bfd_vma) -1
4118 || eh->plt_got.offset != (bfd_vma) -1))
4120 /* Mark the symbol as undefined, rather than as defined in
4121 the .plt section. Leave the value if there were any
4122 relocations where pointer equality matters (this is a clue
4123 for the dynamic linker, to make function pointer
4124 comparisons work between an application and shared
4125 library), otherwise set it to zero. If a function is only
4126 called from a binary, there is no need to slow down
4127 shared libraries because of that. */
4128 sym->st_shndx = SHN_UNDEF;
4129 if (!h->pointer_equality_needed)
4133 /* Don't generate dynamic GOT relocation against undefined weak
4134 symbol in executable. */
4135 if (h->got.offset != (bfd_vma) -1
4136 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4137 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4138 && !local_undefweak)
4140 Elf_Internal_Rela rela;
4141 asection *relgot = htab->elf.srelgot;
4143 /* This symbol has an entry in the global offset table. Set it
4145 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4148 rela.r_offset = (htab->elf.sgot->output_section->vma
4149 + htab->elf.sgot->output_offset
4150 + (h->got.offset &~ (bfd_vma) 1));
4152 /* If this is a static link, or it is a -Bsymbolic link and the
4153 symbol is defined locally or was forced to be local because
4154 of a version file, we just want to emit a RELATIVE reloc.
4155 The entry in the global offset table will already have been
4156 initialized in the relocate_section function. */
4158 && h->type == STT_GNU_IFUNC)
4160 if (h->plt.offset == (bfd_vma) -1)
4162 /* STT_GNU_IFUNC is referenced without PLT. */
4163 if (htab->elf.splt == NULL)
4165 /* use .rel[a].iplt section to store .got relocations
4166 in static executable. */
4167 relgot = htab->elf.irelplt;
4169 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4171 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4172 h->root.root.string,
4173 h->root.u.def.section->owner);
4175 rela.r_info = htab->r_info (0,
4176 R_X86_64_IRELATIVE);
4177 rela.r_addend = (h->root.u.def.value
4178 + h->root.u.def.section->output_section->vma
4179 + h->root.u.def.section->output_offset);
4184 else if (bfd_link_pic (info))
4186 /* Generate R_X86_64_GLOB_DAT. */
4194 if (!h->pointer_equality_needed)
4197 /* For non-shared object, we can't use .got.plt, which
4198 contains the real function addres if we need pointer
4199 equality. We load the GOT entry with the PLT entry. */
4200 if (htab->plt_second != NULL)
4202 plt = htab->plt_second;
4203 plt_offset = eh->plt_second.offset;
4207 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4208 plt_offset = h->plt.offset;
4210 bfd_put_64 (output_bfd, (plt->output_section->vma
4211 + plt->output_offset
4213 htab->elf.sgot->contents + h->got.offset);
4217 else if (bfd_link_pic (info)
4218 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4220 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4222 BFD_ASSERT((h->got.offset & 1) != 0);
4223 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4224 rela.r_addend = (h->root.u.def.value
4225 + h->root.u.def.section->output_section->vma
4226 + h->root.u.def.section->output_offset);
4230 BFD_ASSERT((h->got.offset & 1) == 0);
4232 bfd_put_64 (output_bfd, (bfd_vma) 0,
4233 htab->elf.sgot->contents + h->got.offset);
4234 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4238 elf_append_rela (output_bfd, relgot, &rela);
4243 Elf_Internal_Rela rela;
4246 /* This symbol needs a copy reloc. Set it up. */
4247 VERIFY_COPY_RELOC (h, htab)
4249 rela.r_offset = (h->root.u.def.value
4250 + h->root.u.def.section->output_section->vma
4251 + h->root.u.def.section->output_offset);
4252 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4254 if (h->root.u.def.section == htab->elf.sdynrelro)
4255 s = htab->elf.sreldynrelro;
4257 s = htab->elf.srelbss;
4258 elf_append_rela (output_bfd, s, &rela);
4264 /* Finish up local dynamic symbol handling. We set the contents of
4265 various dynamic sections here. */
/* NOTE(review): elided listing -- gaps in the leading line numbers mark
   missing lines.  htab_traverse callback: SLOT holds a local (ifunc) hash
   entry, INF is the bfd_link_info; it forwards to the global finisher.  */
4268 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4270 struct elf_link_hash_entry *h
4271 = (struct elf_link_hash_entry *) *slot;
4272 struct bfd_link_info *info
4273 = (struct bfd_link_info *) inf;
/* Remaining arguments of the call are on elided continuation lines.  */
4275 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4279 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4280 here since undefined weak symbol may not be dynamic and may not be
4281 called for elf_x86_64_finish_dynamic_symbol. */
/* NOTE(review): elided listing -- gaps in the leading line numbers mark
   missing lines.  bfd_hash_traverse callback over the linker hash table.  */
4284 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4287 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4288 struct bfd_link_info *info = (struct bfd_link_info *) inf;
/* Only non-dynamic undefined weak symbols are handled here; dynamic ones
   already went through elf_x86_64_finish_dynamic_symbol.  */
4290 if (h->root.type != bfd_link_hash_undefweak
4291 || h->dynindx != -1)
4294 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4298 /* Used to decide how to sort relocs in an optimal manner for the
4299 dynamic linker, before writing them out. */
/* NOTE(review): elided listing -- gaps in the leading line numbers mark
   missing lines (braces, a condition arm, trailing arguments).  */
4301 static enum elf_reloc_type_class
4302 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4303 const asection *rel_sec ATTRIBUTE_UNUSED,
4304 const Elf_Internal_Rela *rela)
4306 bfd *abfd = info->output_bfd;
4307 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4308 struct elf_x86_link_hash_table *htab
4309 = elf_x86_hash_table (info, X86_64_ELF_DATA);
/* If the dynamic symbol table is available, classify relocations against
   STT_GNU_IFUNC symbols first, regardless of relocation type.  */
4311 if (htab->elf.dynsym != NULL
4312 && htab->elf.dynsym->contents != NULL)
4314 /* Check relocation against STT_GNU_IFUNC symbol if there are
4316 unsigned long r_symndx = htab->r_sym (rela->r_info);
4317 if (r_symndx != STN_UNDEF)
4319 Elf_Internal_Sym sym;
4320 if (!bed->s->swap_symbol_in (abfd,
4321 (htab->elf.dynsym->contents
4322 + r_symndx * bed->s->sizeof_sym),
4326 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4327 return reloc_class_ifunc;
/* Otherwise classify purely by relocation type.  */
4331 switch ((int) ELF32_R_TYPE (rela->r_info))
4333 case R_X86_64_IRELATIVE:
4334 return reloc_class_ifunc;
4335 case R_X86_64_RELATIVE:
4336 case R_X86_64_RELATIVE64:
4337 return reloc_class_relative;
4338 case R_X86_64_JUMP_SLOT:
4339 return reloc_class_plt;
4341 return reloc_class_copy;
4343 return reloc_class_normal;
4347 /* Finish up the dynamic sections. */
/* NOTE(review): elided listing -- gaps in the leading line numbers mark
   missing lines.  Writes PLT0 (and the TLSDESC trampoline) into .plt after
   the generic x86 finisher has run.  */
4350 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4351 struct bfd_link_info *info)
4353 struct elf_x86_link_hash_table *htab;
4355 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4359 if (! htab->elf.dynamic_sections_created)
4362 if (htab->elf.splt && htab->elf.splt->size > 0)
4364 elf_section_data (htab->elf.splt->output_section)
4365 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4367 if (htab->plt.has_plt0)
4369 /* Fill in the special first entry in the procedure linkage
4371 memcpy (htab->elf.splt->contents,
4372 htab->lazy_plt->plt0_entry,
4373 htab->lazy_plt->plt0_entry_size);
4374 /* Add offset for pushq GOT+8(%rip), since the instruction
4375 uses 6 bytes subtract this value. */
4376 bfd_put_32 (output_bfd,
4377 (htab->elf.sgotplt->output_section->vma
4378 + htab->elf.sgotplt->output_offset
4380 - htab->elf.splt->output_section->vma
4381 - htab->elf.splt->output_offset
4383 (htab->elf.splt->contents
4384 + htab->lazy_plt->plt0_got1_offset));
4385 /* Add offset for the PC-relative instruction accessing
4386 GOT+16, subtracting the offset to the end of that
4388 bfd_put_32 (output_bfd,
4389 (htab->elf.sgotplt->output_section->vma
4390 + htab->elf.sgotplt->output_offset
4392 - htab->elf.splt->output_section->vma
4393 - htab->elf.splt->output_offset
4394 - htab->lazy_plt->plt0_got2_insn_end),
4395 (htab->elf.splt->contents
4396 + htab->lazy_plt->plt0_got2_offset));
/* Lay out the TLS descriptor trampoline inside .plt, when one was
   reserved; its GOT slot is cleared first.  */
4399 if (htab->tlsdesc_plt)
4401 bfd_put_64 (output_bfd, (bfd_vma) 0,
4402 htab->elf.sgot->contents + htab->tlsdesc_got);
4404 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4405 htab->lazy_plt->plt0_entry,
4406 htab->lazy_plt->plt0_entry_size);
4408 /* Add offset for pushq GOT+8(%rip), since the
4409 instruction uses 6 bytes subtract this value. */
4410 bfd_put_32 (output_bfd,
4411 (htab->elf.sgotplt->output_section->vma
4412 + htab->elf.sgotplt->output_offset
4414 - htab->elf.splt->output_section->vma
4415 - htab->elf.splt->output_offset
4418 (htab->elf.splt->contents
4420 + htab->lazy_plt->plt0_got1_offset));
4421 /* Add offset for the PC-relative instruction accessing
4422 GOT+TDG, where TDG stands for htab->tlsdesc_got,
4423 subtracting the offset to the end of that
4425 bfd_put_32 (output_bfd,
4426 (htab->elf.sgot->output_section->vma
4427 + htab->elf.sgot->output_offset
4429 - htab->elf.splt->output_section->vma
4430 - htab->elf.splt->output_offset
4432 - htab->lazy_plt->plt0_got2_insn_end),
4433 (htab->elf.splt->contents
4435 + htab->lazy_plt->plt0_got2_offset));
4439 /* Fill PLT entries for undefined weak symbols in PIE. */
4440 if (bfd_link_pie (info))
4441 bfd_hash_traverse (&info->hash->table,
4442 elf_x86_64_pie_finish_undefweak_symbol,
4448 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4449 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4450 It has to be done before elf_link_sort_relocs is called so that
4451 dynamic relocations are properly sorted. */
/* NOTE(review): elided listing -- gaps in the leading line numbers mark
   missing lines (return type, braces, trailing traversal argument).  */
4454 elf_x86_64_output_arch_local_syms
4455 (bfd *output_bfd ATTRIBUTE_UNUSED,
4456 struct bfd_link_info *info,
4457 void *flaginfo ATTRIBUTE_UNUSED,
4458 int (*func) (void *, const char *,
4461 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4463 struct elf_x86_link_hash_table *htab
4464 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4468 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4469 htab_traverse (htab->loc_hash_table,
4470 elf_x86_64_finish_local_dynamic_symbol,
4476 /* Forward declaration. */
4477 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4479 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4480 dynamic relocations. */
/* NOTE(review): elided listing -- gaps in the leading line numbers mark
   missing lines.  Scans the candidate PLT sections of ABFD, identifies the
   PLT flavor (lazy / non-lazy / BND / IBT / second PLT) by matching entry
   templates, then delegates to the generic x86 synthesizer.  */
4483 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4484 long symcount ATTRIBUTE_UNUSED,
4485 asymbol **syms ATTRIBUTE_UNUSED,
4492 bfd_byte *plt_contents;
4494 const struct elf_x86_lazy_plt_layout *lazy_plt;
4495 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4496 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4497 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4498 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4499 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4501 enum elf_x86_plt_type plt_type;
4502 struct elf_x86_plt plts[] =
4504 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4505 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4506 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4507 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4508 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
/* Only dynamic objects or executables can have synthetic PLT symbols.  */
4513 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4516 if (dynsymcount <= 0)
4519 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
/* Select the candidate PLT templates; NaCl has only one lazy layout.  */
4523 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4525 lazy_plt = &elf_x86_64_lazy_plt;
4526 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4527 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4528 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4529 if (ABI_64_P (abfd))
4531 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4532 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4536 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4537 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4542 lazy_plt = &elf_x86_64_nacl_plt;
4543 non_lazy_plt = NULL;
4544 lazy_bnd_plt = NULL;
4545 non_lazy_bnd_plt = NULL;
4546 lazy_ibt_plt = NULL;
4547 non_lazy_ibt_plt = NULL;
4551 for (j = 0; plts[j].name != NULL; j++)
4553 plt = bfd_get_section_by_name (abfd, plts[j].name);
4554 if (plt == NULL || plt->size == 0)
4557 /* Get the PLT section contents. */
4558 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4559 if (plt_contents == NULL)
4561 if (!bfd_get_section_contents (abfd, (asection *) plt,
4562 plt_contents, 0, plt->size))
4564 free (plt_contents);
4568 /* Check what kind of PLT it is. */
4569 plt_type = plt_unknown;
4570 if (plts[j].type == plt_unknown
4571 && (plt->size >= (lazy_plt->plt_entry_size
4572 + lazy_plt->plt_entry_size)))
4574 /* Match lazy PLT first. Need to check the first two
4576 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4577 lazy_plt->plt0_got1_offset) == 0)
4578 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4580 plt_type = plt_lazy;
4581 else if (lazy_bnd_plt != NULL
4582 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4583 lazy_bnd_plt->plt0_got1_offset) == 0)
4584 && (memcmp (plt_contents + 6,
4585 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4587 plt_type = plt_lazy | plt_second;
4588 /* The fist entry in the lazy IBT PLT is the same as the
4590 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4591 lazy_ibt_plt->plt_entry,
4592 lazy_ibt_plt->plt_got_offset) == 0))
4593 lazy_plt = lazy_ibt_plt;
4595 lazy_plt = lazy_bnd_plt;
4599 if (non_lazy_plt != NULL
4600 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4601 && plt->size >= non_lazy_plt->plt_entry_size)
4603 /* Match non-lazy PLT. */
4604 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4605 non_lazy_plt->plt_got_offset) == 0)
4606 plt_type = plt_non_lazy;
4609 if (plt_type == plt_unknown || plt_type == plt_second)
4611 if (non_lazy_bnd_plt != NULL
4612 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4613 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4614 non_lazy_bnd_plt->plt_got_offset) == 0))
4616 /* Match BND PLT. */
4617 plt_type = plt_second;
4618 non_lazy_plt = non_lazy_bnd_plt;
4620 else if (non_lazy_ibt_plt != NULL
4621 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4622 && (memcmp (plt_contents,
4623 non_lazy_ibt_plt->plt_entry,
4624 non_lazy_ibt_plt->plt_got_offset) == 0))
4626 /* Match IBT PLT. */
4627 plt_type = plt_second;
4628 non_lazy_plt = non_lazy_ibt_plt;
/* Unrecognized layout: release the buffer for this section.  */
4632 if (plt_type == plt_unknown)
4634 free (plt_contents);
/* Record the detected layout parameters for the generic scanner.  */
4639 plts[j].type = plt_type;
4641 if ((plt_type & plt_lazy))
4643 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4644 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4645 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4646 /* Skip PLT0 in lazy PLT. */
4651 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4652 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4653 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4657 /* Skip lazy PLT when the second PLT is used. */
4658 if (plt_type == (plt_lazy | plt_second))
4662 n = plt->size / plts[j].plt_entry_size;
4667 plts[j].contents = plt_contents;
4670 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4671 (bfd_vma) 0, plts, dynsyms,
4675 /* Handle an x86-64 specific section when reading an object file. This
4676 is called when elfcode.h finds a section with an unknown type. */
/* NOTE(review): elided listing -- gaps in the leading line numbers mark
   missing lines.  Only SHT_X86_64_UNWIND sections are accepted here.  */
4679 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4680 const char *name, int shindex)
4682 if (hdr->sh_type != SHT_X86_64_UNWIND)
4685 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4691 /* Hook called by the linker routine which adds symbols from an object
4692 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
/* NOTE(review): elided listing -- gaps in the leading line numbers mark
   missing lines (parameters, braces, other switch cases).  */
4696 elf_x86_64_add_symbol_hook (bfd *abfd,
4697 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4698 Elf_Internal_Sym *sym,
4699 const char **namep ATTRIBUTE_UNUSED,
4700 flagword *flagsp ATTRIBUTE_UNUSED,
4706 switch (sym->st_shndx)
4708 case SHN_X86_64_LCOMMON:
/* Large-common symbols go into a dedicated "LARGE_COMMON" section,
   created on first use and marked SHF_X86_64_LARGE.  */
4709 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4712 lcomm = bfd_make_section_with_flags (abfd,
4716 | SEC_LINKER_CREATED));
4719 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4722 *valp = sym->st_size;
4730 /* Given a BFD section, try to locate the corresponding ELF section
/* NOTE(review): elided listing -- gaps in the leading line numbers mark
   missing lines.  Maps the generic large-common section back to the
   SHN_X86_64_LCOMMON section index.  */
4734 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4735 asection *sec, int *index_return)
4737 if (sec == &_bfd_elf_large_com_section)
4739 *index_return = SHN_X86_64_LCOMMON;
4745 /* Process a symbol. */
/* NOTE(review): elided listing -- gaps in the leading line numbers mark
   missing lines.  Rewrites SHN_X86_64_LCOMMON symbols to the generic
   large-common section so generic BFD code can handle them.  */
4748 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4751 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4753 switch (elfsym->internal_elf_sym.st_shndx)
4755 case SHN_X86_64_LCOMMON:
4756 asym->section = &_bfd_elf_large_com_section;
/* For common symbols the value is the size, per ELF convention.  */
4757 asym->value = elfsym->internal_elf_sym.st_size;
4758 /* Common symbol doesn't set BSF_GLOBAL. */
4759 asym->flags &= ~BSF_GLOBAL;
/* Return TRUE when SYM is a (normal or x86-64 large) common symbol.
   NOTE(review): elided listing -- the return-type line and braces are on
   missing lines (gaps in the leading line numbers).  */
4765 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4767 return (sym->st_shndx == SHN_COMMON
4768 || sym->st_shndx == SHN_X86_64_LCOMMON);
/* Return the section index to use for common symbols defined in SEC:
   SHN_X86_64_LCOMMON when SEC carries SHF_X86_64_LARGE (the non-large
   return lives on an elided line).  NOTE(review): elided listing.  */
4772 elf_x86_64_common_section_index (asection *sec)
4774 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4777 return SHN_X86_64_LCOMMON;
/* Return the BFD common section matching SEC: the standard common section
   unless SEC is SHF_X86_64_LARGE, in which case the large-common section.
   NOTE(review): elided listing -- return type and braces are missing.  */
4781 elf_x86_64_common_section (asection *sec)
4783 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4784 return bfd_com_section_ptr;
4786 return &_bfd_elf_large_com_section;
/* Symbol-merging hook.  NOTE(review): elided listing -- gaps in the
   leading line numbers mark missing lines (several parameters and parts
   of the governing condition are not visible).  */
4790 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4791 const Elf_Internal_Sym *sym,
4796 const asection *oldsec)
4798 /* A normal common symbol and a large common symbol result in a
4799 normal common symbol. We turn the large common symbol into a
4802 && h->root.type == bfd_link_hash_common
4804 && bfd_is_com_section (*psec)
4807 if (sym->st_shndx == SHN_COMMON
4808 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
/* Old definition was large common, new one is normal common: demote
   the existing entry to an ordinary "COMMON" section.  */
4810 h->root.u.c.p->section
4811 = bfd_make_section_old_way (oldbfd, "COMMON");
4812 h->root.u.c.p->section->flags = SEC_ALLOC;
4814 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4815 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4816 *psec = bfd_com_section_ptr;
/* Return how many extra program headers the large-section support needs.
   NOTE(review): elided listing -- return type, braces, and the counting
   statements are on missing lines (gaps in the leading line numbers).  */
4823 elf_x86_64_additional_program_headers (bfd *abfd,
4824 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4829 /* Check to see if we need a large readonly segment. */
4830 s = bfd_get_section_by_name (abfd, ".lrodata");
4831 if (s && (s->flags & SEC_LOAD))
4834 /* Check to see if we need a large data segment. Since .lbss sections
4835 is placed right after the .bss section, there should be no need for
4836 a large data segment just because of .lbss. */
4837 s = bfd_get_section_by_name (abfd, ".ldata");
4838 if (s && (s->flags & SEC_LOAD))
4844 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
/* NOTE(review): elided listing -- the return-type line and braces are
   missing.  Requires matching ELF class (32 vs 64) on top of the generic
   compatibility check.  */
4847 elf_x86_64_relocs_compatible (const bfd_target *input,
4848 const bfd_target *output)
4850 return ((xvec_get_elf_backend_data (input)->s->elfclass
4851 == xvec_get_elf_backend_data (output)->s->elfclass)
4852 && _bfd_elf_relocs_compatible (input, output));
4855 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4856 with GNU properties if found. Otherwise, return NULL. */
/* NOTE(review): elided listing -- gaps in the leading line numbers mark
   missing lines (the return type, braces, and some condition arms).  */
4859 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4861 struct elf_x86_init_table init_table;
/* Compile-time-style sanity check that R_X86_64_converted_reloc_bit does
   not collide with any real relocation number.  */
4863 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4864 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4865 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4866 != (int) R_X86_64_GNU_VTINHERIT)
4867 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4868 != (int) R_X86_64_GNU_VTENTRY))
4871 /* This is unused for x86-64. */
4872 init_table.plt0_pad_byte = 0x90;
/* Select PLT templates by target OS, MPX use (elided condition), and ABI.  */
4874 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
4878 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4879 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4883 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4884 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4887 if (ABI_64_P (info->output_bfd))
4889 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4890 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4894 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4895 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4900 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4901 init_table.non_lazy_plt = NULL;
4902 init_table.lazy_ibt_plt = NULL;
4903 init_table.non_lazy_ibt_plt = NULL;
/* Pick 64-bit or x32 relocation encode/decode helpers.  */
4906 if (ABI_64_P (info->output_bfd))
4908 init_table.r_info = elf64_r_info;
4909 init_table.r_sym = elf64_r_sym;
4913 init_table.r_info = elf32_r_info;
4914 init_table.r_sym = elf32_r_sym;
4917 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
/* Special-section table: the ".l*" large-model sections and their
   linkonce variants, all flagged SHF_X86_64_LARGE.  NOTE(review): elided
   listing -- the opening brace line of the initializer is missing.  */
4920 static const struct bfd_elf_special_section
4921 elf_x86_64_special_sections[]=
4923 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4924 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4925 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
4926 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4927 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4928 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4929 { NULL, 0, 0, 0, 0 }
/* Parameters for the generic ELF64 target template ("elf64-target.h",
   included at the end of this block): the standard little-endian
   elf64-x86-64 vector.  */
#define TARGET_LITTLE_SYM x86_64_elf64_vec
#define TARGET_LITTLE_NAME "elf64-x86-64"
#define ELF_ARCH bfd_arch_i386
#define ELF_TARGET_ID X86_64_ELF_DATA
#define ELF_MACHINE_CODE EM_X86_64
/* Page sizes: 2 MiB maximum page, 4 KiB minimum and common page.  */
#define ELF_MAXPAGESIZE 0x200000
#define ELF_MINPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000
/* Backend capabilities and layout choices.  */
#define elf_backend_can_gc_sections 1
#define elf_backend_can_refcount 1
#define elf_backend_want_got_plt 1
#define elf_backend_plt_readonly 1
#define elf_backend_want_plt_sym 0
#define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
#define elf_backend_rela_normal 1
#define elf_backend_plt_alignment 4
#define elf_backend_extern_protected_data 1
#define elf_backend_caches_rawsize 1
#define elf_backend_dtrel_excludes_plt 1
#define elf_backend_want_dynrelro 1
/* Hook the x86-64 specific routines into the generic ELF backend.  */
#define elf_info_to_howto elf_x86_64_info_to_howto
#define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
#define bfd_elf64_bfd_reloc_name_lookup \
  elf_x86_64_reloc_name_lookup
#define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
#define elf_backend_check_relocs elf_x86_64_check_relocs
#define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
#define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
#define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
#define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
/* Core-file (note section) handling.  */
#define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
#define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
#define elf_backend_write_core_note elf_x86_64_write_core_note
#define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
#define elf_backend_relocate_section elf_x86_64_relocate_section
#define elf_backend_init_index_section _bfd_elf_init_1_index_section
#define elf_backend_object_p elf64_x86_64_elf_object_p
#define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
#define elf_backend_section_from_shdr \
  elf_x86_64_section_from_shdr
#define elf_backend_section_from_bfd_section \
  elf_x86_64_elf_section_from_bfd_section
#define elf_backend_add_symbol_hook \
  elf_x86_64_add_symbol_hook
#define elf_backend_symbol_processing \
  elf_x86_64_symbol_processing
#define elf_backend_common_section_index \
  elf_x86_64_common_section_index
#define elf_backend_common_section \
  elf_x86_64_common_section
#define elf_backend_common_definition \
  elf_x86_64_common_definition
#define elf_backend_merge_symbol \
  elf_x86_64_merge_symbol
#define elf_backend_special_sections \
  elf_x86_64_special_sections
#define elf_backend_additional_program_headers \
  elf_x86_64_additional_program_headers
#define elf_backend_setup_gnu_properties \
  elf_x86_64_link_setup_gnu_properties
#define elf_backend_hide_symbol \
  _bfd_x86_elf_hide_symbol
#include "elf64-target.h"
/* CloudABI support.  Re-parameterize the template for the
   "elf64-x86-64-cloudabi" vector; all backend routines are shared with
   the base target above, only the OSABI differs.  */
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
#define ELF_OSABI ELFOSABI_CLOUDABI
#define elf64_bed elf64_x86_64_cloudabi_bed
#include "elf64-target.h"
/* FreeBSD support.  Same backend, ELFOSABI_FREEBSD in the header.  */
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
#define ELF_OSABI ELFOSABI_FREEBSD
#define elf64_bed elf64_x86_64_fbsd_bed
#include "elf64-target.h"
/* Solaris 2 support.  */
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
/* Solaris-specific per-arch backend data.
   NOTE(review): the initializer body appears truncated here — confirm
   the aggregate contents against the upstream source.  */
static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
#undef elf_backend_arch_data
#define elf_backend_arch_data &elf_x86_64_solaris_arch_bed
/* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
   objects won't be recognized.  */
#define elf64_bed elf64_x86_64_sol2_bed
/* The 64-bit static TLS arena size is rounded to the nearest 16-byte
   boundary.  */
#undef elf_backend_static_tls_alignment
#define elf_backend_static_tls_alignment 16
/* The Solaris 2 ABI requires a plt symbol on all platforms.
   Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
   File.  */
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 1
/* Solaris string tables are mergeable string sections.  */
#undef elf_backend_strtab_flags
#define elf_backend_strtab_flags SHF_STRINGS
/* Hook called when copying Solaris objects, meant to fix up the sh_info
   and sh_link fields of Solaris-specific section types (not yet
   implemented — see PR 19938 below).
   NOTE(review): the declaration and body surrounding this fragment look
   truncated in this copy — confirm against the upstream source.  */
elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
						  bfd *obfd ATTRIBUTE_UNUSED,
						  const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
						  Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
  /* PR 19938: FIXME: Need to add code for setting the sh_info
     and sh_link fields of Solaris specific section types.  */
#undef elf_backend_copy_special_section_fields
#define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
#include "elf64-target.h"
5088 /* Native Client support. */
5091 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5093 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5094 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
#define elf64_bed elf64_x86_64_nacl_bed
/* NaCl uses a uniform 64 KiB page size.  */
#undef ELF_MAXPAGESIZE
#undef ELF_MINPAGESIZE
#undef ELF_COMMONPAGESIZE
#define ELF_MAXPAGESIZE 0x10000
#define ELF_MINPAGESIZE 0x10000
#define ELF_COMMONPAGESIZE 0x10000
/* Restore defaults — undo the Solaris-specific overrides above.  */
#undef elf_backend_static_tls_alignment
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 0
#undef elf_backend_strtab_flags
#undef elf_backend_copy_special_section_fields
/* NaCl uses substantially different PLT entries for the same effects.  */
#undef elf_backend_plt_alignment
#define elf_backend_plt_alignment 5
#define NACL_PLT_ENTRY_SIZE 64
#define NACLMASK 0xe0 /* 32-byte alignment mask. */
5127 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5129 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5130 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5131 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5132 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5133 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5135 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5136 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5138 /* 32 bytes of nop to pad out to the standard size. */
5139 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5140 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5141 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5142 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5143 0x66, /* excess data16 prefix */
5147 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5149 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5150 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5151 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5152 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5154 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5155 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5156 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5158 /* Lazy GOT entries point here (32-byte aligned). */
5159 0x68, /* pushq immediate */
5160 0, 0, 0, 0, /* replaced with index into relocation table. */
5161 0xe9, /* jmp relative */
5162 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5164 /* 22 bytes of nop to pad out to the standard size. */
5165 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5166 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5167 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5170 /* .eh_frame covering the .plt section. */
5172 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5174 #if (PLT_CIE_LENGTH != 20 \
5175 || PLT_FDE_LENGTH != 36 \
5176 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5177 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5178 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5180 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5181 0, 0, 0, 0, /* CIE ID */
5182 1, /* CIE version */
5183 'z', 'R', 0, /* Augmentation string */
5184 1, /* Code alignment factor */
5185 0x78, /* Data alignment factor */
5186 16, /* Return address column */
5187 1, /* Augmentation size */
5188 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5189 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5190 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5191 DW_CFA_nop, DW_CFA_nop,
5193 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5194 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5195 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5196 0, 0, 0, 0, /* .plt size goes here */
5197 0, /* Augmentation size */
5198 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5199 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5200 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5201 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5202 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5203 13, /* Block length */
5204 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5205 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5206 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5207 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5208 DW_CFA_nop, DW_CFA_nop
5211 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5213 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5214 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5215 elf_x86_64_nacl_plt_entry, /* plt_entry */
5216 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5217 2, /* plt0_got1_offset */
5218 9, /* plt0_got2_offset */
5219 13, /* plt0_got2_insn_end */
5220 3, /* plt_got_offset */
5221 33, /* plt_reloc_offset */
5222 38, /* plt_plt_offset */
5223 7, /* plt_got_insn_size */
5224 42, /* plt_plt_insn_end */
5225 32, /* plt_lazy_offset */
5226 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5227 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5228 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5229 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
/* NaCl per-arch backend data.
   NOTE(review): the initializer body appears truncated here — confirm
   the aggregate contents against the upstream source.  */
static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
#undef elf_backend_arch_data
#define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
/* NaCl-specific object recognition and segment/header massaging.  */
#undef elf_backend_object_p
#define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map nacl_modify_segment_map
#undef elf_backend_modify_program_headers
#define elf_backend_modify_program_headers nacl_modify_program_headers
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing nacl_final_write_processing
#include "elf64-target.h"
5251 /* Native Client x32 support. */
5254 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5256 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5257 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
#define elf32_bed elf32_x86_64_nacl_bed
/* The x32 vector reuses the 64-bit reloc machinery (both encodings use
   ELF32_R_TYPE-compatible relocation info).  */
#define bfd_elf32_bfd_reloc_type_lookup \
  elf_x86_64_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup \
  elf_x86_64_reloc_name_lookup
#define bfd_elf32_get_synthetic_symtab \
  elf_x86_64_get_synthetic_symtab
#undef elf_backend_object_p
#define elf_backend_object_p \
  elf32_x86_64_nacl_elf_object_p
#undef elf_backend_bfd_from_remote_memory
#define elf_backend_bfd_from_remote_memory \
  _bfd_elf32_bfd_from_remote_memory
#undef elf_backend_size_info
#define elf_backend_size_info \
  _bfd_elf32_size_info
#include "elf32-target.h"
/* Restore defaults — undo the NaCl-specific overrides above.  */
#undef elf_backend_object_p
#define elf_backend_object_p elf64_x86_64_elf_object_p
#undef elf_backend_bfd_from_remote_memory
#undef elf_backend_size_info
#undef elf_backend_modify_segment_map
#undef elf_backend_modify_program_headers
#undef elf_backend_final_write_processing
5298 /* Intel L1OM support. */
5301 elf64_l1om_elf_object_p (bfd *abfd)
5303 /* Set the right machine number for an L1OM elf64 file. */
5304 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM l1om_elf64_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-l1om"
#define ELF_ARCH bfd_arch_l1om
#undef ELF_MACHINE_CODE
#define ELF_MACHINE_CODE EM_L1OM
#define elf64_bed elf64_l1om_bed
#undef elf_backend_object_p
#define elf_backend_object_p elf64_l1om_elf_object_p
/* Restore defaults — undo the NaCl page sizes and PLT alignment.  */
#undef ELF_MAXPAGESIZE
#undef ELF_MINPAGESIZE
#undef ELF_COMMONPAGESIZE
#define ELF_MAXPAGESIZE 0x200000
#define ELF_MINPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000
#undef elf_backend_plt_alignment
#define elf_backend_plt_alignment 4
#undef elf_backend_arch_data
#define elf_backend_arch_data &elf_x86_64_arch_bed
#include "elf64-target.h"
/* FreeBSD L1OM support.  Same backend as the L1OM vector above, with
   ELFOSABI_FREEBSD in the header.  */
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
#define ELF_OSABI ELFOSABI_FREEBSD
#define elf64_bed elf64_l1om_fbsd_bed
#include "elf64-target.h"
5355 /* Intel K1OM support. */
5358 elf64_k1om_elf_object_p (bfd *abfd)
5360 /* Set the right machine number for an K1OM elf64 file. */
5361 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM k1om_elf64_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-k1om"
#define ELF_ARCH bfd_arch_k1om
#undef ELF_MACHINE_CODE
#define ELF_MACHINE_CODE EM_K1OM
#define elf64_bed elf64_k1om_bed
#undef elf_backend_object_p
#define elf_backend_object_p elf64_k1om_elf_object_p
/* Restore defaults.  */
#undef elf_backend_static_tls_alignment
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 0
#include "elf64-target.h"
/* FreeBSD K1OM support.  Same backend as the K1OM vector above, with
   ELFOSABI_FREEBSD in the header.  */
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
#define ELF_OSABI ELFOSABI_FREEBSD
#define elf64_bed elf64_k1om_fbsd_bed
#include "elf64-target.h"
/* 32bit x86-64 (x32) support: ELF32 container with x86-64 machine code
   and the shared x86-64 backend routines.  */
#undef TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM x86_64_elf32_vec
#undef TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME "elf32-x86-64"
#define ELF_ARCH bfd_arch_i386
#undef ELF_MACHINE_CODE
#define ELF_MACHINE_CODE EM_X86_64
#undef elf_backend_object_p
#define elf_backend_object_p \
  elf32_x86_64_elf_object_p
#undef elf_backend_bfd_from_remote_memory
#define elf_backend_bfd_from_remote_memory \
  _bfd_elf32_bfd_from_remote_memory
#undef elf_backend_size_info
#define elf_backend_size_info \
  _bfd_elf32_size_info
#include "elf32-target.h"