1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2018 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
22 #include "elfxx-x86.h"
25 #include "libiberty.h"
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
/* Since 32-bit and 64-bit x86-64 encode relocation types in an
   identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
   the relocation type.  We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
   since they are the same.  */
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
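/* Reading guide (illustrative, not from the original sources): taking
   the R_X86_64_PC32 entry below as an example, a size value of 2
   selects a 4-byte field (3 means zero bytes and 4 means eight in this
   generation of BFD), bitsize 32 gives the number of significant bits,
   pc_relative TRUE makes the generic code subtract the place of the
   relocation, and complain_overflow_signed asks it to diagnose results
   outside the signed 32-bit range.  */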
46 static reloc_howto_type x86_64_elf_howto_table[] =
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
HOWTO(R_X86_64_PC16, 0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
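/* Editorial note: this bit is OR'ed into the internal r_type once a
   GOTPCREL-style relocation has been rewritten in place by
   elf_x86_64_convert_load_reloc below, and it is masked off again
   before any howto lookup, e.g.

     r_type &= ~R_X86_64_converted_reloc_bit;

   as done in elf_x86_64_info_to_howto and when inspecting the
   following relocation in elf_x86_64_check_tls_transition.  */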
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
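/* Editorial note: these predicates classify the internal r_type for the
   dynamic-relocation decisions later on (see the NEED_DYNAMIC_RELOCATION_P
   use in check_relocs); a PC-relative or size relocation against a symbol
   that resolves locally can usually be left entirely to the static
   linker.  */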
212 /* Map BFD relocs to the x86_64 elf relocs. */
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
219 static const struct elf_reloc_map x86_64_reloc_map[] =
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
272 if (r_type == (unsigned int) R_X86_64_32)
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
282 if (r_type >= (unsigned int) R_X86_64_standard)
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%B: invalid relocation type %d"),
287 r_type = R_X86_64_NONE;
292 i = r_type - (unsigned int) R_X86_64_vt_offset;
293 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
294 return &x86_64_elf_howto_table[i];
297 /* Given a BFD reloc type, return a HOWTO structure. */
298 static reloc_howto_type *
299 elf_x86_64_reloc_type_lookup (bfd *abfd,
300 bfd_reloc_code_real_type code)
304 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
307 if (x86_64_reloc_map[i].bfd_reloc_val == code)
308 return elf_x86_64_rtype_to_howto (abfd,
309 x86_64_reloc_map[i].elf_reloc_val);
314 static reloc_howto_type *
315 elf_x86_64_reloc_name_lookup (bfd *abfd,
320 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
322 /* Get x32 R_X86_64_32. */
323 reloc_howto_type *reloc
324 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
325 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
329 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
330 if (x86_64_elf_howto_table[i].name != NULL
331 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
332 return &x86_64_elf_howto_table[i];
337 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
340 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr,
341 Elf_Internal_Rela *dst)
345 r_type = ELF32_R_TYPE (dst->r_info);
346 if (r_type != (unsigned int) R_X86_64_GNU_VTINHERIT
347 && r_type != (unsigned int) R_X86_64_GNU_VTENTRY)
348 r_type &= ~R_X86_64_converted_reloc_bit;
349 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
BFD_ASSERT (r_type == cache_ptr->howto->type
	    || cache_ptr->howto->type == R_X86_64_NONE);
354 /* Support for core dump NOTE sections. */
356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
361 switch (note->descsz)
case 296:	/* sizeof (struct elf_prstatus) on Linux/x32 */
368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
case 336:	/* sizeof (struct elf_prstatus) on Linux/x86_64 */
381 elf_tdata (abfd)->core->signal
382 = bfd_get_16 (abfd, note->descdata + 12);
385 elf_tdata (abfd)->core->lwpid
386 = bfd_get_32 (abfd, note->descdata + 32);
395 /* Make a ".reg/999" section. */
396 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
397 size, note->descpos + offset);
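/* Editorial note: the switch above tells x32 and x86-64 core files
   apart purely by the size of the NT_PRSTATUS descriptor, then pulls
   the signal, LWP id and register block out of the note at the
   layout-specific offsets hard-coded above; the registers end up
   exposed via a ".reg" pseudosection.  */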
401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
403 switch (note->descsz)
408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
409 elf_tdata (abfd)->core->pid
410 = bfd_get_32 (abfd, note->descdata + 12);
411 elf_tdata (abfd)->core->program
412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
413 elf_tdata (abfd)->core->command
414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
418 elf_tdata (abfd)->core->pid
419 = bfd_get_32 (abfd, note->descdata + 24);
420 elf_tdata (abfd)->core->program
421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
422 elf_tdata (abfd)->core->command
423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
/* Note that for some reason a spurious space is tacked onto the end of
   the args in some implementations (at least one, anyway), so strip it
   off if it exists.  */
431 char *command = elf_tdata (abfd)->core->command;
432 int n = strlen (command);
434 if (0 < n && command[n - 1] == ' ')
435 command[n - 1] = '\0';
443 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
446 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
448 const char *fname, *psargs;
459 va_start (ap, note_type);
460 fname = va_arg (ap, const char *);
461 psargs = va_arg (ap, const char *);
464 if (bed->s->elfclass == ELFCLASS32)
467 memset (&data, 0, sizeof (data));
468 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
469 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
470 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
471 &data, sizeof (data));
476 memset (&data, 0, sizeof (data));
477 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
478 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
479 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
480 &data, sizeof (data));
485 va_start (ap, note_type);
486 pid = va_arg (ap, long);
487 cursig = va_arg (ap, int);
488 gregs = va_arg (ap, const void *);
491 if (bed->s->elfclass == ELFCLASS32)
493 if (bed->elf_machine_code == EM_X86_64)
495 prstatusx32_t prstat;
496 memset (&prstat, 0, sizeof (prstat));
498 prstat.pr_cursig = cursig;
499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
501 &prstat, sizeof (prstat));
506 memset (&prstat, 0, sizeof (prstat));
508 prstat.pr_cursig = cursig;
509 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
510 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
511 &prstat, sizeof (prstat));
517 memset (&prstat, 0, sizeof (prstat));
519 prstat.pr_cursig = cursig;
520 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
521 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
522 &prstat, sizeof (prstat));
529 /* Functions for the x86-64 ELF linker. */
531 /* The size in bytes of an entry in the global offset table. */
533 #define GOT_ENTRY_SIZE 8
535 /* The size in bytes of an entry in the lazy procedure linkage table. */
537 #define LAZY_PLT_ENTRY_SIZE 16
/* The size in bytes of an entry in the non-lazy procedure linkage
   table.  */
542 #define NON_LAZY_PLT_ENTRY_SIZE 8
/* The first entry in a lazy procedure linkage table looks like this.
   See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
   works.  */
548 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
550 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
551 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
552 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
555 /* Subsequent entries in a lazy procedure linkage table look like this. */
557 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
559 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
560 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
561 0x68, /* pushq immediate */
562 0, 0, 0, 0, /* replaced with index into relocation table. */
563 0xe9, /* jmp relative */
564 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
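/* Illustrative sketch of lazy resolution (addresses symbolic): on the
   first call the "jmpq *name@GOTPC(%rip)" above goes through a GOT slot
   that the linker initialised to point right back at the following
   "pushq", so the entry pushes its relocation index and jumps to PLT0.
   PLT0 then pushes GOT+8 (the link map) and jumps through GOT+16 (the
   dynamic linker's resolver), which patches the GOT slot so that later
   calls reach the target directly.  */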
/* The first entry in a lazy procedure linkage table with BND prefix
   looks like this.  */
570 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
572 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
573 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
574 0x0f, 0x1f, 0 /* nopl (%rax) */
/* Subsequent entries for branches with BND prefix in a lazy procedure
   linkage table look like this.  */
580 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
582 0x68, 0, 0, 0, 0, /* pushq immediate */
583 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
584 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
/* The first entry in the IBT-enabled lazy procedure linkage table is
   the same as the lazy PLT with BND prefix, so that bound registers are
   preserved when control is passed to the dynamic linker.  Subsequent
   entries for an IBT-enabled lazy procedure linkage table look like
   this.  */
593 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
595 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
596 0x68, 0, 0, 0, 0, /* pushq immediate */
597 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
601 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
602 is the same as the normal lazy PLT. Subsequent entries for an
603 x32 IBT-enabled lazy procedure linkage table look like this. */
605 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
607 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
608 0x68, 0, 0, 0, 0, /* pushq immediate */
609 0xe9, 0, 0, 0, 0, /* jmpq relative */
610 0x66, 0x90 /* xchg %ax,%ax */
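/* Editorial note: the leading "endbr64" in the IBT variants is the CET
   indirect-branch landing pad; with indirect branch tracking enabled
   the CPU insists that indirectly-reached code, such as a PLT entry
   entered via the GOT, start with this instruction, while on older
   processors it simply executes as a NOP.  */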
/* Entries in the non-lazy procedure linkage table look like this.  */
615 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
617 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
618 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
619 0x66, 0x90 /* xchg %ax,%ax */
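/* Editorial note: the non-lazy layouts skip the resolver dance
   entirely; the GOT slot is expected to be filled in before the first
   call (typically because lazy binding is not used for that symbol),
   so each entry is just a single indirect jump padded to its fixed
   size.  */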
/* Entries for branches with BND prefix in the non-lazy procedure
   linkage table look like this.  */
625 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
627 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
628 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
/* Entries for branches with IBT enabled in the non-lazy procedure
   linkage table look like this.  They have the same size as the lazy
   PLT entries.  */
636 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
638 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
639 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
640 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
641 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
/* Entries for branches with IBT enabled in the x32 non-lazy procedure
   linkage table look like this.  They have the same size as the lazy
   PLT entries.  */
648 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
650 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
651 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
652 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
653 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
656 /* .eh_frame covering the lazy .plt section. */
658 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
660 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
661 0, 0, 0, 0, /* CIE ID */
663 'z', 'R', 0, /* Augmentation string */
664 1, /* Code alignment factor */
665 0x78, /* Data alignment factor */
666 16, /* Return address column */
667 1, /* Augmentation size */
668 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
669 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
670 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
671 DW_CFA_nop, DW_CFA_nop,
673 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
674 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
675 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
676 0, 0, 0, 0, /* .plt size goes here */
677 0, /* Augmentation size */
678 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
679 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
680 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
681 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
682 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
683 11, /* Block length */
684 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
685 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
686 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
687 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
688 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
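/* Editorial sketch of the DW_CFA_def_cfa_expression above, which
   covers the regular entries from .plt+16 on: it computes

     CFA = (%rsp + 8) + ((((%rip & 15) >= 11) ? 1 : 0) << 3)

   i.e. once execution is 11 or more bytes into a 16-byte slot the
   "pushq" of the relocation index has happened, so one extra 8-byte
   value is on the stack and the CFA moves up by 8.  The BND, IBT and
   x32 IBT tables below use different literals (5, 10 and 9) for their
   instruction layouts.  */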
691 /* .eh_frame covering the lazy BND .plt section. */
693 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
695 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
696 0, 0, 0, 0, /* CIE ID */
698 'z', 'R', 0, /* Augmentation string */
699 1, /* Code alignment factor */
700 0x78, /* Data alignment factor */
701 16, /* Return address column */
702 1, /* Augmentation size */
703 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
704 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
705 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
706 DW_CFA_nop, DW_CFA_nop,
708 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
709 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
710 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
711 0, 0, 0, 0, /* .plt size goes here */
712 0, /* Augmentation size */
713 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
714 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
715 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
716 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
717 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
718 11, /* Block length */
719 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
720 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
721 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
722 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
723 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
726 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
728 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
730 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
731 0, 0, 0, 0, /* CIE ID */
733 'z', 'R', 0, /* Augmentation string */
734 1, /* Code alignment factor */
735 0x78, /* Data alignment factor */
736 16, /* Return address column */
737 1, /* Augmentation size */
738 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
739 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
740 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
741 DW_CFA_nop, DW_CFA_nop,
743 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
744 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
745 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
746 0, 0, 0, 0, /* .plt size goes here */
747 0, /* Augmentation size */
748 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
749 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
750 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
751 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
752 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
753 11, /* Block length */
754 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
755 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
756 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
757 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
758 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
761 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
763 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
765 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
766 0, 0, 0, 0, /* CIE ID */
768 'z', 'R', 0, /* Augmentation string */
769 1, /* Code alignment factor */
770 0x78, /* Data alignment factor */
771 16, /* Return address column */
772 1, /* Augmentation size */
773 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
774 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
775 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
776 DW_CFA_nop, DW_CFA_nop,
778 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
779 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
780 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
781 0, 0, 0, 0, /* .plt size goes here */
782 0, /* Augmentation size */
783 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
784 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
785 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
786 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
787 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
788 11, /* Block length */
789 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
790 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
791 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
792 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
793 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
796 /* .eh_frame covering the non-lazy .plt section. */
798 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
800 #define PLT_GOT_FDE_LENGTH 20
801 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
802 0, 0, 0, 0, /* CIE ID */
804 'z', 'R', 0, /* Augmentation string */
805 1, /* Code alignment factor */
806 0x78, /* Data alignment factor */
807 16, /* Return address column */
808 1, /* Augmentation size */
809 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
810 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
811 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
812 DW_CFA_nop, DW_CFA_nop,
814 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
815 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
816 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
817 0, 0, 0, 0, /* non-lazy .plt size goes here */
818 0, /* Augmentation size */
819 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
820 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
823 /* These are the standard parameters. */
824 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
826 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
827 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
828 elf_x86_64_lazy_plt_entry, /* plt_entry */
829 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
830 2, /* plt0_got1_offset */
831 8, /* plt0_got2_offset */
832 12, /* plt0_got2_insn_end */
833 2, /* plt_got_offset */
834 7, /* plt_reloc_offset */
835 12, /* plt_plt_offset */
836 6, /* plt_got_insn_size */
837 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
838 6, /* plt_lazy_offset */
839 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
840 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
841 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
842 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
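/* Editorial note: the offsets above index into the byte templates
   earlier in the file: plt0_got1_offset 2 and plt0_got2_offset 8 are
   where the GOT+8 and GOT+16 displacements of PLT0 get patched,
   plt_got_offset 2 is the displacement of the per-entry "jmpq *",
   plt_reloc_offset 7 is the immediate of the "pushq" holding the
   relocation index, and plt_plt_offset 12 is the operand of the final
   "jmp" back to PLT0.  */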
845 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
847 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
848 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
849 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
850 2, /* plt_got_offset */
851 6, /* plt_got_insn_size */
852 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
853 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
856 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
858 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
859 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
860 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
861 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
862 2, /* plt0_got1_offset */
863 1+8, /* plt0_got2_offset */
864 1+12, /* plt0_got2_insn_end */
865 1+2, /* plt_got_offset */
866 1, /* plt_reloc_offset */
867 7, /* plt_plt_offset */
868 1+6, /* plt_got_insn_size */
869 11, /* plt_plt_insn_end */
870 0, /* plt_lazy_offset */
871 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
872 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
873 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
874 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
877 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
879 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
880 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
881 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
882 1+2, /* plt_got_offset */
883 1+6, /* plt_got_insn_size */
884 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
885 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
888 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
890 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
891 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
892 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
893 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
894 2, /* plt0_got1_offset */
895 1+8, /* plt0_got2_offset */
896 1+12, /* plt0_got2_insn_end */
897 4+1+2, /* plt_got_offset */
898 4+1, /* plt_reloc_offset */
899 4+1+6, /* plt_plt_offset */
900 4+1+6, /* plt_got_insn_size */
901 4+1+5+5, /* plt_plt_insn_end */
902 0, /* plt_lazy_offset */
903 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
904 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
905 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
906 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
909 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
911 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
912 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
913 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
914 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
915 2, /* plt0_got1_offset */
916 8, /* plt0_got2_offset */
917 12, /* plt0_got2_insn_end */
918 4+2, /* plt_got_offset */
919 4+1, /* plt_reloc_offset */
920 4+6, /* plt_plt_offset */
921 4+6, /* plt_got_insn_size */
922 4+5+5, /* plt_plt_insn_end */
923 0, /* plt_lazy_offset */
924 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
925 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
926 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
927 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
930 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
932 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
933 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
934 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
935 4+1+2, /* plt_got_offset */
936 4+1+6, /* plt_got_insn_size */
937 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
938 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
941 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
943 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
944 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
945 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
946 4+2, /* plt_got_offset */
947 4+6, /* plt_got_insn_size */
948 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
949 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
952 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
957 #define elf_backend_arch_data &elf_x86_64_arch_bed
960 elf64_x86_64_elf_object_p (bfd *abfd)
962 /* Set the right machine number for an x86-64 elf64 file. */
963 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
968 elf32_x86_64_elf_object_p (bfd *abfd)
970 /* Set the right machine number for an x86-64 elf32 file. */
971 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
/* Return TRUE if the TLS access code sequence supports the
   transition.  */
979 elf_x86_64_check_tls_transition (bfd *abfd,
980 struct bfd_link_info *info,
983 Elf_Internal_Shdr *symtab_hdr,
984 struct elf_link_hash_entry **sym_hashes,
986 const Elf_Internal_Rela *rel,
987 const Elf_Internal_Rela *relend)
990 unsigned long r_symndx;
991 bfd_boolean largepic = FALSE;
992 struct elf_link_hash_entry *h;
994 struct elf_x86_link_hash_table *htab;
996 bfd_boolean indirect_call;
998 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
999 offset = rel->r_offset;
1002 case R_X86_64_TLSGD:
1003 case R_X86_64_TLSLD:
1004 if ((rel + 1) >= relend)
1007 if (r_type == R_X86_64_TLSGD)
/* Check transition from GD access model.  For 64bit, only
	.byte 0x66; leaq foo@tlsgd(%rip), %rdi
	.word 0x6666; rex64; call __tls_get_addr@PLT
   or
	.byte 0x66; leaq foo@tlsgd(%rip), %rdi
	.byte 0x66; rex64
	call *__tls_get_addr@GOTPCREL(%rip)
	which may be converted to
	addr32 call __tls_get_addr
   can transit to a different access model.  For 32bit, only
	leaq foo@tlsgd(%rip), %rdi
	.word 0x6666; rex64; call __tls_get_addr@PLT
   or
	leaq foo@tlsgd(%rip), %rdi
	.byte 0x66; rex64
	call *__tls_get_addr@GOTPCREL(%rip)
	which may be converted to
	addr32 call __tls_get_addr
   can transit to a different access model.  For largepic,
   we also support:
	leaq foo@tlsgd(%rip), %rdi
	movabsq $__tls_get_addr@pltoff, %rax
	addq %r15, %rax
	call *%rax
   or
	leaq foo@tlsgd(%rip), %rdi
	movabsq $__tls_get_addr@pltoff, %rax
	addq %rbx, %rax
	call *%rax  */
1039 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1041 if ((offset + 12) > sec->size)
1044 call = contents + offset + 4;
1046 || !((call[1] == 0x48
1054 && call[3] == 0xe8)))
1056 if (!ABI_64_P (abfd)
1057 || (offset + 19) > sec->size
1059 || memcmp (call - 7, leaq + 1, 3) != 0
1060 || memcmp (call, "\x48\xb8", 2) != 0
1064 || !((call[10] == 0x48 && call[12] == 0xd8)
1065 || (call[10] == 0x4c && call[12] == 0xf8)))
1069 else if (ABI_64_P (abfd))
1072 || memcmp (contents + offset - 4, leaq, 4) != 0)
1078 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1081 indirect_call = call[2] == 0xff;
/* Check transition from LD access model.  Only
	leaq foo@tlsld(%rip), %rdi;
	call __tls_get_addr@PLT
   or
	leaq foo@tlsld(%rip), %rdi;
	call *__tls_get_addr@GOTPCREL(%rip)
	which may be converted to
	addr32 call __tls_get_addr
   can transit to a different access model.  For largepic,
   we also support:
	leaq foo@tlsld(%rip), %rdi
	movabsq $__tls_get_addr@pltoff, %rax
	addq %rbx, %rax
	call *%rax
   or
	leaq foo@tlsld(%rip), %rdi
	movabsq $__tls_get_addr@pltoff, %rax
	addq %r15, %rax
	call *%rax  */
1105 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1107 if (offset < 3 || (offset + 9) > sec->size)
1110 if (memcmp (contents + offset - 3, lea, 3) != 0)
1113 call = contents + offset + 4;
1114 if (!(call[0] == 0xe8
1115 || (call[0] == 0xff && call[1] == 0x15)
1116 || (call[0] == 0x67 && call[1] == 0xe8)))
1118 if (!ABI_64_P (abfd)
1119 || (offset + 19) > sec->size
1120 || memcmp (call, "\x48\xb8", 2) != 0
1124 || !((call[10] == 0x48 && call[12] == 0xd8)
1125 || (call[10] == 0x4c && call[12] == 0xf8)))
1129 indirect_call = call[0] == 0xff;
1132 r_symndx = htab->r_sym (rel[1].r_info);
1133 if (r_symndx < symtab_hdr->sh_info)
1136 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1138 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1142 r_type = (ELF32_R_TYPE (rel[1].r_info)
1143 & ~R_X86_64_converted_reloc_bit);
1145 return r_type == R_X86_64_PLTOFF64;
1146 else if (indirect_call)
1147 return r_type == R_X86_64_GOTPCRELX;
1149 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1152 case R_X86_64_GOTTPOFF:
1153 /* Check transition from IE access model:
1154 mov foo@gottpoff(%rip), %reg
1155 add foo@gottpoff(%rip), %reg
1158 /* Check REX prefix first. */
1159 if (offset >= 3 && (offset + 4) <= sec->size)
1161 val = bfd_get_8 (abfd, contents + offset - 3);
1162 if (val != 0x48 && val != 0x4c)
1164 /* X32 may have 0x44 REX prefix or no REX prefix. */
1165 if (ABI_64_P (abfd))
1171 /* X32 may not have any REX prefix. */
1172 if (ABI_64_P (abfd))
1174 if (offset < 2 || (offset + 3) > sec->size)
1178 val = bfd_get_8 (abfd, contents + offset - 2);
1179 if (val != 0x8b && val != 0x03)
1182 val = bfd_get_8 (abfd, contents + offset - 1);
1183 return (val & 0xc7) == 5;
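/* Editorial note on the test above: (modrm & 0xc7) == 5 accepts any
   reg field but requires mod == 00 and r/m == 101, which in 64-bit
   code is exactly the disp32(%rip) form that a GOT-relative mov/add
   must use here.  */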
1185 case R_X86_64_GOTPC32_TLSDESC:
1186 /* Check transition from GDesc access model:
1187 leaq x@tlsdesc(%rip), %rax
   Make sure it's a leaq adding rip to a 32-bit offset
   into any register, although it's probably almost always
   going to be rax.  */
1193 if (offset < 3 || (offset + 4) > sec->size)
1196 val = bfd_get_8 (abfd, contents + offset - 3);
1197 if ((val & 0xfb) != 0x48)
1200 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1203 val = bfd_get_8 (abfd, contents + offset - 1);
1204 return (val & 0xc7) == 0x05;
1206 case R_X86_64_TLSDESC_CALL:
1207 /* Check transition from GDesc access model:
1208 call *x@tlsdesc(%rax)
1210 if (offset + 2 <= sec->size)
1212 /* Make sure that it's a call *x@tlsdesc(%rax). */
1213 call = contents + offset;
1214 return call[0] == 0xff && call[1] == 0x10;
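/* Editorial note: opcode 0xff with ModRM byte 0x10 (mod 00, reg /2,
   r/m 000) encodes "call *(%rax)", the canonical TLS descriptor call
   sequence that the GDesc relaxation expects.  */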
1224 /* Return TRUE if the TLS access transition is OK or no transition
1225 will be performed. Update R_TYPE if there is a transition. */
1228 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1229 asection *sec, bfd_byte *contents,
1230 Elf_Internal_Shdr *symtab_hdr,
1231 struct elf_link_hash_entry **sym_hashes,
1232 unsigned int *r_type, int tls_type,
1233 const Elf_Internal_Rela *rel,
1234 const Elf_Internal_Rela *relend,
1235 struct elf_link_hash_entry *h,
1236 unsigned long r_symndx,
1237 bfd_boolean from_relocate_section)
1239 unsigned int from_type = *r_type;
1240 unsigned int to_type = from_type;
1241 bfd_boolean check = TRUE;
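/* Editorial summary of what follows: when building an executable,
   GD/GDesc/IE accesses may be retargeted to the initial-exec or
   local-exec models (R_X86_64_GOTTPOFF or R_X86_64_TPOFF32) and LD
   accesses to local-exec, but only if the code sequence around the
   relocation matches one of the patterns recognised by
   elf_x86_64_check_tls_transition above.  */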
1243 /* Skip TLS transition for functions. */
1245 && (h->type == STT_FUNC
1246 || h->type == STT_GNU_IFUNC))
1251 case R_X86_64_TLSGD:
1252 case R_X86_64_GOTPC32_TLSDESC:
1253 case R_X86_64_TLSDESC_CALL:
1254 case R_X86_64_GOTTPOFF:
1255 if (bfd_link_executable (info))
1258 to_type = R_X86_64_TPOFF32;
1260 to_type = R_X86_64_GOTTPOFF;
1263 /* When we are called from elf_x86_64_relocate_section, there may
1264 be additional transitions based on TLS_TYPE. */
1265 if (from_relocate_section)
1267 unsigned int new_to_type = to_type;
1269 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1270 new_to_type = R_X86_64_TPOFF32;
1272 if (to_type == R_X86_64_TLSGD
1273 || to_type == R_X86_64_GOTPC32_TLSDESC
1274 || to_type == R_X86_64_TLSDESC_CALL)
1276 if (tls_type == GOT_TLS_IE)
1277 new_to_type = R_X86_64_GOTTPOFF;
1280 /* We checked the transition before when we were called from
1281 elf_x86_64_check_relocs. We only want to check the new
1282 transition which hasn't been checked before. */
1283 check = new_to_type != to_type && from_type == to_type;
1284 to_type = new_to_type;
1289 case R_X86_64_TLSLD:
1290 if (bfd_link_executable (info))
1291 to_type = R_X86_64_TPOFF32;
1298 /* Return TRUE if there is no transition. */
1299 if (from_type == to_type)
1302 /* Check if the transition can be performed. */
1304 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1305 symtab_hdr, sym_hashes,
1306 from_type, rel, relend))
1308 reloc_howto_type *from, *to;
1311 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1312 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1315 name = h->root.root.string;
1318 struct elf_x86_link_hash_table *htab;
1320 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1325 Elf_Internal_Sym *isym;
1327 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1329 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1334 /* xgettext:c-format */
1335 (_("%B: TLS transition from %s to %s against `%s' at %#Lx "
1336 "in section `%A' failed"),
1337 abfd, from->name, to->name, name, rel->r_offset, sec);
1338 bfd_set_error (bfd_error_bad_value);
/* Rename some of the generic section flags to better document how they
   are used here.  */
1348 #define check_relocs_failed sec_flg0
1351 elf_x86_64_need_pic (struct bfd_link_info *info,
1352 bfd *input_bfd, asection *sec,
1353 struct elf_link_hash_entry *h,
1354 Elf_Internal_Shdr *symtab_hdr,
1355 Elf_Internal_Sym *isym,
1356 reloc_howto_type *howto)
1359 const char *und = "";
1360 const char *pic = "";
1366 name = h->root.root.string;
1367 switch (ELF_ST_VISIBILITY (h->other))
1370 v = _("hidden symbol ");
1373 v = _("internal symbol ");
1376 v = _("protected symbol ");
1379 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1380 v = _("protected symbol ");
1383 pic = _("; recompile with -fPIC");
1387 if (!h->def_regular && !h->def_dynamic)
1388 und = _("undefined ");
1392 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1393 pic = _("; recompile with -fPIC");
1396 if (bfd_link_dll (info))
1397 object = _("a shared object");
1398 else if (bfd_link_pie (info))
1399 object = _("a PIE object");
1401 object = _("a PDE object");
1403 /* xgettext:c-format */
1404 _bfd_error_handler (_("%B: relocation %s against %s%s`%s' can "
1405 "not be used when making %s%s"),
1406 input_bfd, howto->name, und, v, name,
1408 bfd_set_error (bfd_error_bad_value);
1409 sec->check_relocs_failed = 1;
/* With the local symbol, foo, we convert
   mov foo@GOTPCREL(%rip), %reg
   to
   lea foo(%rip), %reg
   and convert
   call/jmp *foo@GOTPCREL(%rip)
   to
   nop call foo/jmp foo nop
   When PIC is false, convert
   test %reg, foo@GOTPCREL(%rip)
   to
   test $foo, %reg
   and convert
   binop foo@GOTPCREL(%rip), %reg
   to
   binop $foo, %reg
   where binop is one of adc, add, and, cmp, or, sbb, sub, xor
   instructions.  */
1433 elf_x86_64_convert_load_reloc (bfd *abfd,
1435 unsigned int *r_type_p,
1436 Elf_Internal_Rela *irel,
1437 struct elf_link_hash_entry *h,
1438 bfd_boolean *converted,
1439 struct bfd_link_info *link_info)
1441 struct elf_x86_link_hash_table *htab;
1443 bfd_boolean no_overflow;
1445 bfd_boolean to_reloc_pc32;
1447 bfd_signed_vma raddend;
1448 unsigned int opcode;
1450 unsigned int r_type = *r_type_p;
1451 unsigned int r_symndx;
1452 bfd_vma roff = irel->r_offset;
1454 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1457 raddend = irel->r_addend;
1458 /* Addend for 32-bit PC-relative relocation must be -4. */
1462 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1463 is_pic = bfd_link_pic (link_info);
1465 relocx = (r_type == R_X86_64_GOTPCRELX
1466 || r_type == R_X86_64_REX_GOTPCRELX);
1468 /* TRUE if --no-relax is used. */
1469 no_overflow = link_info->disable_target_specific_optimizations > 1;
1471 r_symndx = htab->r_sym (irel->r_info);
1473 opcode = bfd_get_8 (abfd, contents + roff - 2);
1475 /* Convert mov to lea since it has been done for a while. */
1478 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1479 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1480 test, xor instructions. */
/* We convert only to R_X86_64_PC32:
   1. Branch.
   2. R_X86_64_GOTPCREL since we can't modify REX byte.
   3. no_overflow is true.
   4. PIC.  */
1491 to_reloc_pc32 = (opcode == 0xff
1496 /* Get the symbol referred to by the reloc. */
1499 Elf_Internal_Sym *isym
1500 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1502 /* Skip relocation against undefined symbols. */
1503 if (isym->st_shndx == SHN_UNDEF)
1506 if (isym->st_shndx == SHN_ABS)
1507 tsec = bfd_abs_section_ptr;
1508 else if (isym->st_shndx == SHN_COMMON)
1509 tsec = bfd_com_section_ptr;
1510 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1511 tsec = &_bfd_elf_large_com_section;
1513 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
/* An undefined weak symbol is only bound locally in an executable
   and its reference is resolved as 0 without relocation overflow.
   We can only perform this optimization for GOTPCRELX relocations
   since we need to modify the REX byte.  It is OK to convert mov
   with R_X86_64_GOTPCREL to R_X86_64_PC32.  */
1523 bfd_boolean local_ref;
1524 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1526 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1527 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1528 if ((relocx || opcode == 0x8b)
1529 && (h->root.type == bfd_link_hash_undefweak
/* Skip for branch instructions since R_X86_64_PC32 may overflow.  */
/* For non-branch instructions, we can convert to
   R_X86_64_32/R_X86_64_32S since we know whether there
   is a REX byte.  */
1545 to_reloc_pc32 = FALSE;
1548 /* Since we don't know the current PC when PIC is true,
1549 we can't convert to R_X86_64_PC32. */
1550 if (to_reloc_pc32 && is_pic)
/* Avoid optimizing GOTPCREL relocations against _DYNAMIC since ld.so
   may use its link-time address.  */
1557 else if (h->start_stop
1560 || h->root.type == bfd_link_hash_defined
1561 || h->root.type == bfd_link_hash_defweak)
1562 && h != htab->elf.hdynamic
1565 /* bfd_link_hash_new or bfd_link_hash_undefined is
1566 set by an assignment in a linker script in
1567 bfd_elf_record_link_assignment. start_stop is set
on __start_SECNAME/__stop_SECNAME which mark section SECNAME.  */
1573 && (h->root.type == bfd_link_hash_new
1574 || h->root.type == bfd_link_hash_undefined
1575 || ((h->root.type == bfd_link_hash_defined
1576 || h->root.type == bfd_link_hash_defweak)
1577 && h->root.u.def.section == bfd_und_section_ptr))))
1579 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1584 tsec = h->root.u.def.section;
1590 /* Don't convert GOTPCREL relocation against large section. */
1591 if (elf_section_data (tsec) != NULL
1592 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1595 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1602 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1607 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1609 modrm = bfd_get_8 (abfd, contents + roff - 1);
1612 /* Convert to "jmp foo nop". */
1615 nop_offset = irel->r_offset + 3;
1616 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1617 irel->r_offset -= 1;
1618 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1622 struct elf_x86_link_hash_entry *eh
1623 = (struct elf_x86_link_hash_entry *) h;
/* Convert to "nop call foo".  ADDR_PREFIX_OPCODE is a nop prefix.  */
1628 /* To support TLS optimization, always use addr32 prefix for
1629 "call *__tls_get_addr@GOTPCREL(%rip)". */
1630 if (eh && eh->tls_get_addr)
1633 nop_offset = irel->r_offset - 2;
1637 nop = link_info->call_nop_byte;
1638 if (link_info->call_nop_as_suffix)
1640 nop_offset = irel->r_offset + 3;
1641 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1642 irel->r_offset -= 1;
1643 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1646 nop_offset = irel->r_offset - 2;
1649 bfd_put_8 (abfd, nop, contents + nop_offset);
1650 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1651 r_type = R_X86_64_PC32;
1656 unsigned int rex_mask = REX_R;
1658 if (r_type == R_X86_64_REX_GOTPCRELX)
1659 rex = bfd_get_8 (abfd, contents + roff - 3);
1667 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1668 "lea foo(%rip), %reg". */
1670 r_type = R_X86_64_PC32;
1674 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1675 "mov $foo, %reg". */
1677 modrm = bfd_get_8 (abfd, contents + roff - 1);
1678 modrm = 0xc0 | (modrm & 0x38) >> 3;
1679 if ((rex & REX_W) != 0
1680 && ABI_64_P (link_info->output_bfd))
1682 /* Keep the REX_W bit in REX byte for LP64. */
1683 r_type = R_X86_64_32S;
1684 goto rewrite_modrm_rex;
1688 /* If the REX_W bit in REX byte isn't needed,
1689 use R_X86_64_32 and clear the W bit to avoid
1690 sign-extend imm32 to imm64. */
1691 r_type = R_X86_64_32;
1692 /* Clear the W bit in REX byte. */
1694 goto rewrite_modrm_rex;
1700 /* R_X86_64_PC32 isn't supported. */
1704 modrm = bfd_get_8 (abfd, contents + roff - 1);
1707 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1708 "test $foo, %reg". */
1709 modrm = 0xc0 | (modrm & 0x38) >> 3;
1714 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1715 "binop $foo, %reg". */
1716 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1720 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1721 overflow when sign-extending imm32 to imm64. */
1722 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1725 bfd_put_8 (abfd, modrm, contents + roff - 1);
1729 /* Move the R bit to the B bit in REX byte. */
1730 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1731 bfd_put_8 (abfd, rex, contents + roff - 3);
1734 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1738 bfd_put_8 (abfd, opcode, contents + roff - 2);
1742 irel->r_info = htab->r_info (r_symndx,
1743 r_type | R_X86_64_converted_reloc_bit);
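/* Editorial example of the simplest rewrite above, assuming a 64-bit
   load through the GOT: the 7-byte sequence

     48 8b 05 xx xx xx xx	movq   foo@GOTPCREL(%rip), %rax

   becomes

     48 8d 05 xx xx xx xx	leaq   foo(%rip), %rax

   with the relocation retyped to R_X86_64_PC32 and tagged with
   R_X86_64_converted_reloc_bit so that later passes can tell a
   converted relocation from an original one.  */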
1750 /* Look through the relocs for a section during the first phase, and
1751 calculate needed space in the global offset table, procedure
1752 linkage table, and dynamic reloc sections. */
1755 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1757 const Elf_Internal_Rela *relocs)
1759 struct elf_x86_link_hash_table *htab;
1760 Elf_Internal_Shdr *symtab_hdr;
1761 struct elf_link_hash_entry **sym_hashes;
1762 const Elf_Internal_Rela *rel;
1763 const Elf_Internal_Rela *rel_end;
1766 bfd_boolean converted;
1768 if (bfd_link_relocatable (info))
1771 /* Don't do anything special with non-loaded, non-alloced sections.
1772 In particular, any relocs in such sections should not affect GOT
1773 and PLT reference counting (ie. we don't allow them to create GOT
1774 or PLT entries), there's no possibility or desire to optimize TLS
1775 relocs, and there's not much point in propagating relocs to shared
1776 libs that the dynamic linker won't relocate. */
1777 if ((sec->flags & SEC_ALLOC) == 0)
1780 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1783 sec->check_relocs_failed = 1;
1787 BFD_ASSERT (is_x86_elf (abfd, htab));
1789 /* Get the section contents. */
1790 if (elf_section_data (sec)->this_hdr.contents != NULL)
1791 contents = elf_section_data (sec)->this_hdr.contents;
1792 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1794 sec->check_relocs_failed = 1;
1798 symtab_hdr = &elf_symtab_hdr (abfd);
1799 sym_hashes = elf_sym_hashes (abfd);
1805 rel_end = relocs + sec->reloc_count;
1806 for (rel = relocs; rel < rel_end; rel++)
1808 unsigned int r_type;
1809 unsigned int r_symndx;
1810 struct elf_link_hash_entry *h;
1811 struct elf_x86_link_hash_entry *eh;
1812 Elf_Internal_Sym *isym;
1814 bfd_boolean size_reloc;
1815 bfd_boolean converted_reloc;
1817 r_symndx = htab->r_sym (rel->r_info);
1818 r_type = ELF32_R_TYPE (rel->r_info);
1820 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1822 /* xgettext:c-format */
1823 _bfd_error_handler (_("%B: bad symbol index: %d"),
1828 if (r_symndx < symtab_hdr->sh_info)
1830 /* A local symbol. */
1831 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1836 /* Check relocation against local STT_GNU_IFUNC symbol. */
1837 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1839 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1844 /* Fake a STT_GNU_IFUNC symbol. */
1845 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1847 h->type = STT_GNU_IFUNC;
1850 h->forced_local = 1;
1851 h->root.type = bfd_link_hash_defined;
1859 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1860 while (h->root.type == bfd_link_hash_indirect
1861 || h->root.type == bfd_link_hash_warning)
1862 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1865 /* Check invalid x32 relocations. */
1866 if (!ABI_64_P (abfd))
1872 case R_X86_64_DTPOFF64:
1873 case R_X86_64_TPOFF64:
1875 case R_X86_64_GOTOFF64:
1876 case R_X86_64_GOT64:
1877 case R_X86_64_GOTPCREL64:
1878 case R_X86_64_GOTPC64:
1879 case R_X86_64_GOTPLT64:
1880 case R_X86_64_PLTOFF64:
1883 name = h->root.root.string;
1885 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1888 /* xgettext:c-format */
1889 (_("%B: relocation %s against symbol `%s' isn't "
1890 "supported in x32 mode"), abfd,
1891 x86_64_elf_howto_table[r_type].name, name);
1892 bfd_set_error (bfd_error_bad_value);
1900 /* It is referenced by a non-shared object. */
1903 if (h->type == STT_GNU_IFUNC)
1904 elf_tdata (info->output_bfd)->has_gnu_symbols
1905 |= elf_gnu_symbol_ifunc;
1908 converted_reloc = FALSE;
1909 if ((r_type == R_X86_64_GOTPCREL
1910 || r_type == R_X86_64_GOTPCRELX
1911 || r_type == R_X86_64_REX_GOTPCRELX)
1912 && (h == NULL || h->type != STT_GNU_IFUNC))
1914 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1915 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1916 irel, h, &converted_reloc,
1920 if (converted_reloc)
1924 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1925 symtab_hdr, sym_hashes,
1926 &r_type, GOT_UNKNOWN,
1927 rel, rel_end, h, r_symndx, FALSE))
1930 eh = (struct elf_x86_link_hash_entry *) h;
1933 case R_X86_64_TLSLD:
1934 htab->tls_ld_or_ldm_got.refcount = 1;
1937 case R_X86_64_TPOFF32:
1938 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1939 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1940 &x86_64_elf_howto_table[r_type]);
1942 eh->zero_undefweak &= 0x2;
1945 case R_X86_64_GOTTPOFF:
1946 if (!bfd_link_executable (info))
1947 info->flags |= DF_STATIC_TLS;
1950 case R_X86_64_GOT32:
1951 case R_X86_64_GOTPCREL:
1952 case R_X86_64_GOTPCRELX:
1953 case R_X86_64_REX_GOTPCRELX:
1954 case R_X86_64_TLSGD:
1955 case R_X86_64_GOT64:
1956 case R_X86_64_GOTPCREL64:
1957 case R_X86_64_GOTPLT64:
1958 case R_X86_64_GOTPC32_TLSDESC:
1959 case R_X86_64_TLSDESC_CALL:
1960 /* This symbol requires a global offset table entry. */
1962 int tls_type, old_tls_type;
1966 default: tls_type = GOT_NORMAL; break;
1967 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1968 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1969 case R_X86_64_GOTPC32_TLSDESC:
1970 case R_X86_64_TLSDESC_CALL:
1971 tls_type = GOT_TLS_GDESC; break;
1976 h->got.refcount = 1;
1977 old_tls_type = eh->tls_type;
1981 bfd_signed_vma *local_got_refcounts;
1983 /* This is a global offset table entry for a local symbol. */
1984 local_got_refcounts = elf_local_got_refcounts (abfd);
1985 if (local_got_refcounts == NULL)
1989 size = symtab_hdr->sh_info;
1990 size *= sizeof (bfd_signed_vma)
1991 + sizeof (bfd_vma) + sizeof (char);
1992 local_got_refcounts = ((bfd_signed_vma *)
1993 bfd_zalloc (abfd, size));
1994 if (local_got_refcounts == NULL)
1996 elf_local_got_refcounts (abfd) = local_got_refcounts;
1997 elf_x86_local_tlsdesc_gotent (abfd)
1998 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
1999 elf_x86_local_got_tls_type (abfd)
2000 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
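/* Editorial note: the single bfd_zalloc above is carved into three
   parallel per-local-symbol arrays: sh_info signed GOT refcounts,
   then sh_info bfd_vma TLSDESC GOT offsets, then sh_info one-byte TLS
   type tags, which is why the size is the sum of the three element
   sizes.  */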
2002 local_got_refcounts[r_symndx] = 1;
2004 = elf_x86_local_got_tls_type (abfd) [r_symndx];
/* If a TLS symbol is accessed using IE at least once, there is no
   point in using a dynamic model for it.  */
2009 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2010 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2011 || tls_type != GOT_TLS_IE))
2013 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2014 tls_type = old_tls_type;
2015 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2016 && GOT_TLS_GD_ANY_P (tls_type))
2017 tls_type |= old_tls_type;
2021 name = h->root.root.string;
2023 name = bfd_elf_sym_name (abfd, symtab_hdr,
2026 /* xgettext:c-format */
2027 (_("%B: '%s' accessed both as normal and"
2028 " thread local symbol"),
2030 bfd_set_error (bfd_error_bad_value);
2035 if (old_tls_type != tls_type)
2038 eh->tls_type = tls_type;
2040 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2045 case R_X86_64_GOTOFF64:
2046 case R_X86_64_GOTPC32:
2047 case R_X86_64_GOTPC64:
2050 eh->zero_undefweak &= 0x2;
2053 case R_X86_64_PLT32:
2054 case R_X86_64_PLT32_BND:
2055 /* This symbol requires a procedure linkage table entry. We
2056 actually build the entry in adjust_dynamic_symbol,
2057 because this might be a case of linking PIC code which is
2058 never referenced by a dynamic object, in which case we
2059 don't need to generate a procedure linkage table entry after all. */
2062 /* If this is a local symbol, we resolve it directly without
2063 creating a procedure linkage table entry. */
2067 eh->zero_undefweak &= 0x2;
2069 h->plt.refcount = 1;
2072 case R_X86_64_PLTOFF64:
2073 /* This tries to form the 'address' of a function relative
2074 to GOT. For global symbols we need a PLT entry. */
2078 h->plt.refcount = 1;
2082 case R_X86_64_SIZE32:
2083 case R_X86_64_SIZE64:
2088 if (!ABI_64_P (abfd))
2094 /* Check relocation overflow as these relocs may lead to
2095 run-time relocation overflow. Don't error out for
2096 sections we don't care about, such as debug sections or
2097 when relocation overflow check is disabled. */
2098 if (!info->no_reloc_overflow_check
2100 && (bfd_link_pic (info)
2101 || (bfd_link_executable (info)
2105 && (sec->flags & SEC_READONLY) == 0)))
2106 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2107 &x86_64_elf_howto_table[r_type]);
2113 case R_X86_64_PC32_BND:
2117 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2118 eh->zero_undefweak |= 0x2;
2119 /* We are called after all symbols have been resolved. Only
2120 relocations against STT_GNU_IFUNC symbols must go through the PLT. */
2123 && (bfd_link_executable (info)
2124 || h->type == STT_GNU_IFUNC))
2126 bfd_boolean func_pointer_ref = FALSE;
2128 if (r_type == R_X86_64_PC32)
2130 /* Since something like ".long foo - ." may be used
2131 as pointer, make sure that PLT is used if foo is
2132 a function defined in a shared library. */
2133 if ((sec->flags & SEC_CODE) == 0)
2134 h->pointer_equality_needed = 1;
2136 else if (r_type != R_X86_64_PC32_BND
2137 && r_type != R_X86_64_PC64)
2139 h->pointer_equality_needed = 1;
2140 /* At run-time, R_X86_64_64 can be resolved for both
2141 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2142 can only be resolved for x32. */
2143 if ((sec->flags & SEC_READONLY) == 0
2144 && (r_type == R_X86_64_64
2145 || (!ABI_64_P (abfd)
2146 && (r_type == R_X86_64_32
2147 || r_type == R_X86_64_32S))))
2148 func_pointer_ref = TRUE;
2151 if (!func_pointer_ref)
2153 /* If this reloc is in a read-only section, we might
2154 need a copy reloc. We can't check reliably at this
2155 stage whether the section is read-only, as input
2156 sections have not yet been mapped to output sections.
2157 Tentatively set the flag for now, and correct in
2158 adjust_dynamic_symbol. */
2161 /* We may need a .plt entry if the symbol is a function
2162 defined in a shared lib or is a function referenced
2163 from the code or read-only section. */
2165 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2166 h->plt.refcount = 1;
2172 if (NEED_DYNAMIC_RELOCATION_P (info, h, sec, r_type,
2173 htab->pointer_r_type))
2175 struct elf_dyn_relocs *p;
2176 struct elf_dyn_relocs **head;
2178 /* We must copy these reloc types into the output file.
2179 Create a reloc section in dynobj and make room for this reloc. */
2183 sreloc = _bfd_elf_make_dynamic_reloc_section
2184 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2185 abfd, /*rela?*/ TRUE);
2191 /* If this is a global symbol, we count the number of
2192 relocations we need for this symbol. */
2194 head = &eh->dyn_relocs;
2197 /* Track dynamic relocs needed for local syms too.
2198 We really need local syms available to do this easily. */
2203 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2208 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2212 /* Beware of type punned pointers vs strict aliasing rules. */
2214 vpp = &(elf_section_data (s)->local_dynrel);
2215 head = (struct elf_dyn_relocs **)vpp;
2219 if (p == NULL || p->sec != sec)
2221 bfd_size_type amt = sizeof *p;
2223 p = ((struct elf_dyn_relocs *)
2224 bfd_alloc (htab->elf.dynobj, amt));
2235 /* Count size relocation as PC-relative relocation. */
2236 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2241 /* This relocation describes the C++ object vtable hierarchy.
2242 Reconstruct it for later use during GC. */
2243 case R_X86_64_GNU_VTINHERIT:
2244 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2248 /* This relocation describes which C++ vtable entries are actually
2249 used. Record for later use during GC. */
2250 case R_X86_64_GNU_VTENTRY:
2251 BFD_ASSERT (h != NULL);
2253 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2262 if (elf_section_data (sec)->this_hdr.contents != contents)
2264 if (!converted && !info->keep_memory)
2268 /* Cache the section contents for elf_link_input_bfd if any
2269 load is converted or --no-keep-memory isn't used. */
2270 elf_section_data (sec)->this_hdr.contents = contents;
2274 /* Cache relocations if any load is converted. */
2275 if (elf_section_data (sec)->relocs != relocs && converted)
2276 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2281 if (elf_section_data (sec)->this_hdr.contents != contents)
2283 sec->check_relocs_failed = 1;
2287 /* Return the relocation value for @tpoff relocation
2288 if STT_TLS virtual address is ADDRESS. */
2291 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2293 struct elf_link_hash_table *htab = elf_hash_table (info);
2294 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2295 bfd_vma static_tls_size;
2297 /* If htab->tls_sec is NULL, we should have signalled an error already. */
2298 if (htab->tls_sec == NULL)
2301 /* Consider special static TLS alignment requirements. */
2302 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2303 return address - static_tls_size - htab->tls_sec->vma;
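/* On x86-64 the thread pointer sits just past the static TLS block (TLS
   variant II), so the @tpoff values computed here come out negative for
   addresses inside the TLS segment, as offsets from %fs:0.  */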
2306 /* Is the instruction before OFFSET in CONTENTS a 32bit relative branch? */
2310 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset)
2312 /* Opcode Instruction
2313 0xe8 call
2314 0xe9 jump
2315 0x0f 0x8x conditional jump */
2317 && (contents [offset - 1] == 0xe8
2318 || contents [offset - 1] == 0xe9))
2320 && contents [offset - 2] == 0x0f
2321 && (contents [offset - 1] & 0xf0) == 0x80));
2324 /* Relocate an x86_64 ELF section. */
2327 elf_x86_64_relocate_section (bfd *output_bfd,
2328 struct bfd_link_info *info,
2330 asection *input_section,
2332 Elf_Internal_Rela *relocs,
2333 Elf_Internal_Sym *local_syms,
2334 asection **local_sections)
2336 struct elf_x86_link_hash_table *htab;
2337 Elf_Internal_Shdr *symtab_hdr;
2338 struct elf_link_hash_entry **sym_hashes;
2339 bfd_vma *local_got_offsets;
2340 bfd_vma *local_tlsdesc_gotents;
2341 Elf_Internal_Rela *rel;
2342 Elf_Internal_Rela *wrel;
2343 Elf_Internal_Rela *relend;
2344 unsigned int plt_entry_size;
2346 /* Skip if check_relocs failed. */
2347 if (input_section->check_relocs_failed)
2350 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2354 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2356 plt_entry_size = htab->plt.plt_entry_size;
2357 symtab_hdr = &elf_symtab_hdr (input_bfd);
2358 sym_hashes = elf_sym_hashes (input_bfd);
2359 local_got_offsets = elf_local_got_offsets (input_bfd);
2360 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2362 _bfd_x86_elf_set_tls_module_base (info);
2364 rel = wrel = relocs;
2365 relend = relocs + input_section->reloc_count;
2366 for (; rel < relend; wrel++, rel++)
2368 unsigned int r_type, r_type_tls;
2369 reloc_howto_type *howto;
2370 unsigned long r_symndx;
2371 struct elf_link_hash_entry *h;
2372 struct elf_x86_link_hash_entry *eh;
2373 Elf_Internal_Sym *sym;
2375 bfd_vma off, offplt, plt_offset;
2377 bfd_boolean unresolved_reloc;
2378 bfd_reloc_status_type r;
2380 asection *base_got, *resolved_plt;
2382 bfd_boolean resolved_to_zero;
2383 bfd_boolean relative_reloc;
2384 bfd_boolean converted_reloc;
2385 bfd_boolean need_copy_reloc_in_pie;
2387 r_type = ELF32_R_TYPE (rel->r_info);
2388 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2389 || r_type == (int) R_X86_64_GNU_VTENTRY)
2396 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2397 r_type &= ~R_X86_64_converted_reloc_bit;
2399 if (r_type >= (int) R_X86_64_standard)
2400 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2402 if (r_type != (int) R_X86_64_32
2403 || ABI_64_P (output_bfd))
2404 howto = x86_64_elf_howto_table + r_type;
2406 howto = (x86_64_elf_howto_table
2407 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2408 r_symndx = htab->r_sym (rel->r_info);
2412 unresolved_reloc = FALSE;
2413 if (r_symndx < symtab_hdr->sh_info)
2415 sym = local_syms + r_symndx;
2416 sec = local_sections[r_symndx];
2418 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2420 st_size = sym->st_size;
2422 /* Relocate against local STT_GNU_IFUNC symbol. */
2423 if (!bfd_link_relocatable (info)
2424 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2426 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2431 /* Set STT_GNU_IFUNC symbol value. */
2432 h->root.u.def.value = sym->st_value;
2433 h->root.u.def.section = sec;
2438 bfd_boolean warned ATTRIBUTE_UNUSED;
2439 bfd_boolean ignored ATTRIBUTE_UNUSED;
2441 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2442 r_symndx, symtab_hdr, sym_hashes,
2444 unresolved_reloc, warned, ignored);
2448 if (sec != NULL && discarded_section (sec))
2450 _bfd_clear_contents (howto, input_bfd, input_section,
2451 contents + rel->r_offset);
2452 wrel->r_offset = rel->r_offset;
2456 /* For ld -r, remove relocations in debug sections against
2457 sections defined in discarded sections. Not done for
2458 .eh_frame, whose editing code expects the relocs to be present. */
2459 if (bfd_link_relocatable (info)
2460 && (input_section->flags & SEC_DEBUGGING))
2466 if (bfd_link_relocatable (info))
2473 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2475 if (r_type == R_X86_64_64)
2477 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2478 zero-extend it to 64bit if addend is zero. */
2479 r_type = R_X86_64_32;
2480 memset (contents + rel->r_offset + 4, 0, 4);
2482 else if (r_type == R_X86_64_SIZE64)
2484 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2485 zero-extend it to 64bit if addend is zero. */
2486 r_type = R_X86_64_SIZE32;
2487 memset (contents + rel->r_offset + 4, 0, 4);
2491 eh = (struct elf_x86_link_hash_entry *) h;
2493 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2494 it here if it is defined in a non-shared object. */
2496 && h->type == STT_GNU_IFUNC
2502 if ((input_section->flags & SEC_ALLOC) == 0)
2504 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2505 sections because such sections are not SEC_ALLOC and
2506 thus ld.so will not process them. */
2507 if ((input_section->flags & SEC_DEBUGGING) != 0)
2517 case R_X86_64_GOTPCREL:
2518 case R_X86_64_GOTPCRELX:
2519 case R_X86_64_REX_GOTPCRELX:
2520 case R_X86_64_GOTPCREL64:
2521 base_got = htab->elf.sgot;
2522 off = h->got.offset;
2524 if (base_got == NULL)
2527 if (off == (bfd_vma) -1)
2529 /* We can't use h->got.offset here to save state, or
2530 even just remember the offset, as finish_dynamic_symbol
2531 would use that as offset into .got. */
2533 if (h->plt.offset == (bfd_vma) -1)
2536 if (htab->elf.splt != NULL)
2538 plt_index = (h->plt.offset / plt_entry_size
2539 - htab->plt.has_plt0);
2540 off = (plt_index + 3) * GOT_ENTRY_SIZE;
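/* The '+ 3' skips the three .got.plt entries reserved for the dynamic
   linker: GOT[0] holds the address of _DYNAMIC, while GOT[1] and GOT[2]
   are filled in by ld.so for lazy resolution.  */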
2541 base_got = htab->elf.sgotplt;
2545 plt_index = h->plt.offset / plt_entry_size;
2546 off = plt_index * GOT_ENTRY_SIZE;
2547 base_got = htab->elf.igotplt;
2550 if (h->dynindx == -1
2554 /* This references the local definition. We must
2555 initialize this entry in the global offset table.
2556 Since the offset must always be a multiple of 8,
2557 we use the least significant bit to record
2558 whether we have initialized it already.
2560 When doing a dynamic link, we create a .rela.got
2561 relocation entry to initialize the value. This
2562 is done in the finish_dynamic_symbol routine. */
2567 bfd_put_64 (output_bfd, relocation,
2568 base_got->contents + off);
2569 /* Note that this is harmless for the GOTPLT64
2570 case, as -1 | 1 still is -1. */
2576 relocation = (base_got->output_section->vma
2577 + base_got->output_offset + off);
2582 if (h->plt.offset == (bfd_vma) -1)
2584 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2585 if (r_type == htab->pointer_r_type
2586 && (input_section->flags & SEC_CODE) == 0)
2587 goto do_ifunc_pointer;
2588 goto bad_ifunc_reloc;
2591 /* STT_GNU_IFUNC symbol must go through PLT. */
2592 if (htab->elf.splt != NULL)
2594 if (htab->plt_second != NULL)
2596 resolved_plt = htab->plt_second;
2597 plt_offset = eh->plt_second.offset;
2601 resolved_plt = htab->elf.splt;
2602 plt_offset = h->plt.offset;
2607 resolved_plt = htab->elf.iplt;
2608 plt_offset = h->plt.offset;
2611 relocation = (resolved_plt->output_section->vma
2612 + resolved_plt->output_offset + plt_offset);
2618 if (h->root.root.string)
2619 name = h->root.root.string;
2621 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2624 /* xgettext:c-format */
2625 (_("%B: relocation %s against STT_GNU_IFUNC "
2626 "symbol `%s' isn't supported"), input_bfd,
2628 bfd_set_error (bfd_error_bad_value);
2632 if (bfd_link_pic (info))
2637 if (ABI_64_P (output_bfd))
2642 if (rel->r_addend != 0)
2644 if (h->root.root.string)
2645 name = h->root.root.string;
2647 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2650 /* xgettext:c-format */
2651 (_("%B: relocation %s against STT_GNU_IFUNC "
2652 "symbol `%s' has non-zero addend: %Ld"),
2653 input_bfd, howto->name, name, rel->r_addend);
2654 bfd_set_error (bfd_error_bad_value);
2658 /* Generate a dynamic relocation only when there is a
2659 non-GOT reference in a shared object or there is no PLT. */
2661 if ((bfd_link_pic (info) && h->non_got_ref)
2662 || h->plt.offset == (bfd_vma) -1)
2664 Elf_Internal_Rela outrel;
2667 /* Need a dynamic relocation to get the real function address. */
2669 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2673 if (outrel.r_offset == (bfd_vma) -1
2674 || outrel.r_offset == (bfd_vma) -2)
2677 outrel.r_offset += (input_section->output_section->vma
2678 + input_section->output_offset);
2680 if (POINTER_LOCAL_IFUNC_P (info, h))
2682 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
2683 h->root.root.string,
2684 h->root.u.def.section->owner);
2686 /* This symbol is resolved locally. */
2687 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2688 outrel.r_addend = (h->root.u.def.value
2689 + h->root.u.def.section->output_section->vma
2690 + h->root.u.def.section->output_offset);
2694 outrel.r_info = htab->r_info (h->dynindx, r_type);
2695 outrel.r_addend = 0;
2698 /* Dynamic relocations are stored in
2699 1. .rela.ifunc section in PIC object.
2700 2. .rela.got section in dynamic executable.
2701 3. .rela.iplt section in static executable. */
2702 if (bfd_link_pic (info))
2703 sreloc = htab->elf.irelifunc;
2704 else if (htab->elf.splt != NULL)
2705 sreloc = htab->elf.srelgot;
2707 sreloc = htab->elf.irelplt;
2708 elf_append_rela (output_bfd, sreloc, &outrel);
2710 /* If this reloc is against an external symbol, we
2711 do not want to fiddle with the addend. Otherwise,
2712 we need to include the symbol value so that it
2713 becomes an addend for the dynamic reloc. For an
2714 internal symbol, we have updated addend. */
2719 case R_X86_64_PC32_BND:
2721 case R_X86_64_PLT32:
2722 case R_X86_64_PLT32_BND:
2727 resolved_to_zero = (eh != NULL
2728 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2730 /* When generating a shared object, the relocations handled here are
2731 copied into the output file to be resolved at run time. */
2734 case R_X86_64_GOT32:
2735 case R_X86_64_GOT64:
2736 /* Relocation is to the entry for this symbol in the global offset table. */
2738 case R_X86_64_GOTPCREL:
2739 case R_X86_64_GOTPCRELX:
2740 case R_X86_64_REX_GOTPCRELX:
2741 case R_X86_64_GOTPCREL64:
2742 /* Use global offset table entry as symbol value. */
2743 case R_X86_64_GOTPLT64:
2744 /* This is obsolete and treated the same as GOT64. */
2745 base_got = htab->elf.sgot;
2747 if (htab->elf.sgot == NULL)
2750 relative_reloc = FALSE;
2753 off = h->got.offset;
2755 && h->plt.offset != (bfd_vma)-1
2756 && off == (bfd_vma)-1)
2758 /* We can't use h->got.offset here to save
2759 state, or even just remember the offset, as
2760 finish_dynamic_symbol would use that as offset into .got. */
2762 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2763 - htab->plt.has_plt0);
2764 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2765 base_got = htab->elf.sgotplt;
2768 if (RESOLVED_LOCALLY_P (info, h, htab))
2770 /* We must initialize this entry in the global offset
2771 table. Since the offset must always be a multiple
2772 of 8, we use the least significant bit to record
2773 whether we have initialized it already.
2775 When doing a dynamic link, we create a .rela.got
2776 relocation entry to initialize the value. This is
2777 done in the finish_dynamic_symbol routine. */
2782 bfd_put_64 (output_bfd, relocation,
2783 base_got->contents + off);
2784 /* Note that this is harmless for the GOTPLT64 case,
2785 as -1 | 1 still is -1. */
2788 if (GENERATE_RELATIVE_RELOC_P (info, h))
2790 /* If this symbol isn't dynamic in PIC,
2791 generate R_X86_64_RELATIVE here. */
2792 eh->no_finish_dynamic_symbol = 1;
2793 relative_reloc = TRUE;
2798 unresolved_reloc = FALSE;
2802 if (local_got_offsets == NULL)
2805 off = local_got_offsets[r_symndx];
2807 /* The offset must always be a multiple of 8. We use
2808 the least significant bit to record whether we have
2809 already generated the necessary reloc. */
2814 bfd_put_64 (output_bfd, relocation,
2815 base_got->contents + off);
2816 local_got_offsets[r_symndx] |= 1;
2818 if (bfd_link_pic (info))
2819 relative_reloc = TRUE;
2826 Elf_Internal_Rela outrel;
2828 /* We need to generate a R_X86_64_RELATIVE reloc
2829 for the dynamic linker. */
2830 s = htab->elf.srelgot;
2834 outrel.r_offset = (base_got->output_section->vma
2835 + base_got->output_offset
2837 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2838 outrel.r_addend = relocation;
2839 elf_append_rela (output_bfd, s, &outrel);
2842 if (off >= (bfd_vma) -2)
2845 relocation = base_got->output_section->vma
2846 + base_got->output_offset + off;
2847 if (r_type != R_X86_64_GOTPCREL
2848 && r_type != R_X86_64_GOTPCRELX
2849 && r_type != R_X86_64_REX_GOTPCRELX
2850 && r_type != R_X86_64_GOTPCREL64)
2851 relocation -= htab->elf.sgotplt->output_section->vma
2852 - htab->elf.sgotplt->output_offset;
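/* GOT32, GOT64 and GOTPLT64 resolve to an offset from the GOT base
   (_GLOBAL_OFFSET_TABLE_, i.e. .got.plt here) rather than to the absolute
   GOT entry address, hence the adjustment above; the PC-relative GOTPCREL
   variants stay absolute and let the howto apply the PC bias.  */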
2856 case R_X86_64_GOTOFF64:
2857 /* Relocation is relative to the start of the global offset table. */
2860 /* Check to make sure it isn't a protected function or data
2861 symbol for shared library since it may not be local when
2862 used as function address or with copy relocation. We also
2863 need to make sure that a symbol is referenced locally. */
2864 if (bfd_link_pic (info) && h)
2866 if (!h->def_regular)
2870 switch (ELF_ST_VISIBILITY (h->other))
2873 v = _("hidden symbol");
2876 v = _("internal symbol");
2879 v = _("protected symbol");
2887 /* xgettext:c-format */
2888 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s"
2889 " `%s' can not be used when making a shared object"),
2890 input_bfd, v, h->root.root.string);
2891 bfd_set_error (bfd_error_bad_value);
2894 else if (!bfd_link_executable (info)
2895 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2896 && (h->type == STT_FUNC
2897 || h->type == STT_OBJECT)
2898 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2901 /* xgettext:c-format */
2902 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s"
2903 " `%s' can not be used when making a shared object"),
2905 h->type == STT_FUNC ? "function" : "data",
2906 h->root.root.string);
2907 bfd_set_error (bfd_error_bad_value);
2912 /* Note that sgot is not involved in this
2913 calculation. We always want the start of .got.plt. If we
2914 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2915 permitted by the ABI, we might have to change this calculation. */
2917 relocation -= htab->elf.sgotplt->output_section->vma
2918 + htab->elf.sgotplt->output_offset;
2921 case R_X86_64_GOTPC32:
2922 case R_X86_64_GOTPC64:
2923 /* Use global offset table as symbol value. */
2924 relocation = htab->elf.sgotplt->output_section->vma
2925 + htab->elf.sgotplt->output_offset;
2926 unresolved_reloc = FALSE;
2929 case R_X86_64_PLTOFF64:
2930 /* Relocation is PLT entry relative to GOT. For local
2931 symbols it's the symbol itself relative to GOT. */
2933 /* See PLT32 handling. */
2934 && (h->plt.offset != (bfd_vma) -1
2935 || eh->plt_got.offset != (bfd_vma) -1)
2936 && htab->elf.splt != NULL)
2938 if (eh->plt_got.offset != (bfd_vma) -1)
2940 /* Use the GOT PLT. */
2941 resolved_plt = htab->plt_got;
2942 plt_offset = eh->plt_got.offset;
2944 else if (htab->plt_second != NULL)
2946 resolved_plt = htab->plt_second;
2947 plt_offset = eh->plt_second.offset;
2951 resolved_plt = htab->elf.splt;
2952 plt_offset = h->plt.offset;
2955 relocation = (resolved_plt->output_section->vma
2956 + resolved_plt->output_offset
2958 unresolved_reloc = FALSE;
2961 relocation -= htab->elf.sgotplt->output_section->vma
2962 + htab->elf.sgotplt->output_offset;
2965 case R_X86_64_PLT32:
2966 case R_X86_64_PLT32_BND:
2967 /* Relocation is to the entry for this symbol in the
2968 procedure linkage table. */
2970 /* Resolve a PLT32 reloc against a local symbol directly,
2971 without using the procedure linkage table. */
2975 if ((h->plt.offset == (bfd_vma) -1
2976 && eh->plt_got.offset == (bfd_vma) -1)
2977 || htab->elf.splt == NULL)
2979 /* We didn't make a PLT entry for this symbol. This
2980 happens when statically linking PIC code, or when
2981 using -Bsymbolic. */
2985 if (h->plt.offset != (bfd_vma) -1)
2987 if (htab->plt_second != NULL)
2989 resolved_plt = htab->plt_second;
2990 plt_offset = eh->plt_second.offset;
2994 resolved_plt = htab->elf.splt;
2995 plt_offset = h->plt.offset;
3000 /* Use the GOT PLT. */
3001 resolved_plt = htab->plt_got;
3002 plt_offset = eh->plt_got.offset;
3005 relocation = (resolved_plt->output_section->vma
3006 + resolved_plt->output_offset
3008 unresolved_reloc = FALSE;
3011 case R_X86_64_SIZE32:
3012 case R_X86_64_SIZE64:
3013 /* Set to symbol size. */
3014 relocation = st_size;
3020 case R_X86_64_PC32_BND:
3021 /* Don't complain about -fPIC if the symbol is undefined when
3022 building an executable, unless it is an unresolved weak symbol or
3023 -z nocopyreloc is used. */
3024 if ((input_section->flags & SEC_ALLOC) != 0
3025 && (input_section->flags & SEC_READONLY) != 0
3027 && ((bfd_link_executable (info)
3028 && ((h->root.type == bfd_link_hash_undefweak
3029 && !resolved_to_zero)
3030 || ((info->nocopyreloc
3031 || (eh->def_protected
3032 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3034 && !(h->root.u.def.section->flags & SEC_CODE))))
3035 || bfd_link_dll (info)))
3037 bfd_boolean fail = FALSE;
3039 = ((r_type == R_X86_64_PC32
3040 || r_type == R_X86_64_PC32_BND)
3041 && is_32bit_relative_branch (contents, rel->r_offset));
3043 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3045 /* Symbol is referenced locally. Make sure it is
3046 defined locally or for a branch. */
3047 fail = (!(h->def_regular || ELF_COMMON_DEF_P (h))
3050 else if (!(bfd_link_pie (info)
3051 && (h->needs_copy || eh->needs_copy)))
3053 /* Symbol doesn't need copy reloc and isn't referenced
3054 locally. We only allow branch to symbol with
3055 non-default visibility. */
3057 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT);
3061 return elf_x86_64_need_pic (info, input_bfd, input_section,
3062 h, NULL, NULL, howto);
3071 /* FIXME: The ABI says the linker should make sure the value is
3072 the same when it's zero-extended to 64 bits. */
3075 if ((input_section->flags & SEC_ALLOC) == 0)
3078 need_copy_reloc_in_pie = (bfd_link_pie (info)
3083 == bfd_link_hash_undefined))
3084 && (X86_PCREL_TYPE_P (r_type)
3085 || X86_SIZE_TYPE_P (r_type)));
3087 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3088 need_copy_reloc_in_pie,
3089 resolved_to_zero, FALSE))
3091 Elf_Internal_Rela outrel;
3092 bfd_boolean skip, relocate;
3095 /* When generating a shared object, these relocations
3096 are copied into the output file to be resolved at run time. */
3102 _bfd_elf_section_offset (output_bfd, info, input_section,
3104 if (outrel.r_offset == (bfd_vma) -1)
3106 else if (outrel.r_offset == (bfd_vma) -2)
3107 skip = TRUE, relocate = TRUE;
3109 outrel.r_offset += (input_section->output_section->vma
3110 + input_section->output_offset);
3113 memset (&outrel, 0, sizeof outrel);
3115 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3117 outrel.r_info = htab->r_info (h->dynindx, r_type);
3118 outrel.r_addend = rel->r_addend;
3122 /* This symbol is local, or marked to become local.
3123 When relocation overflow check is disabled, we
3124 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3125 if (r_type == htab->pointer_r_type
3126 || (r_type == R_X86_64_32
3127 && info->no_reloc_overflow_check))
3130 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3131 outrel.r_addend = relocation + rel->r_addend;
3133 else if (r_type == R_X86_64_64
3134 && !ABI_64_P (output_bfd))
3137 outrel.r_info = htab->r_info (0,
3138 R_X86_64_RELATIVE64);
3139 outrel.r_addend = relocation + rel->r_addend;
3140 /* Check addend overflow. */
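/* The test below compares bit 31 of the computed addend with bit 31 of
   the original in-place addend; a mismatch means the value can no longer
   be represented consistently in the 32-bit fields x32 uses at run time,
   so it is reported as an overflow.  */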
3141 if ((outrel.r_addend & 0x80000000)
3142 != (rel->r_addend & 0x80000000))
3145 int addend = rel->r_addend;
3146 if (h && h->root.root.string)
3147 name = h->root.root.string;
3149 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3152 /* xgettext:c-format */
3153 (_("%B: addend %s%#x in relocation %s against "
3154 "symbol `%s' at %#Lx in section `%A' is "
3156 input_bfd, addend < 0 ? "-" : "", addend,
3157 howto->name, name, rel->r_offset, input_section);
3158 bfd_set_error (bfd_error_bad_value);
3166 if (bfd_is_abs_section (sec))
3168 else if (sec == NULL || sec->owner == NULL)
3170 bfd_set_error (bfd_error_bad_value);
3177 /* We are turning this relocation into one
3178 against a section symbol. It would be
3179 proper to subtract the symbol's value,
3180 osec->vma, from the emitted reloc addend,
3181 but ld.so expects buggy relocs. */
3182 osec = sec->output_section;
3183 sindx = elf_section_data (osec)->dynindx;
3186 asection *oi = htab->elf.text_index_section;
3187 sindx = elf_section_data (oi)->dynindx;
3189 BFD_ASSERT (sindx != 0);
3192 outrel.r_info = htab->r_info (sindx, r_type);
3193 outrel.r_addend = relocation + rel->r_addend;
3197 sreloc = elf_section_data (input_section)->sreloc;
3199 if (sreloc == NULL || sreloc->contents == NULL)
3201 r = bfd_reloc_notsupported;
3202 goto check_relocation_error;
3205 elf_append_rela (output_bfd, sreloc, &outrel);
3207 /* If this reloc is against an external symbol, we do
3208 not want to fiddle with the addend. Otherwise, we
3209 need to include the symbol value so that it becomes
3210 an addend for the dynamic reloc. */
3217 case R_X86_64_TLSGD:
3218 case R_X86_64_GOTPC32_TLSDESC:
3219 case R_X86_64_TLSDESC_CALL:
3220 case R_X86_64_GOTTPOFF:
3221 tls_type = GOT_UNKNOWN;
3222 if (h == NULL && local_got_offsets)
3223 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3225 tls_type = elf_x86_hash_entry (h)->tls_type;
3227 r_type_tls = r_type;
3228 if (! elf_x86_64_tls_transition (info, input_bfd,
3229 input_section, contents,
3230 symtab_hdr, sym_hashes,
3231 &r_type_tls, tls_type, rel,
3232 relend, h, r_symndx, TRUE))
3235 if (r_type_tls == R_X86_64_TPOFF32)
3237 bfd_vma roff = rel->r_offset;
3239 BFD_ASSERT (! unresolved_reloc);
3241 if (r_type == R_X86_64_TLSGD)
3243 /* GD->LE transition. For 64bit, change
3244 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3245 .word 0x6666; rex64; call __tls_get_addr@PLT
3247 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3249 call *__tls_get_addr@GOTPCREL(%rip)
3250 which may be converted to
3251 addr32 call __tls_get_addr
3254 leaq foo@tpoff(%rax), %rax
3256 leaq foo@tlsgd(%rip), %rdi
3257 .word 0x6666; rex64; call __tls_get_addr@PLT
3259 leaq foo@tlsgd(%rip), %rdi
3261 call *__tls_get_addr@GOTPCREL(%rip)
3262 which may be converted to
3263 addr32 call __tls_get_addr
3266 leaq foo@tpoff(%rax), %rax
3267 For largepic, change:
3268 leaq foo@tlsgd(%rip), %rdi
3269 movabsq $__tls_get_addr@pltoff, %rax
3274 leaq foo@tpoff(%rax), %rax
3275 nopw 0x0(%rax,%rax,1) */
3277 if (ABI_64_P (output_bfd))
3279 if (contents[roff + 5] == 0xb8)
3281 memcpy (contents + roff - 3,
3282 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3283 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3287 memcpy (contents + roff - 4,
3288 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3292 memcpy (contents + roff - 3,
3293 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3295 bfd_put_32 (output_bfd,
3296 elf_x86_64_tpoff (info, relocation),
3297 contents + roff + 8 + largepic);
3298 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3299 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3304 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3306 /* GDesc -> LE transition.
3307 It's originally something like:
3308 leaq x@tlsdesc(%rip), %rax
3311 Change it to: movl $x@tpoff, %rax. */
3313 unsigned int val, type;
3315 type = bfd_get_8 (input_bfd, contents + roff - 3);
3316 val = bfd_get_8 (input_bfd, contents + roff - 1);
3317 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3318 contents + roff - 3);
3319 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3320 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3321 contents + roff - 1);
3322 bfd_put_32 (output_bfd,
3323 elf_x86_64_tpoff (info, relocation),
3327 else if (r_type == R_X86_64_TLSDESC_CALL)
3329 /* GDesc -> LE transition. Originally a call *(%rax); turn it into xchg %ax,%ax (a two-byte nop). */
3334 bfd_put_8 (output_bfd, 0x66, contents + roff);
3335 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3338 else if (r_type == R_X86_64_GOTTPOFF)
3340 /* IE->LE transition:
3341 For 64bit, originally it can be one of:
3342 movq foo@gottpoff(%rip), %reg
3343 addq foo@gottpoff(%rip), %reg
3346 leaq foo(%reg), %reg
3348 For 32bit, originally it can be one of:
3349 movq foo@gottpoff(%rip), %reg
3350 addl foo@gottpoff(%rip), %reg
3353 leal foo(%reg), %reg
3356 unsigned int val, type, reg;
3359 val = bfd_get_8 (input_bfd, contents + roff - 3);
3362 type = bfd_get_8 (input_bfd, contents + roff - 2);
3363 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3369 bfd_put_8 (output_bfd, 0x49,
3370 contents + roff - 3);
3371 else if (!ABI_64_P (output_bfd) && val == 0x44)
3372 bfd_put_8 (output_bfd, 0x41,
3373 contents + roff - 3);
3374 bfd_put_8 (output_bfd, 0xc7,
3375 contents + roff - 2);
3376 bfd_put_8 (output_bfd, 0xc0 | reg,
3377 contents + roff - 1);
3381 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3384 bfd_put_8 (output_bfd, 0x49,
3385 contents + roff - 3);
3386 else if (!ABI_64_P (output_bfd) && val == 0x44)
3387 bfd_put_8 (output_bfd, 0x41,
3388 contents + roff - 3);
3389 bfd_put_8 (output_bfd, 0x81,
3390 contents + roff - 2);
3391 bfd_put_8 (output_bfd, 0xc0 | reg,
3392 contents + roff - 1);
3396 /* addq/addl -> leaq/leal */
3398 bfd_put_8 (output_bfd, 0x4d,
3399 contents + roff - 3);
3400 else if (!ABI_64_P (output_bfd) && val == 0x44)
3401 bfd_put_8 (output_bfd, 0x45,
3402 contents + roff - 3);
3403 bfd_put_8 (output_bfd, 0x8d,
3404 contents + roff - 2);
3405 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3406 contents + roff - 1);
3408 bfd_put_32 (output_bfd,
3409 elf_x86_64_tpoff (info, relocation),
3417 if (htab->elf.sgot == NULL)
3422 off = h->got.offset;
3423 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3427 if (local_got_offsets == NULL)
3430 off = local_got_offsets[r_symndx];
3431 offplt = local_tlsdesc_gotents[r_symndx];
3438 Elf_Internal_Rela outrel;
3442 if (htab->elf.srelgot == NULL)
3445 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3447 if (GOT_TLS_GDESC_P (tls_type))
3449 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3450 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3451 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3452 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3453 + htab->elf.sgotplt->output_offset
3455 + htab->sgotplt_jump_table_size);
3456 sreloc = htab->elf.srelplt;
3458 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3460 outrel.r_addend = 0;
3461 elf_append_rela (output_bfd, sreloc, &outrel);
3464 sreloc = htab->elf.srelgot;
3466 outrel.r_offset = (htab->elf.sgot->output_section->vma
3467 + htab->elf.sgot->output_offset + off);
3469 if (GOT_TLS_GD_P (tls_type))
3470 dr_type = R_X86_64_DTPMOD64;
3471 else if (GOT_TLS_GDESC_P (tls_type))
3474 dr_type = R_X86_64_TPOFF64;
3476 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3477 outrel.r_addend = 0;
3478 if ((dr_type == R_X86_64_TPOFF64
3479 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3480 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3481 outrel.r_info = htab->r_info (indx, dr_type);
3483 elf_append_rela (output_bfd, sreloc, &outrel);
3485 if (GOT_TLS_GD_P (tls_type))
3489 BFD_ASSERT (! unresolved_reloc);
3490 bfd_put_64 (output_bfd,
3491 relocation - _bfd_x86_elf_dtpoff_base (info),
3492 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3496 bfd_put_64 (output_bfd, 0,
3497 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3498 outrel.r_info = htab->r_info (indx,
3500 outrel.r_offset += GOT_ENTRY_SIZE;
3501 elf_append_rela (output_bfd, sreloc,
3510 local_got_offsets[r_symndx] |= 1;
3513 if (off >= (bfd_vma) -2
3514 && ! GOT_TLS_GDESC_P (tls_type))
3516 if (r_type_tls == r_type)
3518 if (r_type == R_X86_64_GOTPC32_TLSDESC
3519 || r_type == R_X86_64_TLSDESC_CALL)
3520 relocation = htab->elf.sgotplt->output_section->vma
3521 + htab->elf.sgotplt->output_offset
3522 + offplt + htab->sgotplt_jump_table_size;
3524 relocation = htab->elf.sgot->output_section->vma
3525 + htab->elf.sgot->output_offset + off;
3526 unresolved_reloc = FALSE;
3530 bfd_vma roff = rel->r_offset;
3532 if (r_type == R_X86_64_TLSGD)
3534 /* GD->IE transition. For 64bit, change
3535 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3536 .word 0x6666; rex64; call __tls_get_addr@PLT
3538 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3540 call *__tls_get_addr@GOTPCREL(%rip)
3541 which may be converted to
3542 addr32 call __tls_get_addr
3545 addq foo@gottpoff(%rip), %rax
3547 leaq foo@tlsgd(%rip), %rdi
3548 .word 0x6666; rex64; call __tls_get_addr@PLT
3550 leaq foo@tlsgd(%rip), %rdi
3552 call *__tls_get_addr@GOTPCREL(%rip)
3553 which may be converted to
3554 addr32 call __tls_get_addr
3557 addq foo@gottpoff(%rip), %rax
3558 For largepic, change:
3559 leaq foo@tlsgd(%rip), %rdi
3560 movabsq $__tls_get_addr@pltoff, %rax
3565 addq foo@gottpoff(%rax), %rax
3566 nopw 0x0(%rax,%rax,1) */
3568 if (ABI_64_P (output_bfd))
3570 if (contents[roff + 5] == 0xb8)
3572 memcpy (contents + roff - 3,
3573 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3574 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3578 memcpy (contents + roff - 4,
3579 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3583 memcpy (contents + roff - 3,
3584 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3587 relocation = (htab->elf.sgot->output_section->vma
3588 + htab->elf.sgot->output_offset + off
3591 - input_section->output_section->vma
3592 - input_section->output_offset
3594 bfd_put_32 (output_bfd, relocation,
3595 contents + roff + 8 + largepic);
3596 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3601 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3603 /* GDesc -> IE transition.
3604 It's originally something like:
3605 leaq x@tlsdesc(%rip), %rax
3608 Change it to: movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3610 /* Now modify the instruction as appropriate. To
3611 turn a leaq into a movq in the form we use it, it
3612 suffices to change the second byte from 0x8d to 0x8b. */
3614 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3616 bfd_put_32 (output_bfd,
3617 htab->elf.sgot->output_section->vma
3618 + htab->elf.sgot->output_offset + off
3620 - input_section->output_section->vma
3621 - input_section->output_offset
3626 else if (r_type == R_X86_64_TLSDESC_CALL)
3628 /* GDesc -> IE transition. Originally a call *(%rax); turn it into xchg %ax,%ax. */
3635 bfd_put_8 (output_bfd, 0x66, contents + roff);
3636 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3644 case R_X86_64_TLSLD:
3645 if (! elf_x86_64_tls_transition (info, input_bfd,
3646 input_section, contents,
3647 symtab_hdr, sym_hashes,
3648 &r_type, GOT_UNKNOWN, rel,
3649 relend, h, r_symndx, TRUE))
3652 if (r_type != R_X86_64_TLSLD)
3654 /* LD->LE transition:
3655 leaq foo@tlsld(%rip), %rdi
3656 call __tls_get_addr@PLT
3657 For 64bit, we change it into:
3658 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3659 For 32bit, we change it into:
3660 nopl 0x0(%rax); movl %fs:0, %eax
3662 leaq foo@tlsld(%rip), %rdi;
3663 call *__tls_get_addr@GOTPCREL(%rip)
3664 which may be converted to
3665 addr32 call __tls_get_addr
3666 For 64bit, we change it into:
3667 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3668 For 32bit, we change it into:
3669 nopw 0x0(%rax); movl %fs:0, %eax
3670 For largepic, change:
3671 leaq foo@tlsgd(%rip), %rdi
3672 movabsq $__tls_get_addr@pltoff, %rax
3676 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3679 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3680 if (ABI_64_P (output_bfd))
3682 if (contents[rel->r_offset + 5] == 0xb8)
3683 memcpy (contents + rel->r_offset - 3,
3684 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3685 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3686 else if (contents[rel->r_offset + 4] == 0xff
3687 || contents[rel->r_offset + 4] == 0x67)
3688 memcpy (contents + rel->r_offset - 3,
3689 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3692 memcpy (contents + rel->r_offset - 3,
3693 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3697 if (contents[rel->r_offset + 4] == 0xff)
3698 memcpy (contents + rel->r_offset - 3,
3699 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3702 memcpy (contents + rel->r_offset - 3,
3703 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3705 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3706 and R_X86_64_PLTOFF64. */
3712 if (htab->elf.sgot == NULL)
3715 off = htab->tls_ld_or_ldm_got.offset;
3720 Elf_Internal_Rela outrel;
3722 if (htab->elf.srelgot == NULL)
3725 outrel.r_offset = (htab->elf.sgot->output_section->vma
3726 + htab->elf.sgot->output_offset + off);
3728 bfd_put_64 (output_bfd, 0,
3729 htab->elf.sgot->contents + off);
3730 bfd_put_64 (output_bfd, 0,
3731 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3732 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3733 outrel.r_addend = 0;
3734 elf_append_rela (output_bfd, htab->elf.srelgot,
3736 htab->tls_ld_or_ldm_got.offset |= 1;
3738 relocation = htab->elf.sgot->output_section->vma
3739 + htab->elf.sgot->output_offset + off;
3740 unresolved_reloc = FALSE;
3743 case R_X86_64_DTPOFF32:
3744 if (!bfd_link_executable (info)
3745 || (input_section->flags & SEC_CODE) == 0)
3746 relocation -= _bfd_x86_elf_dtpoff_base (info);
3748 relocation = elf_x86_64_tpoff (info, relocation);
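/* When linking an executable and the reference is in a code section, the
   surrounding LD sequence has been relaxed to LE (see the R_X86_64_TLSLD
   case), leaving the thread pointer rather than the module's DTV base in
   %rax, so the value must be an @tpoff instead of a @dtpoff.  */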
3751 case R_X86_64_TPOFF32:
3752 case R_X86_64_TPOFF64:
3753 BFD_ASSERT (bfd_link_executable (info));
3754 relocation = elf_x86_64_tpoff (info, relocation);
3757 case R_X86_64_DTPOFF64:
3758 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3759 relocation -= _bfd_x86_elf_dtpoff_base (info);
3766 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3767 because such sections are not SEC_ALLOC and thus ld.so will
3768 not process them. */
3769 if (unresolved_reloc
3770 && !((input_section->flags & SEC_DEBUGGING) != 0
3772 && _bfd_elf_section_offset (output_bfd, info, input_section,
3773 rel->r_offset) != (bfd_vma) -1)
3778 sec = h->root.u.def.section;
3779 if ((info->nocopyreloc
3780 || (eh->def_protected
3781 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3782 && !(h->root.u.def.section->flags & SEC_CODE))
3783 return elf_x86_64_need_pic (info, input_bfd, input_section,
3784 h, NULL, NULL, howto);
3789 /* xgettext:c-format */
3790 (_("%B(%A+%#Lx): unresolvable %s relocation against symbol `%s'"),
3795 h->root.root.string);
3801 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3802 contents, rel->r_offset,
3803 relocation, rel->r_addend);
3805 check_relocation_error:
3806 if (r != bfd_reloc_ok)
3811 name = h->root.root.string;
3814 name = bfd_elf_string_from_elf_section (input_bfd,
3815 symtab_hdr->sh_link,
3820 name = bfd_section_name (input_bfd, sec);
3823 if (r == bfd_reloc_overflow)
3825 if (converted_reloc)
3827 info->callbacks->einfo
3828 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3831 (*info->callbacks->reloc_overflow)
3832 (info, (h ? &h->root : NULL), name, howto->name,
3833 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3838 /* xgettext:c-format */
3839 (_("%B(%A+%#Lx): reloc against `%s': error %d"),
3840 input_bfd, input_section,
3841 rel->r_offset, name, (int) r);
3852 Elf_Internal_Shdr *rel_hdr;
3853 size_t deleted = rel - wrel;
3855 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3856 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3857 if (rel_hdr->sh_size == 0)
3859 /* It is too late to remove an empty reloc section. Leave one NONE reloc.
3861 ??? What is wrong with an empty section??? */
3862 rel_hdr->sh_size = rel_hdr->sh_entsize;
3865 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3866 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3867 input_section->reloc_count -= deleted;
3873 /* Finish up dynamic symbol handling. We set the contents of various
3874 dynamic sections here. */
3877 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3878 struct bfd_link_info *info,
3879 struct elf_link_hash_entry *h,
3880 Elf_Internal_Sym *sym)
3882 struct elf_x86_link_hash_table *htab;
3883 bfd_boolean use_plt_second;
3884 struct elf_x86_link_hash_entry *eh;
3885 bfd_boolean local_undefweak;
3887 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3891 /* Use the second PLT section only if there is a .plt section. */
3892 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3894 eh = (struct elf_x86_link_hash_entry *) h;
3895 if (eh->no_finish_dynamic_symbol)
3898 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3899 resolved undefined weak symbols in executable so that their
3900 references have value 0 at run-time. */
3901 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3903 if (h->plt.offset != (bfd_vma) -1)
3906 bfd_vma got_offset, plt_offset;
3907 Elf_Internal_Rela rela;
3909 asection *plt, *gotplt, *relplt, *resolved_plt;
3910 const struct elf_backend_data *bed;
3911 bfd_vma plt_got_pcrel_offset;
3913 /* When building a static executable, use .iplt, .igot.plt and
3914 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3915 if (htab->elf.splt != NULL)
3917 plt = htab->elf.splt;
3918 gotplt = htab->elf.sgotplt;
3919 relplt = htab->elf.srelplt;
3923 plt = htab->elf.iplt;
3924 gotplt = htab->elf.igotplt;
3925 relplt = htab->elf.irelplt;
3928 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3930 /* Get the index in the procedure linkage table which
3931 corresponds to this symbol. This is the index of this symbol
3932 in all the symbols for which we are making plt entries. The
3933 first entry in the procedure linkage table is reserved.
3935 Get the offset into the .got table of the entry that
3936 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3937 bytes. The first three are reserved for the dynamic linker.
3939 For static executables, we don't reserve anything. */
3941 if (plt == htab->elf.splt)
3943 got_offset = (h->plt.offset / htab->plt.plt_entry_size
3944 - htab->plt.has_plt0);
3945 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
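/* (As a reminder, GOT[0] holds the link-time address of _DYNAMIC and
   GOT[1]/GOT[2] are filled in at run time by the dynamic linker, which is
   why three entries are skipped here.)  */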
3949 got_offset = h->plt.offset / htab->plt.plt_entry_size;
3950 got_offset = got_offset * GOT_ENTRY_SIZE;
3953 /* Fill in the entry in the procedure linkage table. */
3954 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
3955 htab->plt.plt_entry_size);
3958 memcpy (htab->plt_second->contents + eh->plt_second.offset,
3959 htab->non_lazy_plt->plt_entry,
3960 htab->non_lazy_plt->plt_entry_size);
3962 resolved_plt = htab->plt_second;
3963 plt_offset = eh->plt_second.offset;
3968 plt_offset = h->plt.offset;
3971 /* Insert the relocation positions of the plt section. */
3973 /* Put in the offset of the PC-relative instruction referring to the GOT entry,
3974 subtracting the size of that instruction. */
3975 plt_got_pcrel_offset = (gotplt->output_section->vma
3976 + gotplt->output_offset
3978 - resolved_plt->output_section->vma
3979 - resolved_plt->output_offset
3981 - htab->plt.plt_got_insn_size);
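/* The check below verifies that the displacement fits in a signed 32-bit
   field: adding 0x80000000 maps the valid range [-0x80000000, 0x7fffffff]
   onto [0, 0xffffffff], so anything outside it fails the comparison.  */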
3983 /* Check PC-relative offset overflow in PLT entry. */
3984 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
3985 /* xgettext:c-format */
3986 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"),
3987 output_bfd, h->root.root.string);
3989 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
3990 (resolved_plt->contents + plt_offset
3991 + htab->plt.plt_got_offset));
3993 /* Fill in the entry in the global offset table, initially this
3994 points to the second part of the PLT entry. Leave the entry
3995 as zero for undefined weak symbol in PIE. No PLT relocation
3996 against undefined weak symbol in PIE. */
3997 if (!local_undefweak)
3999 if (htab->plt.has_plt0)
4000 bfd_put_64 (output_bfd, (plt->output_section->vma
4001 + plt->output_offset
4003 + htab->lazy_plt->plt_lazy_offset),
4004 gotplt->contents + got_offset);
4006 /* Fill in the entry in the .rela.plt section. */
4007 rela.r_offset = (gotplt->output_section->vma
4008 + gotplt->output_offset
4010 if (PLT_LOCAL_IFUNC_P (info, h))
4012 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
4013 h->root.root.string,
4014 h->root.u.def.section->owner);
4016 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4017 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4018 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4019 rela.r_addend = (h->root.u.def.value
4020 + h->root.u.def.section->output_section->vma
4021 + h->root.u.def.section->output_offset);
4022 /* R_X86_64_IRELATIVE comes last. */
4023 plt_index = htab->next_irelative_index--;
4027 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4029 plt_index = htab->next_jump_slot_index++;
4032 /* Don't fill the second and third slots in the PLT entry for
4033 static executables or when there is no PLT0. */
4034 if (plt == htab->elf.splt && htab->plt.has_plt0)
4037 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4039 /* Put relocation index. */
4040 bfd_put_32 (output_bfd, plt_index,
4041 (plt->contents + h->plt.offset
4042 + htab->lazy_plt->plt_reloc_offset));
4044 /* Put offset for jmp .PLT0 and check for overflow. We don't
4045 check relocation index for overflow since branch displacement
4046 will overflow first. */
4047 if (plt0_offset > 0x80000000)
4048 /* xgettext:c-format */
4049 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"),
4050 output_bfd, h->root.root.string);
4051 bfd_put_32 (output_bfd, - plt0_offset,
4052 (plt->contents + h->plt.offset
4053 + htab->lazy_plt->plt_plt_offset));
4056 bed = get_elf_backend_data (output_bfd);
4057 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4058 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4061 else if (eh->plt_got.offset != (bfd_vma) -1)
4063 bfd_vma got_offset, plt_offset;
4064 asection *plt, *got;
4065 bfd_boolean got_after_plt;
4066 int32_t got_pcrel_offset;
4068 /* Set the entry in the GOT procedure linkage table. */
4069 plt = htab->plt_got;
4070 got = htab->elf.sgot;
4071 got_offset = h->got.offset;
4073 if (got_offset == (bfd_vma) -1
4074 || (h->type == STT_GNU_IFUNC && h->def_regular)
4079 /* Use the non-lazy PLT entry template for the GOT PLT since they
4080 are identical. */
4081 /* Fill in the entry in the GOT procedure linkage table. */
4082 plt_offset = eh->plt_got.offset;
4083 memcpy (plt->contents + plt_offset,
4084 htab->non_lazy_plt->plt_entry,
4085 htab->non_lazy_plt->plt_entry_size);
4087 /* Put in the offset of the PC-relative instruction referring to the GOT
4088 entry, subtracting the size of that instruction. */
4089 got_pcrel_offset = (got->output_section->vma
4090 + got->output_offset
4092 - plt->output_section->vma
4093 - plt->output_offset
4095 - htab->non_lazy_plt->plt_got_insn_size);
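/* got_pcrel_offset is held in an int32_t, so a 32-bit overflow shows up
   as a sign that disagrees with the relative placement of .got and .plt;
   the check below relies on exactly that.  */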
4097 /* Check PC-relative offset overflow in GOT PLT entry. */
4098 got_after_plt = got->output_section->vma > plt->output_section->vma;
4099 if ((got_after_plt && got_pcrel_offset < 0)
4100 || (!got_after_plt && got_pcrel_offset > 0))
4101 /* xgettext:c-format */
4102 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4103 output_bfd, h->root.root.string);
4105 bfd_put_32 (output_bfd, got_pcrel_offset,
4106 (plt->contents + plt_offset
4107 + htab->non_lazy_plt->plt_got_offset));
4110 if (!local_undefweak
4112 && (h->plt.offset != (bfd_vma) -1
4113 || eh->plt_got.offset != (bfd_vma) -1))
4115 /* Mark the symbol as undefined, rather than as defined in
4116 the .plt section. Leave the value if there were any
4117 relocations where pointer equality matters (this is a clue
4118 for the dynamic linker, to make function pointer
4119 comparisons work between an application and shared
4120 library), otherwise set it to zero. If a function is only
4121 called from a binary, there is no need to slow down
4122 shared libraries because of that. */
4123 sym->st_shndx = SHN_UNDEF;
4124 if (!h->pointer_equality_needed)
4128 /* Don't generate dynamic GOT relocation against undefined weak
4129 symbol in executable. */
4130 if (h->got.offset != (bfd_vma) -1
4131 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4132 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4133 && !local_undefweak)
4135 Elf_Internal_Rela rela;
4136 asection *relgot = htab->elf.srelgot;
4138 /* This symbol has an entry in the global offset table. Set it up. */
4140 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4143 rela.r_offset = (htab->elf.sgot->output_section->vma
4144 + htab->elf.sgot->output_offset
4145 + (h->got.offset &~ (bfd_vma) 1));
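/* The least significant bit of h->got.offset is only the 'entry already
   initialized' flag set in relocate_section, so mask it off to recover
   the real GOT offset.  */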
4147 /* If this is a static link, or it is a -Bsymbolic link and the
4148 symbol is defined locally or was forced to be local because
4149 of a version file, we just want to emit a RELATIVE reloc.
4150 The entry in the global offset table will already have been
4151 initialized in the relocate_section function. */
4153 && h->type == STT_GNU_IFUNC)
4155 if (h->plt.offset == (bfd_vma) -1)
4157 /* STT_GNU_IFUNC is referenced without PLT. */
4158 if (htab->elf.splt == NULL)
4160 /* Use the .rel[a].iplt section to store .got relocations
4161 in a static executable. */
4162 relgot = htab->elf.irelplt;
4164 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4166 info->callbacks->minfo (_("Local IFUNC function `%s' in %B\n"),
4167 h->root.root.string,
4168 h->root.u.def.section->owner);
4170 rela.r_info = htab->r_info (0,
4171 R_X86_64_IRELATIVE);
4172 rela.r_addend = (h->root.u.def.value
4173 + h->root.u.def.section->output_section->vma
4174 + h->root.u.def.section->output_offset);
4179 else if (bfd_link_pic (info))
4181 /* Generate R_X86_64_GLOB_DAT. */
4189 if (!h->pointer_equality_needed)
4192 /* For a non-shared object, we can't use .got.plt, which
4193 contains the real function address if we need pointer
4194 equality. We load the GOT entry with the PLT entry. */
4195 if (htab->plt_second != NULL)
4197 plt = htab->plt_second;
4198 plt_offset = eh->plt_second.offset;
4202 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4203 plt_offset = h->plt.offset;
4205 bfd_put_64 (output_bfd, (plt->output_section->vma
4206 + plt->output_offset
4208 htab->elf.sgot->contents + h->got.offset);
4212 else if (bfd_link_pic (info)
4213 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4215 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4217 BFD_ASSERT((h->got.offset & 1) != 0);
4218 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4219 rela.r_addend = (h->root.u.def.value
4220 + h->root.u.def.section->output_section->vma
4221 + h->root.u.def.section->output_offset);
4225 BFD_ASSERT((h->got.offset & 1) == 0);
4227 bfd_put_64 (output_bfd, (bfd_vma) 0,
4228 htab->elf.sgot->contents + h->got.offset);
4229 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4233 elf_append_rela (output_bfd, relgot, &rela);
4238 Elf_Internal_Rela rela;
4241 /* This symbol needs a copy reloc. Set it up. */
4242 VERIFY_COPY_RELOC (h, htab)
4244 rela.r_offset = (h->root.u.def.value
4245 + h->root.u.def.section->output_section->vma
4246 + h->root.u.def.section->output_offset);
4247 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4249 if (h->root.u.def.section == htab->elf.sdynrelro)
4250 s = htab->elf.sreldynrelro;
4252 s = htab->elf.srelbss;
4253 elf_append_rela (output_bfd, s, &rela);
4259 /* Finish up local dynamic symbol handling. We set the contents of
4260 various dynamic sections here. */
4263 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4265 struct elf_link_hash_entry *h
4266 = (struct elf_link_hash_entry *) *slot;
4267 struct bfd_link_info *info
4268 = (struct bfd_link_info *) inf;
4270 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4274 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4275 here since undefined weak symbol may not be dynamic and may not be
4276 called for elf_x86_64_finish_dynamic_symbol. */
4279 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4282 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4283 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4285 if (h->root.type != bfd_link_hash_undefweak
4286 || h->dynindx != -1)
4289 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4293 /* Used to decide how to sort relocs in an optimal manner for the
4294 dynamic linker, before writing them out. */
4296 static enum elf_reloc_type_class
4297 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4298 const asection *rel_sec ATTRIBUTE_UNUSED,
4299 const Elf_Internal_Rela *rela)
4301 bfd *abfd = info->output_bfd;
4302 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4303 struct elf_x86_link_hash_table *htab
4304 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4306 if (htab->elf.dynsym != NULL
4307 && htab->elf.dynsym->contents != NULL)
4309 /* Check relocation against STT_GNU_IFUNC symbol if there are dynamic symbols. */
4311 unsigned long r_symndx = htab->r_sym (rela->r_info);
4312 if (r_symndx != STN_UNDEF)
4314 Elf_Internal_Sym sym;
4315 if (!bed->s->swap_symbol_in (abfd,
4316 (htab->elf.dynsym->contents
4317 + r_symndx * bed->s->sizeof_sym),
4321 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4322 return reloc_class_ifunc;
4326 switch ((int) ELF32_R_TYPE (rela->r_info))
4328 case R_X86_64_IRELATIVE:
4329 return reloc_class_ifunc;
4330 case R_X86_64_RELATIVE:
4331 case R_X86_64_RELATIVE64:
4332 return reloc_class_relative;
4333 case R_X86_64_JUMP_SLOT:
4334 return reloc_class_plt;
4336 return reloc_class_copy;
4338 return reloc_class_normal;
4342 /* Finish up the dynamic sections. */
4345 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4346 struct bfd_link_info *info)
4348 struct elf_x86_link_hash_table *htab;
4350 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4354 if (! htab->elf.dynamic_sections_created)
4357 if (htab->elf.splt && htab->elf.splt->size > 0)
4359 elf_section_data (htab->elf.splt->output_section)
4360 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4362 if (htab->plt.has_plt0)
4364 /* Fill in the special first entry in the procedure linkage table. */
4366 memcpy (htab->elf.splt->contents,
4367 htab->lazy_plt->plt0_entry,
4368 htab->lazy_plt->plt0_entry_size);
4369 /* Add offset for pushq GOT+8(%rip); since the instruction
4370 uses 6 bytes, subtract this value. */
4371 bfd_put_32 (output_bfd,
4372 (htab->elf.sgotplt->output_section->vma
4373 + htab->elf.sgotplt->output_offset
4375 - htab->elf.splt->output_section->vma
4376 - htab->elf.splt->output_offset
4378 (htab->elf.splt->contents
4379 + htab->lazy_plt->plt0_got1_offset));
4380 /* Add offset for the PC-relative instruction accessing
4381 GOT+16, subtracting the offset to the end of that instruction. */
4383 bfd_put_32 (output_bfd,
4384 (htab->elf.sgotplt->output_section->vma
4385 + htab->elf.sgotplt->output_offset
4387 - htab->elf.splt->output_section->vma
4388 - htab->elf.splt->output_offset
4389 - htab->lazy_plt->plt0_got2_insn_end),
4390 (htab->elf.splt->contents
4391 + htab->lazy_plt->plt0_got2_offset));
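/* For the standard lazy PLT this yields, schematically (a sketch, not the
   byte-exact template):
	pushq  GOT+8(%rip)    # push &GOT[1] (link map)
	jmpq   *GOT+16(%rip)  # jump to _dl_runtime_resolve via GOT[2]
   with the two 32-bit displacements patched in by the stores above.  */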
4394 if (htab->tlsdesc_plt)
4396 bfd_put_64 (output_bfd, (bfd_vma) 0,
4397 htab->elf.sgot->contents + htab->tlsdesc_got);
4399 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4400 htab->lazy_plt->plt0_entry,
4401 htab->lazy_plt->plt0_entry_size);
4403 /* Add offset for pushq GOT+8(%rip); since the
4404 instruction uses 6 bytes, subtract this value. */
4405 bfd_put_32 (output_bfd,
4406 (htab->elf.sgotplt->output_section->vma
4407 + htab->elf.sgotplt->output_offset
4409 - htab->elf.splt->output_section->vma
4410 - htab->elf.splt->output_offset
4413 (htab->elf.splt->contents
4415 + htab->lazy_plt->plt0_got1_offset));
4416 /* Add offset for the PC-relative instruction accessing
4417 GOT+TDG, where TDG stands for htab->tlsdesc_got,
4418 subtracting the offset to the end of that instruction. */
4420 bfd_put_32 (output_bfd,
4421 (htab->elf.sgot->output_section->vma
4422 + htab->elf.sgot->output_offset
4424 - htab->elf.splt->output_section->vma
4425 - htab->elf.splt->output_offset
4427 - htab->lazy_plt->plt0_got2_insn_end),
4428 (htab->elf.splt->contents
4430 + htab->lazy_plt->plt0_got2_offset));
4434 /* Fill PLT entries for undefined weak symbols in PIE. */
4435 if (bfd_link_pie (info))
4436 bfd_hash_traverse (&info->hash->table,
4437 elf_x86_64_pie_finish_undefweak_symbol,
4443 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4444 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4445 It has to be done before elf_link_sort_relocs is called so that
4446 dynamic relocations are properly sorted. */
4449 elf_x86_64_output_arch_local_syms
4450 (bfd *output_bfd ATTRIBUTE_UNUSED,
4451 struct bfd_link_info *info,
4452 void *flaginfo ATTRIBUTE_UNUSED,
4453 int (*func) (void *, const char *,
4456 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4458 struct elf_x86_link_hash_table *htab
4459 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4463 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4464 htab_traverse (htab->loc_hash_table,
4465 elf_x86_64_finish_local_dynamic_symbol,
4471 /* Forward declaration. */
4472 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4474 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4475 dynamic relocations. */
4478 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4479 long symcount ATTRIBUTE_UNUSED,
4480 asymbol **syms ATTRIBUTE_UNUSED,
4487 bfd_byte *plt_contents;
4489 const struct elf_x86_lazy_plt_layout *lazy_plt;
4490 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4491 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4492 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4493 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4494 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4496 enum elf_x86_plt_type plt_type;
4497 struct elf_x86_plt plts[] =
4499 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4500 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4501 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4502 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4503 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
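  /* Each entry above names a candidate PLT-style section; the loop
     below reads its contents, detects which flavour of PLT it holds,
     records the entry geometry, and finally hands the filled-in array
     to _bfd_x86_elf_get_synthetic_symtab.  */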
4508 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4511 if (dynsymcount <= 0)
4514 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
4518 if (get_elf_x86_backend_data (abfd)->target_os == is_normal)
4520 lazy_plt = &elf_x86_64_lazy_plt;
4521 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4522 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4523 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4524 if (ABI_64_P (abfd))
4526 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4527 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4531 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4532 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4537 lazy_plt = &elf_x86_64_nacl_plt;
4538 non_lazy_plt = NULL;
4539 lazy_bnd_plt = NULL;
4540 non_lazy_bnd_plt = NULL;
4541 lazy_ibt_plt = NULL;
4542 non_lazy_ibt_plt = NULL;
4546 for (j = 0; plts[j].name != NULL; j++)
4548 plt = bfd_get_section_by_name (abfd, plts[j].name);
4549 if (plt == NULL || plt->size == 0)
4552 /* Get the PLT section contents. */
4553 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4554 if (plt_contents == NULL)
4556 if (!bfd_get_section_contents (abfd, (asection *) plt,
4557 plt_contents, 0, plt->size))
4559 free (plt_contents);
4563 /* Check what kind of PLT it is. */
4564 plt_type = plt_unknown;
4565 if (plts[j].type == plt_unknown
4566 && (plt->size >= (lazy_plt->plt_entry_size
4567 + lazy_plt->plt_entry_size)))
4569 /* Match lazy PLT first. Need to check the first two
	 instructions. */
4571 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4572 lazy_plt->plt0_got1_offset) == 0)
4573 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
	     2) == 0))
4575 plt_type = plt_lazy;
4576 else if (lazy_bnd_plt != NULL
4577 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4578 lazy_bnd_plt->plt0_got1_offset) == 0)
4579 && (memcmp (plt_contents + 6,
4580 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4582 plt_type = plt_lazy | plt_second;
4583 /* The first entry in the lazy IBT PLT is the same as the
	 lazy BND PLT. */
4585 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4586 lazy_ibt_plt->plt_entry,
4587 lazy_ibt_plt->plt_got_offset) == 0))
4588 lazy_plt = lazy_ibt_plt;
	  else
4590 lazy_plt = lazy_bnd_plt;
4594 if (non_lazy_plt != NULL
4595 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4596 && plt->size >= non_lazy_plt->plt_entry_size)
4598 /* Match non-lazy PLT. */
4599 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4600 non_lazy_plt->plt_got_offset) == 0)
4601 plt_type = plt_non_lazy;
4604 if (plt_type == plt_unknown || plt_type == plt_second)
4606 if (non_lazy_bnd_plt != NULL
4607 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4608 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4609 non_lazy_bnd_plt->plt_got_offset) == 0))
4611 /* Match BND PLT. */
4612 plt_type = plt_second;
4613 non_lazy_plt = non_lazy_bnd_plt;
4615 else if (non_lazy_ibt_plt != NULL
4616 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4617 && (memcmp (plt_contents,
4618 non_lazy_ibt_plt->plt_entry,
4619 non_lazy_ibt_plt->plt_got_offset) == 0))
4621 /* Match IBT PLT. */
4622 plt_type = plt_second;
4623 non_lazy_plt = non_lazy_ibt_plt;
4627 if (plt_type == plt_unknown)
4629 free (plt_contents);
4634 plts[j].type = plt_type;
4636 if ((plt_type & plt_lazy))
4638 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4639 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4640 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4641 /* Skip PLT0 in lazy PLT. */
4646 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4647 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4648 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4652 /* Skip lazy PLT when the second PLT is used. */
4653 if (plt_type == (plt_lazy | plt_second))
4657 n = plt->size / plts[j].plt_entry_size;
4662 plts[j].contents = plt_contents;
4665 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4666 (bfd_vma) 0, plts, dynsyms,
4670 /* Handle an x86-64 specific section when reading an object file. This
4671 is called when elfcode.h finds a section with an unknown type. */
4674 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4675 const char *name, int shindex)
4677 if (hdr->sh_type != SHT_X86_64_UNWIND)
4680 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4686 /* Hook called by the linker routine which adds symbols from an object
4687 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
   of .bss. */
4691 elf_x86_64_add_symbol_hook (bfd *abfd,
4692 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4693 Elf_Internal_Sym *sym,
4694 const char **namep ATTRIBUTE_UNUSED,
4695 flagword *flagsp ATTRIBUTE_UNUSED,
4701 switch (sym->st_shndx)
4703 case SHN_X86_64_LCOMMON:
4704 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4707 lcomm = bfd_make_section_with_flags (abfd,
4711 | SEC_LINKER_CREATED));
4714 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
4717 *valp = sym->st_size;
4725 /* Given a BFD section, try to locate the corresponding ELF section
   index. */
4729 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4730 asection *sec, int *index_return)
4732 if (sec == &_bfd_elf_large_com_section)
4734 *index_return = SHN_X86_64_LCOMMON;
4740 /* Process a symbol. */
4743 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
			      asymbol *asym)
4746 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4748 switch (elfsym->internal_elf_sym.st_shndx)
4750 case SHN_X86_64_LCOMMON:
4751 asym->section = &_bfd_elf_large_com_section;
4752 asym->value = elfsym->internal_elf_sym.st_size;
4753 /* Common symbol doesn't set BSF_GLOBAL. */
4754 asym->flags &= ~BSF_GLOBAL;
4760 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4762 return (sym->st_shndx == SHN_COMMON
4763 || sym->st_shndx == SHN_X86_64_LCOMMON);
4767 elf_x86_64_common_section_index (asection *sec)
4769 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4772 return SHN_X86_64_LCOMMON;
4776 elf_x86_64_common_section (asection *sec)
4778 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4779 return bfd_com_section_ptr;
4781 return &_bfd_elf_large_com_section;
4785 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4786 const Elf_Internal_Sym *sym,
4791 const asection *oldsec)
4793 /* A normal common symbol and a large common symbol result in a
4794 normal common symbol. We turn the large common symbol into a
   normal one. */
4797 && h->root.type == bfd_link_hash_common
4799 && bfd_is_com_section (*psec)
4802 if (sym->st_shndx == SHN_COMMON
4803 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
4805 h->root.u.c.p->section
4806 = bfd_make_section_old_way (oldbfd, "COMMON");
4807 h->root.u.c.p->section->flags = SEC_ALLOC;
4809 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4810 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4811 *psec = bfd_com_section_ptr;
4818 elf_x86_64_additional_program_headers (bfd *abfd,
4819 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4824 /* Check to see if we need a large readonly segment. */
4825 s = bfd_get_section_by_name (abfd, ".lrodata");
4826 if (s && (s->flags & SEC_LOAD))
4829 /* Check to see if we need a large data segment. Since the .lbss
4830 section is placed right after the .bss section, there should be no
4831 need for a large data segment just because of .lbss. */
4832 s = bfd_get_section_by_name (abfd, ".ldata");
4833 if (s && (s->flags & SEC_LOAD))
4839 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
4842 elf_x86_64_relocs_compatible (const bfd_target *input,
4843 const bfd_target *output)
4845 return ((xvec_get_elf_backend_data (input)->s->elfclass
4846 == xvec_get_elf_backend_data (output)->s->elfclass)
4847 && _bfd_elf_relocs_compatible (input, output));
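/* Both the LP64 and x32 vectors use EM_X86_64, so the generic
   _bfd_elf_relocs_compatible test alone would let ELFCLASS64 and
   ELFCLASS32 objects mix; comparing the ELF class first keeps the two
   ABIs apart.  */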
4850 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4851 with GNU properties if found. Otherwise, return NULL. */
4854 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4856 struct elf_x86_init_table init_table;
4858 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4859 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4860 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4861 != (int) R_X86_64_GNU_VTINHERIT)
4862 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4863 != (int) R_X86_64_GNU_VTENTRY))
4866 /* This is unused for x86-64. */
4867 init_table.plt0_pad_byte = 0x90;
4869 if (get_elf_x86_backend_data (info->output_bfd)->target_os
4874 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4875 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4879 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4880 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4883 if (ABI_64_P (info->output_bfd))
4885 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4886 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4890 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4891 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4896 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4897 init_table.non_lazy_plt = NULL;
4898 init_table.lazy_ibt_plt = NULL;
4899 init_table.non_lazy_ibt_plt = NULL;
4902 if (ABI_64_P (info->output_bfd))
4904 init_table.r_info = elf64_r_info;
4905 init_table.r_sym = elf64_r_sym;
4909 init_table.r_info = elf32_r_info;
4910 init_table.r_sym = elf32_r_sym;
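  /* ELF64 encodes r_info as (r_sym << 32) | r_type while ELF32 uses
     (r_sym << 8) | r_type, so the helpers above must match the class
     of the output BFD.  */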
4913 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
4916 static const struct bfd_elf_special_section
4917 elf_x86_64_special_sections[]=
4919 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4920 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4921 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
4922 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4923 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4924 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4925 { NULL, 0, 0, 0, 0 }
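/* These are the SHF_X86_64_LARGE counterparts of .bss, .data and
   .rodata; compilers targeting the medium/large code models (for
   example gcc -mcmodel=medium) typically place large objects there so
   they need not stay within RIP-relative (2GB) reach.  */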
4928 #define TARGET_LITTLE_SYM x86_64_elf64_vec
4929 #define TARGET_LITTLE_NAME "elf64-x86-64"
4930 #define ELF_ARCH bfd_arch_i386
4931 #define ELF_TARGET_ID X86_64_ELF_DATA
4932 #define ELF_MACHINE_CODE EM_X86_64
4933 #define ELF_MAXPAGESIZE 0x200000
4934 #define ELF_MINPAGESIZE 0x1000
4935 #define ELF_COMMONPAGESIZE 0x1000
4937 #define elf_backend_can_gc_sections 1
4938 #define elf_backend_can_refcount 1
4939 #define elf_backend_want_got_plt 1
4940 #define elf_backend_plt_readonly 1
4941 #define elf_backend_want_plt_sym 0
4942 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
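/* Three reserved GOT slots: GOT[0] holds the link-time address of
   _DYNAMIC, while the dynamic linker fills GOT[1] and GOT[2] with its
   link-map pointer and lazy-resolution entry point.  */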
4943 #define elf_backend_rela_normal 1
4944 #define elf_backend_plt_alignment 4
4945 #define elf_backend_extern_protected_data 1
4946 #define elf_backend_caches_rawsize 1
4947 #define elf_backend_dtrel_excludes_plt 1
4948 #define elf_backend_want_dynrelro 1
4950 #define elf_info_to_howto elf_x86_64_info_to_howto
4952 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
4953 #define bfd_elf64_bfd_reloc_name_lookup \
4954 elf_x86_64_reloc_name_lookup
4956 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
4957 #define elf_backend_check_relocs elf_x86_64_check_relocs
4958 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
4959 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
4960 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
4961 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
4962 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
4963 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
4965 #define elf_backend_write_core_note elf_x86_64_write_core_note
4967 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
4968 #define elf_backend_relocate_section elf_x86_64_relocate_section
4969 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
4970 #define elf_backend_object_p elf64_x86_64_elf_object_p
4971 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
4973 #define elf_backend_section_from_shdr \
4974 elf_x86_64_section_from_shdr
4976 #define elf_backend_section_from_bfd_section \
4977 elf_x86_64_elf_section_from_bfd_section
4978 #define elf_backend_add_symbol_hook \
4979 elf_x86_64_add_symbol_hook
4980 #define elf_backend_symbol_processing \
4981 elf_x86_64_symbol_processing
4982 #define elf_backend_common_section_index \
4983 elf_x86_64_common_section_index
4984 #define elf_backend_common_section \
4985 elf_x86_64_common_section
4986 #define elf_backend_common_definition \
4987 elf_x86_64_common_definition
4988 #define elf_backend_merge_symbol \
4989 elf_x86_64_merge_symbol
4990 #define elf_backend_special_sections \
4991 elf_x86_64_special_sections
4992 #define elf_backend_additional_program_headers \
4993 elf_x86_64_additional_program_headers
4994 #define elf_backend_setup_gnu_properties \
4995 elf_x86_64_link_setup_gnu_properties
4996 #define elf_backend_hide_symbol \
4997 _bfd_x86_elf_hide_symbol
4999 #include "elf64-target.h"
5001 /* CloudABI support. */
5003 #undef TARGET_LITTLE_SYM
5004 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5005 #undef TARGET_LITTLE_NAME
5006 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5009 #define ELF_OSABI ELFOSABI_CLOUDABI
5012 #define elf64_bed elf64_x86_64_cloudabi_bed
5014 #include "elf64-target.h"
5016 /* FreeBSD support. */
5018 #undef TARGET_LITTLE_SYM
5019 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5020 #undef TARGET_LITTLE_NAME
5021 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5024 #define ELF_OSABI ELFOSABI_FREEBSD
5027 #define elf64_bed elf64_x86_64_fbsd_bed
5029 #include "elf64-target.h"
5031 /* Solaris 2 support. */
5033 #undef TARGET_LITTLE_SYM
5034 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5035 #undef TARGET_LITTLE_NAME
5036 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5038 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5039 objects won't be recognized. */
5043 #define elf64_bed elf64_x86_64_sol2_bed
5045 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
   boundary. */
5047 #undef elf_backend_static_tls_alignment
5048 #define elf_backend_static_tls_alignment 16
5050 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5052 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
   File. */
5054 #undef elf_backend_want_plt_sym
5055 #define elf_backend_want_plt_sym 1
5057 #undef elf_backend_strtab_flags
5058 #define elf_backend_strtab_flags SHF_STRINGS
5061 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5062 bfd *obfd ATTRIBUTE_UNUSED,
5063 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5064 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5066 /* PR 19938: FIXME: Need to add code for setting the sh_info
5067 and sh_link fields of Solaris specific section types. */
5071 #undef elf_backend_copy_special_section_fields
5072 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5074 #include "elf64-target.h"
5076 /* Native Client support. */
5079 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5081 /* Set the right machine number for a NaCl x86-64 ELF64 file. */
5082 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5086 #undef TARGET_LITTLE_SYM
5087 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec
5088 #undef TARGET_LITTLE_NAME
5089 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl"
5091 #define elf64_bed elf64_x86_64_nacl_bed
5093 #undef ELF_MAXPAGESIZE
5094 #undef ELF_MINPAGESIZE
5095 #undef ELF_COMMONPAGESIZE
5096 #define ELF_MAXPAGESIZE 0x10000
5097 #define ELF_MINPAGESIZE 0x10000
5098 #define ELF_COMMONPAGESIZE 0x10000
5100 /* Restore defaults. */
5102 #undef elf_backend_static_tls_alignment
5103 #undef elf_backend_want_plt_sym
5104 #define elf_backend_want_plt_sym 0
5105 #undef elf_backend_strtab_flags
5106 #undef elf_backend_copy_special_section_fields
5108 /* NaCl uses substantially different PLT entries for the same effects. */
5110 #undef elf_backend_plt_alignment
5111 #define elf_backend_plt_alignment 5
5112 #define NACL_PLT_ENTRY_SIZE 64
5113 #define NACLMASK 0xe0 /* 32-byte alignment mask. */
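/* NaCl sandboxing: indirect branches may only target 32-byte bundle
   boundaries, so every PLT entry below masks the low five bits of the
   destination ("and $-32, %r11d") and adds the sandbox base kept in
   %r15 before the "jmpq *%r11".  */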
5115 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5117 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
5118 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */
5119 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5120 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5121 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5123 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */
5124 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */
5126 /* 32 bytes of nop to pad out to the standard size. */
5127 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5128 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5129 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5130 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5131 0x66, /* excess data16 prefix */
0x90 /* nop */
5135 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5137 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */
5138 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */
5139 0x4d, 0x01, 0xfb, /* add %r15, %r11 */
5140 0x41, 0xff, 0xe3, /* jmpq *%r11 */
5142 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */
5143 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5144 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5146 /* Lazy GOT entries point here (32-byte aligned). */
5147 0x68, /* pushq immediate */
5148 0, 0, 0, 0, /* replaced with index into relocation table. */
5149 0xe9, /* jmp relative */
5150 0, 0, 0, 0, /* replaced with offset to start of .plt0. */
5152 /* 22 bytes of nop to pad out to the standard size. */
5153 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */
5154 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */
5155 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */
5158 /* .eh_frame covering the .plt section. */
5160 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5162 #if (PLT_CIE_LENGTH != 20 \
5163 || PLT_FDE_LENGTH != 36 \
5164 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \
5165 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5166 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
#endif
5168 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
5169 0, 0, 0, 0, /* CIE ID */
5170 1, /* CIE version */
5171 'z', 'R', 0, /* Augmentation string */
5172 1, /* Code alignment factor */
5173 0x78, /* Data alignment factor */
5174 16, /* Return address column */
5175 1, /* Augmentation size */
5176 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5177 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5178 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
5179 DW_CFA_nop, DW_CFA_nop,
5181 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
5182 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5183 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
5184 0, 0, 0, 0, /* .plt size goes here */
5185 0, /* Augmentation size */
5186 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
5187 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
5188 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
5189 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */
5190 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
5191 13, /* Block length */
5192 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
5193 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
5194 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5195 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5196 DW_CFA_nop, DW_CFA_nop
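/* The DW_CFA_def_cfa_expression above evaluates to
     rsp + 8 + (((rip & 63) >= 37) << 3),
   i.e. once execution is at or past the jmp that follows the pushq at
   offset 32 of a 64-byte NaCl PLT entry, an extra 8-byte relocation
   index sits on the stack and the CFA moves up by 8.  */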
5199 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5201 elf_x86_64_nacl_plt0_entry, /* plt0_entry */
5202 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */
5203 elf_x86_64_nacl_plt_entry, /* plt_entry */
5204 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */
5205 2, /* plt0_got1_offset */
5206 9, /* plt0_got2_offset */
5207 13, /* plt0_got2_insn_end */
5208 3, /* plt_got_offset */
5209 33, /* plt_reloc_offset */
5210 38, /* plt_plt_offset */
5211 7, /* plt_got_insn_size */
5212 42, /* plt_plt_insn_end */
5213 32, /* plt_lazy_offset */
5214 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */
5215 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */
5216 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */
5217 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
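/* The offsets above index into the byte arrays: plt0_got1_offset 2 and
   plt0_got2_offset 9 are the displacement fields of the "ff 35" pushq
   and "4c 8b 1d" mov in PLT0, plt_got_offset 3 is the GOTPCREL
   displacement in each entry, and plt_reloc_offset 33 / plt_plt_offset
   38 are the pushq immediate and jmp displacement of the lazy stub
   that starts at plt_lazy_offset 32.  */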
5220 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
  {
    is_nacl				/* target_os */
  };
5225 #undef elf_backend_arch_data
5226 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed
5228 #undef elf_backend_object_p
5229 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p
5230 #undef elf_backend_modify_segment_map
5231 #define elf_backend_modify_segment_map nacl_modify_segment_map
5232 #undef elf_backend_modify_program_headers
5233 #define elf_backend_modify_program_headers nacl_modify_program_headers
5234 #undef elf_backend_final_write_processing
5235 #define elf_backend_final_write_processing nacl_final_write_processing
5237 #include "elf64-target.h"
5239 /* Native Client x32 support. */
5242 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5244 /* Set the right machine number for a NaCl x86-64 ELF32 file. */
5245 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5249 #undef TARGET_LITTLE_SYM
5250 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec
5251 #undef TARGET_LITTLE_NAME
5252 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl"
5254 #define elf32_bed elf32_x86_64_nacl_bed
5256 #define bfd_elf32_bfd_reloc_type_lookup \
5257 elf_x86_64_reloc_type_lookup
5258 #define bfd_elf32_bfd_reloc_name_lookup \
5259 elf_x86_64_reloc_name_lookup
5260 #define bfd_elf32_get_synthetic_symtab \
5261 elf_x86_64_get_synthetic_symtab
5263 #undef elf_backend_object_p
5264 #define elf_backend_object_p \
5265 elf32_x86_64_nacl_elf_object_p
5267 #undef elf_backend_bfd_from_remote_memory
5268 #define elf_backend_bfd_from_remote_memory \
5269 _bfd_elf32_bfd_from_remote_memory
5271 #undef elf_backend_size_info
5272 #define elf_backend_size_info \
5273 _bfd_elf32_size_info
5275 #include "elf32-target.h"
5277 /* Restore defaults. */
5278 #undef elf_backend_object_p
5279 #define elf_backend_object_p elf64_x86_64_elf_object_p
5280 #undef elf_backend_bfd_from_remote_memory
5281 #undef elf_backend_size_info
5282 #undef elf_backend_modify_segment_map
5283 #undef elf_backend_modify_program_headers
5284 #undef elf_backend_final_write_processing
5286 /* Intel L1OM support. */
5289 elf64_l1om_elf_object_p (bfd *abfd)
5291 /* Set the right machine number for an L1OM elf64 file. */
5292 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5296 #undef TARGET_LITTLE_SYM
5297 #define TARGET_LITTLE_SYM l1om_elf64_vec
5298 #undef TARGET_LITTLE_NAME
5299 #define TARGET_LITTLE_NAME "elf64-l1om"
5301 #define ELF_ARCH bfd_arch_l1om
5303 #undef ELF_MACHINE_CODE
5304 #define ELF_MACHINE_CODE EM_L1OM
5309 #define elf64_bed elf64_l1om_bed
5311 #undef elf_backend_object_p
5312 #define elf_backend_object_p elf64_l1om_elf_object_p
5314 /* Restore defaults. */
5315 #undef ELF_MAXPAGESIZE
5316 #undef ELF_MINPAGESIZE
5317 #undef ELF_COMMONPAGESIZE
5318 #define ELF_MAXPAGESIZE 0x200000
5319 #define ELF_MINPAGESIZE 0x1000
5320 #define ELF_COMMONPAGESIZE 0x1000
5321 #undef elf_backend_plt_alignment
5322 #define elf_backend_plt_alignment 4
5323 #undef elf_backend_arch_data
5324 #define elf_backend_arch_data &elf_x86_64_arch_bed
5326 #include "elf64-target.h"
5328 /* FreeBSD L1OM support. */
5330 #undef TARGET_LITTLE_SYM
5331 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec
5332 #undef TARGET_LITTLE_NAME
5333 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd"
5336 #define ELF_OSABI ELFOSABI_FREEBSD
5339 #define elf64_bed elf64_l1om_fbsd_bed
5341 #include "elf64-target.h"
5343 /* Intel K1OM support. */
5346 elf64_k1om_elf_object_p (bfd *abfd)
5348 /* Set the right machine number for a K1OM elf64 file. */
5349 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5353 #undef TARGET_LITTLE_SYM
5354 #define TARGET_LITTLE_SYM k1om_elf64_vec
5355 #undef TARGET_LITTLE_NAME
5356 #define TARGET_LITTLE_NAME "elf64-k1om"
5358 #define ELF_ARCH bfd_arch_k1om
5360 #undef ELF_MACHINE_CODE
5361 #define ELF_MACHINE_CODE EM_K1OM
5366 #define elf64_bed elf64_k1om_bed
5368 #undef elf_backend_object_p
5369 #define elf_backend_object_p elf64_k1om_elf_object_p
5371 #undef elf_backend_static_tls_alignment
5373 #undef elf_backend_want_plt_sym
5374 #define elf_backend_want_plt_sym 0
5376 #include "elf64-target.h"
5378 /* FreeBSD K1OM support. */
5380 #undef TARGET_LITTLE_SYM
5381 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec
5382 #undef TARGET_LITTLE_NAME
5383 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd"
5386 #define ELF_OSABI ELFOSABI_FREEBSD
5389 #define elf64_bed elf64_k1om_fbsd_bed
5391 #include "elf64-target.h"
5393 /* 32bit x86-64 support. */
5395 #undef TARGET_LITTLE_SYM
5396 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5397 #undef TARGET_LITTLE_NAME
5398 #define TARGET_LITTLE_NAME "elf32-x86-64"
5402 #define ELF_ARCH bfd_arch_i386
5404 #undef ELF_MACHINE_CODE
5405 #define ELF_MACHINE_CODE EM_X86_64
5409 #undef elf_backend_object_p
5410 #define elf_backend_object_p \
5411 elf32_x86_64_elf_object_p
5413 #undef elf_backend_bfd_from_remote_memory
5414 #define elf_backend_bfd_from_remote_memory \
5415 _bfd_elf32_bfd_from_remote_memory
5417 #undef elf_backend_size_info
5418 #define elf_backend_size_info \
5419 _bfd_elf32_size_info
5421 #include "elf32-target.h"