1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2018 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
22 #include "elfxx-x86.h"
25 #include "libiberty.h"
27 #include "opcode/i386.h"
28 #include "elf/x86-64.h"
35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
36 #define MINUS_ONE (~ (bfd_vma) 0)
38 /* Since both 32-bit and 64-bit x86-64 encode relocation type in the
39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get
40 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
41 since they are the same. */
43 /* The relocation "howto" table. Order of fields:
44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
46 static reloc_howto_type x86_64_elf_howto_table[] =
48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont,
49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000,
51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE,
54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff,
57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff,
60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff,
63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff,
66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE,
69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE,
72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE,
75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed,
76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff,
78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
81 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed,
82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff,
84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield,
85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE),
86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield,
87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE),
88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield,
89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE),
90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed,
91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE),
92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE,
95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE,
98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE,
101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff,
104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed,
105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff,
107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff,
110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed,
111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff,
113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed,
114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff,
116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield,
117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE,
119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64",
121 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed,
123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32",
124 FALSE, 0xffffffff, 0xffffffff, TRUE),
125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE,
128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE,
131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed,
132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64",
133 FALSE, MINUS_ONE, MINUS_ONE, TRUE),
134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE,
137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed,
138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE,
140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned,
141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff,
143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned,
144 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE,
146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0,
147 complain_overflow_bitfield, bfd_elf_generic_reloc,
148 "R_X86_64_GOTPC32_TLSDESC",
149 FALSE, 0xffffffff, 0xffffffff, TRUE),
150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0,
151 complain_overflow_dont, bfd_elf_generic_reloc,
152 "R_X86_64_TLSDESC_CALL",
154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0,
155 complain_overflow_bitfield, bfd_elf_generic_reloc,
157 FALSE, MINUS_ONE, MINUS_ONE, FALSE),
158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE,
161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield,
162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE,
164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff,
167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff,
170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff,
173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff,
177 /* We have a gap in the reloc numbers here.
178 R_X86_64_standard counts the number up to this point, and
179 R_X86_64_vt_offset is the value to subtract from a reloc type of
180 R_X86_64_GNU_VT* to form an index into this table. */
181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
184 /* GNU extension to record C++ vtable hierarchy. */
185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont,
186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE),
188 /* GNU extension to record C++ vtable member usage. */
189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont,
190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0,
193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield,
195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff,
199 /* Set if a relocation is converted from a GOTPCREL relocation. */
200 #define R_X86_64_converted_reloc_bit (1 << 7)
/* Nonzero if TYPE is one of the x86-64 PC-relative relocation types
   (8/16/32/32_BND/64-bit).  TYPE is evaluated more than once, so it
   must be side-effect free.  */
202 #define X86_PCREL_TYPE_P(TYPE) \
203 ( ((TYPE) == R_X86_64_PC8) \
204 || ((TYPE) == R_X86_64_PC16) \
205 || ((TYPE) == R_X86_64_PC32) \
206 || ((TYPE) == R_X86_64_PC32_BND) \
207 || ((TYPE) == R_X86_64_PC64))
/* Nonzero if TYPE is a symbol-size relocation (R_X86_64_SIZE32/64).
   TYPE may be evaluated twice.  */
209 #define X86_SIZE_TYPE_P(TYPE) \
210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64)
212 /* Map BFD relocs to the x86_64 elf relocs. */
215 bfd_reloc_code_real_type bfd_reloc_val;
216 unsigned char elf_reloc_val;
219 static const struct elf_reloc_map x86_64_reloc_map[] =
221 { BFD_RELOC_NONE, R_X86_64_NONE, },
222 { BFD_RELOC_64, R_X86_64_64, },
223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
231 { BFD_RELOC_32, R_X86_64_32, },
232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
233 { BFD_RELOC_16, R_X86_64_16, },
234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
235 { BFD_RELOC_8, R_X86_64_8, },
236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
/* Map an ELF r_type value to its entry in x86_64_elf_howto_table.
   Unsupported values are reported via _bfd_error_handler and rejected
   with bfd_error_bad_value.  */
267 static reloc_howto_type *
268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
/* R_X86_64_32 is special-cased: the last slot of the table holds an
   alternate howto for it (complain_overflow_bitfield, used for x32) --
   NOTE(review): the ABI_64_P condition guarding this appears elided in
   this listing; confirm against the full source.  */
272 if (r_type == (unsigned int) R_X86_64_32)
277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
/* Values in the gap between the standard relocs and the GNU vtable
   relocs, or past R_X86_64_max, are invalid.  */
279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
280 || r_type >= (unsigned int) R_X86_64_max)
282 if (r_type >= (unsigned int) R_X86_64_standard)
284 /* xgettext:c-format */
285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
287 bfd_set_error (bfd_error_bad_value);
/* GNU vtable relocs sit at the end of the table; R_X86_64_vt_offset
   rebases their reloc numbers onto table indices.  */
293 i = r_type - (unsigned int) R_X86_64_vt_offset;
294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
295 return &x86_64_elf_howto_table[i];
298 /* Given a BFD reloc type, return a HOWTO structure. */
299 static reloc_howto_type *
300 elf_x86_64_reloc_type_lookup (bfd *abfd,
301 bfd_reloc_code_real_type code)
/* Linear scan of the BFD-code -> ELF-reloc mapping table; on a match
   the ELF reloc number is translated to a howto entry.  */
305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
308 if (x86_64_reloc_map[i].bfd_reloc_val == code)
309 return elf_x86_64_rtype_to_howto (abfd,
310 x86_64_reloc_map[i].elf_reloc_val)
/* Given a relocation name, return the matching HOWTO entry by
   case-insensitive comparison against the table's name fields.  */
315 static reloc_howto_type *
316 elf_x86_64_reloc_name_lookup (bfd *abfd,
/* For x32 objects, "R_X86_64_32" resolves to the alternate howto
   stored in the last slot of the table rather than the regular
   R_X86_64_32 entry.  */
321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
323 /* Get x32 R_X86_64_32. */
324 reloc_howto_type *reloc
325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
331 if (x86_64_elf_howto_table[i].name != NULL
332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
333 return &x86_64_elf_howto_table[i];
338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
342 Elf_Internal_Rela *dst)
/* 32-bit and 64-bit x86-64 encode the reloc type identically, so
   ELF32_R_TYPE works for both (see the comment near the top of this
   file).  */
346 r_type = ELF32_R_TYPE (dst->r_info);
/* Strip the internal "converted from GOTPCREL" marker bit before the
   howto lookup; the vtable relocs never carry it.  */
347 if (r_type != (unsigned int) R_X86_64_GNU_VTINHERIT
348 && r_type != (unsigned int) R_X86_64_GNU_VTENTRY)
349 r_type &= ~R_X86_64_converted_reloc_bit;
350 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
351 if (cache_ptr->howto == NULL)
/* The howto either matches the requested type or is the NONE entry
   (used when the type had to be rejected).  */
353 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
357 /* Support for core dump NOTE sections. */
/* Parse a core-file prstatus note: record the terminating signal and
   LWP id in the BFD's core tdata, then expose the register block as a
   ".reg/nnn" pseudosection.  Field offsets differ between the x32 and
   x86-64 layouts of struct elf_prstatus, hence the descsz switch.  */
359 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
364 switch (note->descsz)
369 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */
371 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
374 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
382 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */
384 elf_tdata (abfd)->core->signal
385 = bfd_get_16 (abfd, note->descdata + 12);
388 elf_tdata (abfd)->core->lwpid
389 = bfd_get_32 (abfd, note->descdata + 32);
398 /* Make a ".reg/999" section. */
399 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
400 size, note->descpos + offset);
/* Parse a core-file psinfo note: record the pid, program name (16
   bytes) and command line (80 bytes) in the BFD's core tdata.  The
   field offsets depend on whether the note uses the x32 or x86-64
   layout of struct elf_prpsinfo, distinguished by descsz.  */
404 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
406 switch (note->descsz)
411 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
412 elf_tdata (abfd)->core->pid
413 = bfd_get_32 (abfd, note->descdata + 12);
414 elf_tdata (abfd)->core->program
415 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
416 elf_tdata (abfd)->core->command
417 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
420 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
421 elf_tdata (abfd)->core->pid
422 = bfd_get_32 (abfd, note->descdata + 24);
423 elf_tdata (abfd)->core->program
424 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
425 elf_tdata (abfd)->core->command
426 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
429 /* Note that for some reason, a spurious space is tacked
430 onto the end of the args in some (at least one anyway)
431 implementations, so strip it off if it exists. */
434 char *command = elf_tdata (abfd)->core->command;
435 int n = strlen (command);
437 if (0 < n && command[n - 1] == ' ')
438 command[n - 1] = '\0';
/* Write a core-dump note of the given type into BUF/BUFSIZ.  The
   variadic arguments depend on the note type: for psinfo notes the
   program name and args strings; for prstatus notes the pid, current
   signal and a pointer to the general registers.  The ELF class (and,
   for x32, the machine code) of ABFD selects which native note layout
   is emitted.  */
446 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
449 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
451 const char *fname, *psargs;
462 va_start (ap, note_type);
463 fname = va_arg (ap, const char *);
464 psargs = va_arg (ap, const char *);
/* 32-bit (x32) psinfo layout.  pr_fname/pr_psargs are fixed-width
   fields that need not be NUL-terminated, so strncpy into a zeroed
   struct is the intended idiom here.  */
467 if (bed->s->elfclass == ELFCLASS32)
470 memset (&data, 0, sizeof (data));
471 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
472 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
473 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
474 &data, sizeof (data));
/* 64-bit psinfo layout.  */
479 memset (&data, 0, sizeof (data));
480 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
481 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
482 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
483 &data, sizeof (data));
488 va_start (ap, note_type);
489 pid = va_arg (ap, long);
490 cursig = va_arg (ap, int);
491 gregs = va_arg (ap, const void *);
/* x32 (ELFCLASS32 + EM_X86_64) prstatus layout.  */
494 if (bed->s->elfclass == ELFCLASS32)
496 if (bed->elf_machine_code == EM_X86_64)
498 prstatusx32_t prstat;
499 memset (&prstat, 0, sizeof (prstat));
501 prstat.pr_cursig = cursig;
502 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
503 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
504 &prstat, sizeof (prstat));
/* Plain 32-bit prstatus layout.  */
509 memset (&prstat, 0, sizeof (prstat));
511 prstat.pr_cursig = cursig;
512 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
513 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
514 &prstat, sizeof (prstat));
/* 64-bit prstatus layout.  */
520 memset (&prstat, 0, sizeof (prstat));
522 prstat.pr_cursig = cursig;
523 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
524 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
525 &prstat, sizeof (prstat));
532 /* Functions for the x86-64 ELF linker. */
534 /* The size in bytes of an entry in the global offset table. */
536 #define GOT_ENTRY_SIZE 8
538 /* The size in bytes of an entry in the lazy procedure linkage table. */
540 #define LAZY_PLT_ENTRY_SIZE 16
542 /* The size in bytes of an entry in the non-lazy procedure linkage
545 #define NON_LAZY_PLT_ENTRY_SIZE 8
547 /* The first entry in a lazy procedure linkage table looks like this.
548 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
551 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
553 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
554 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
555 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
558 /* Subsequent entries in a lazy procedure linkage table look like this. */
560 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
562 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
563 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
564 0x68, /* pushq immediate */
565 0, 0, 0, 0, /* replaced with index into relocation table. */
566 0xe9, /* jmp relative */
567 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
570 /* The first entry in a lazy procedure linkage table with BND prefix
573 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
575 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
576 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
577 0x0f, 0x1f, 0 /* nopl (%rax) */
580 /* Subsequent entries for branches with BND prefix in a lazy procedure
581 linkage table look like this. */
583 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
585 0x68, 0, 0, 0, 0, /* pushq immediate */
586 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
587 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
590 /* The first entry in the IBT-enabled lazy procedure linkage table is
591 the same as the lazy PLT with BND prefix so that bound registers are
592 preserved when control is passed to dynamic linker. Subsequent
593 entries for a IBT-enabled lazy procedure linkage table look like
596 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
598 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
599 0x68, 0, 0, 0, 0, /* pushq immediate */
600 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
604 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
605 is the same as the normal lazy PLT. Subsequent entries for an
606 x32 IBT-enabled lazy procedure linkage table look like this. */
608 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
610 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
611 0x68, 0, 0, 0, 0, /* pushq immediate */
612 0xe9, 0, 0, 0, 0, /* jmpq relative */
613 0x66, 0x90 /* xchg %ax,%ax */
616 /* Entries in the non-lazy procedure linkage table look like this. */
618 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
620 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
621 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
622 0x66, 0x90 /* xchg %ax,%ax */
625 /* Entries for branches with BND prefix in the non-lazy procedure
626 linkage table look like this. */
628 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
630 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
631 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
635 /* Entries for branches with IBT-enabled in the non-lazy procedure
636 linkage table look like this. They have the same size as the lazy
639 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
641 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
642 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
643 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
644 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
647 /* Entries for branches with IBT-enabled in the x32 non-lazy procedure
648 linkage table look like this. They have the same size as the lazy
651 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
653 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
654 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
655 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
656 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
659 /* .eh_frame covering the lazy .plt section. */
661 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
663 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
664 0, 0, 0, 0, /* CIE ID */
666 'z', 'R', 0, /* Augmentation string */
667 1, /* Code alignment factor */
668 0x78, /* Data alignment factor */
669 16, /* Return address column */
670 1, /* Augmentation size */
671 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
672 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
673 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
674 DW_CFA_nop, DW_CFA_nop,
676 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
677 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
678 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
679 0, 0, 0, 0, /* .plt size goes here */
680 0, /* Augmentation size */
681 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
682 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
683 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
684 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
685 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
686 11, /* Block length */
687 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
688 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
689 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
690 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
691 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
694 /* .eh_frame covering the lazy BND .plt section. */
696 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
698 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
699 0, 0, 0, 0, /* CIE ID */
701 'z', 'R', 0, /* Augmentation string */
702 1, /* Code alignment factor */
703 0x78, /* Data alignment factor */
704 16, /* Return address column */
705 1, /* Augmentation size */
706 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
707 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
708 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
709 DW_CFA_nop, DW_CFA_nop,
711 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
712 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
713 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
714 0, 0, 0, 0, /* .plt size goes here */
715 0, /* Augmentation size */
716 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
717 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
718 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
719 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
720 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
721 11, /* Block length */
722 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
723 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
724 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
725 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
726 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
729 /* .eh_frame covering the lazy .plt section with IBT-enabled. */
731 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
733 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
734 0, 0, 0, 0, /* CIE ID */
736 'z', 'R', 0, /* Augmentation string */
737 1, /* Code alignment factor */
738 0x78, /* Data alignment factor */
739 16, /* Return address column */
740 1, /* Augmentation size */
741 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
742 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
743 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
744 DW_CFA_nop, DW_CFA_nop,
746 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
747 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
748 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
749 0, 0, 0, 0, /* .plt size goes here */
750 0, /* Augmentation size */
751 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
752 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
753 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
754 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
755 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
756 11, /* Block length */
757 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
758 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
759 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
760 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
761 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
764 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */
766 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
768 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
769 0, 0, 0, 0, /* CIE ID */
771 'z', 'R', 0, /* Augmentation string */
772 1, /* Code alignment factor */
773 0x78, /* Data alignment factor */
774 16, /* Return address column */
775 1, /* Augmentation size */
776 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
777 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
778 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
779 DW_CFA_nop, DW_CFA_nop,
781 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
782 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
783 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
784 0, 0, 0, 0, /* .plt size goes here */
785 0, /* Augmentation size */
786 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
787 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
788 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
789 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
790 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
791 11, /* Block length */
792 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
793 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
794 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
795 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
796 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
799 /* .eh_frame covering the non-lazy .plt section. */
801 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
803 #define PLT_GOT_FDE_LENGTH 20
804 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
805 0, 0, 0, 0, /* CIE ID */
807 'z', 'R', 0, /* Augmentation string */
808 1, /* Code alignment factor */
809 0x78, /* Data alignment factor */
810 16, /* Return address column */
811 1, /* Augmentation size */
812 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
813 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
814 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
815 DW_CFA_nop, DW_CFA_nop,
817 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
818 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
819 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
820 0, 0, 0, 0, /* non-lazy .plt size goes here */
821 0, /* Augmentation size */
822 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
823 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
826 /* These are the standard parameters. */
827 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
829 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
830 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
831 elf_x86_64_lazy_plt_entry, /* plt_entry */
832 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
833 2, /* plt0_got1_offset */
834 8, /* plt0_got2_offset */
835 12, /* plt0_got2_insn_end */
836 2, /* plt_got_offset */
837 7, /* plt_reloc_offset */
838 12, /* plt_plt_offset */
839 6, /* plt_got_insn_size */
840 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
841 6, /* plt_lazy_offset */
842 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
843 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
844 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
845 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
848 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
850 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
851 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
852 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
853 2, /* plt_got_offset */
854 6, /* plt_got_insn_size */
855 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
856 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
859 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
861 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
862 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
863 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
864 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
865 2, /* plt0_got1_offset */
866 1+8, /* plt0_got2_offset */
867 1+12, /* plt0_got2_insn_end */
868 1+2, /* plt_got_offset */
869 1, /* plt_reloc_offset */
870 7, /* plt_plt_offset */
871 1+6, /* plt_got_insn_size */
872 11, /* plt_plt_insn_end */
873 0, /* plt_lazy_offset */
874 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
875 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
876 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
877 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
880 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
882 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
883 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
884 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
885 1+2, /* plt_got_offset */
886 1+6, /* plt_got_insn_size */
887 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
888 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
891 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
893 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
894 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
895 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
896 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
897 2, /* plt0_got1_offset */
898 1+8, /* plt0_got2_offset */
899 1+12, /* plt0_got2_insn_end */
900 4+1+2, /* plt_got_offset */
901 4+1, /* plt_reloc_offset */
902 4+1+6, /* plt_plt_offset */
903 4+1+6, /* plt_got_insn_size */
904 4+1+5+5, /* plt_plt_insn_end */
905 0, /* plt_lazy_offset */
906 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
907 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
908 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
909 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
912 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
914 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
915 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
916 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
917 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
918 2, /* plt0_got1_offset */
919 8, /* plt0_got2_offset */
920 12, /* plt0_got2_insn_end */
921 4+2, /* plt_got_offset */
922 4+1, /* plt_reloc_offset */
923 4+6, /* plt_plt_offset */
924 4+6, /* plt_got_insn_size */
925 4+5+5, /* plt_plt_insn_end */
926 0, /* plt_lazy_offset */
927 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
928 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
929 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
930 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
933 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
935 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
936 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
937 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
938 4+1+2, /* plt_got_offset */
939 4+1+6, /* plt_got_insn_size */
940 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
941 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
944 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
946 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
947 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
948 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
949 4+2, /* plt_got_offset */
950 4+6, /* plt_got_insn_size */
951 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
952 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
955 static const struct elf_x86_backend_data elf_x86_64_arch_bed =
960 #define elf_backend_arch_data &elf_x86_64_arch_bed
963 elf64_x86_64_elf_object_p (bfd *abfd)
965 /* Set the right machine number for an x86-64 elf64 file. */
966 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
971 elf32_x86_64_elf_object_p (bfd *abfd)
973 /* Set the right machine number for an x86-64 elf32 file. */
974 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
978 /* Return TRUE if the TLS access code sequence support transition
982 elf_x86_64_check_tls_transition (bfd *abfd,
983 struct bfd_link_info *info,
986 Elf_Internal_Shdr *symtab_hdr,
987 struct elf_link_hash_entry **sym_hashes,
989 const Elf_Internal_Rela *rel,
990 const Elf_Internal_Rela *relend)
993 unsigned long r_symndx;
994 bfd_boolean largepic = FALSE;
995 struct elf_link_hash_entry *h;
997 struct elf_x86_link_hash_table *htab;
999 bfd_boolean indirect_call;
1001 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1002 offset = rel->r_offset;
1005 case R_X86_64_TLSGD:
1006 case R_X86_64_TLSLD:
1007 if ((rel + 1) >= relend)
1010 if (r_type == R_X86_64_TLSGD)
1012 /* Check transition from GD access model. For 64bit, only
1013 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1014 .word 0x6666; rex64; call __tls_get_addr@PLT
1016 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1018 call *__tls_get_addr@GOTPCREL(%rip)
1019 which may be converted to
1020 addr32 call __tls_get_addr
1021 can transit to different access model. For 32bit, only
1022 leaq foo@tlsgd(%rip), %rdi
1023 .word 0x6666; rex64; call __tls_get_addr@PLT
1025 leaq foo@tlsgd(%rip), %rdi
1027 call *__tls_get_addr@GOTPCREL(%rip)
1028 which may be converted to
1029 addr32 call __tls_get_addr
1030 can transit to different access model. For largepic,
1032 leaq foo@tlsgd(%rip), %rdi
1033 movabsq $__tls_get_addr@pltoff, %rax
1037 leaq foo@tlsgd(%rip), %rdi
1038 movabsq $__tls_get_addr@pltoff, %rax
1042 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1044 if ((offset + 12) > sec->size)
1047 call = contents + offset + 4;
1049 || !((call[1] == 0x48
1057 && call[3] == 0xe8)))
1059 if (!ABI_64_P (abfd)
1060 || (offset + 19) > sec->size
1062 || memcmp (call - 7, leaq + 1, 3) != 0
1063 || memcmp (call, "\x48\xb8", 2) != 0
1067 || !((call[10] == 0x48 && call[12] == 0xd8)
1068 || (call[10] == 0x4c && call[12] == 0xf8)))
1072 else if (ABI_64_P (abfd))
1075 || memcmp (contents + offset - 4, leaq, 4) != 0)
1081 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1084 indirect_call = call[2] == 0xff;
1088 /* Check transition from LD access model. Only
1089 leaq foo@tlsld(%rip), %rdi;
1090 call __tls_get_addr@PLT
1092 leaq foo@tlsld(%rip), %rdi;
1093 call *__tls_get_addr@GOTPCREL(%rip)
1094 which may be converted to
1095 addr32 call __tls_get_addr
1096 can transit to different access model. For largepic
1098 leaq foo@tlsld(%rip), %rdi
1099 movabsq $__tls_get_addr@pltoff, %rax
1103 leaq foo@tlsld(%rip), %rdi
1104 movabsq $__tls_get_addr@pltoff, %rax
1108 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1110 if (offset < 3 || (offset + 9) > sec->size)
1113 if (memcmp (contents + offset - 3, lea, 3) != 0)
1116 call = contents + offset + 4;
1117 if (!(call[0] == 0xe8
1118 || (call[0] == 0xff && call[1] == 0x15)
1119 || (call[0] == 0x67 && call[1] == 0xe8)))
1121 if (!ABI_64_P (abfd)
1122 || (offset + 19) > sec->size
1123 || memcmp (call, "\x48\xb8", 2) != 0
1127 || !((call[10] == 0x48 && call[12] == 0xd8)
1128 || (call[10] == 0x4c && call[12] == 0xf8)))
1132 indirect_call = call[0] == 0xff;
1135 r_symndx = htab->r_sym (rel[1].r_info);
1136 if (r_symndx < symtab_hdr->sh_info)
1139 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1141 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1145 r_type = (ELF32_R_TYPE (rel[1].r_info)
1146 & ~R_X86_64_converted_reloc_bit);
1148 return r_type == R_X86_64_PLTOFF64;
1149 else if (indirect_call)
1150 return r_type == R_X86_64_GOTPCRELX;
1152 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1155 case R_X86_64_GOTTPOFF:
1156 /* Check transition from IE access model:
1157 mov foo@gottpoff(%rip), %reg
1158 add foo@gottpoff(%rip), %reg
1161 /* Check REX prefix first. */
1162 if (offset >= 3 && (offset + 4) <= sec->size)
1164 val = bfd_get_8 (abfd, contents + offset - 3);
1165 if (val != 0x48 && val != 0x4c)
1167 /* X32 may have 0x44 REX prefix or no REX prefix. */
1168 if (ABI_64_P (abfd))
1174 /* X32 may not have any REX prefix. */
1175 if (ABI_64_P (abfd))
1177 if (offset < 2 || (offset + 3) > sec->size)
1181 val = bfd_get_8 (abfd, contents + offset - 2);
1182 if (val != 0x8b && val != 0x03)
1185 val = bfd_get_8 (abfd, contents + offset - 1);
1186 return (val & 0xc7) == 5;
1188 case R_X86_64_GOTPC32_TLSDESC:
1189 /* Check transition from GDesc access model:
1190 leaq x@tlsdesc(%rip), %rax
1192 Make sure it's a leaq adding rip to a 32-bit offset
1193 into any register, although it's probably almost always
1196 if (offset < 3 || (offset + 4) > sec->size)
1199 val = bfd_get_8 (abfd, contents + offset - 3);
1200 if ((val & 0xfb) != 0x48)
1203 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1206 val = bfd_get_8 (abfd, contents + offset - 1);
1207 return (val & 0xc7) == 0x05;
1209 case R_X86_64_TLSDESC_CALL:
1210 /* Check transition from GDesc access model:
1211 call *x@tlsdesc(%rax)
1213 if (offset + 2 <= sec->size)
1215 /* Make sure that it's a call *x@tlsdesc(%rax). */
1216 call = contents + offset;
1217 return call[0] == 0xff && call[1] == 0x10;
1227 /* Return TRUE if the TLS access transition is OK or no transition
1228 will be performed. Update R_TYPE if there is a transition. */
1231 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1232 asection *sec, bfd_byte *contents,
1233 Elf_Internal_Shdr *symtab_hdr,
1234 struct elf_link_hash_entry **sym_hashes,
1235 unsigned int *r_type, int tls_type,
1236 const Elf_Internal_Rela *rel,
1237 const Elf_Internal_Rela *relend,
1238 struct elf_link_hash_entry *h,
1239 unsigned long r_symndx,
1240 bfd_boolean from_relocate_section)
1242 unsigned int from_type = *r_type;
1243 unsigned int to_type = from_type;
1244 bfd_boolean check = TRUE;
1246 /* Skip TLS transition for functions. */
1248 && (h->type == STT_FUNC
1249 || h->type == STT_GNU_IFUNC))
1254 case R_X86_64_TLSGD:
1255 case R_X86_64_GOTPC32_TLSDESC:
1256 case R_X86_64_TLSDESC_CALL:
1257 case R_X86_64_GOTTPOFF:
1258 if (bfd_link_executable (info))
1261 to_type = R_X86_64_TPOFF32;
1263 to_type = R_X86_64_GOTTPOFF;
1266 /* When we are called from elf_x86_64_relocate_section, there may
1267 be additional transitions based on TLS_TYPE. */
1268 if (from_relocate_section)
1270 unsigned int new_to_type = to_type;
1272 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1273 new_to_type = R_X86_64_TPOFF32;
1275 if (to_type == R_X86_64_TLSGD
1276 || to_type == R_X86_64_GOTPC32_TLSDESC
1277 || to_type == R_X86_64_TLSDESC_CALL)
1279 if (tls_type == GOT_TLS_IE)
1280 new_to_type = R_X86_64_GOTTPOFF;
1283 /* We checked the transition before when we were called from
1284 elf_x86_64_check_relocs. We only want to check the new
1285 transition which hasn't been checked before. */
1286 check = new_to_type != to_type && from_type == to_type;
1287 to_type = new_to_type;
1292 case R_X86_64_TLSLD:
1293 if (bfd_link_executable (info))
1294 to_type = R_X86_64_TPOFF32;
1301 /* Return TRUE if there is no transition. */
1302 if (from_type == to_type)
1305 /* Check if the transition can be performed. */
1307 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1308 symtab_hdr, sym_hashes,
1309 from_type, rel, relend))
1311 reloc_howto_type *from, *to;
1314 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1315 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1317 if (from == NULL || to == NULL)
1321 name = h->root.root.string;
1324 struct elf_x86_link_hash_table *htab;
1326 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1331 Elf_Internal_Sym *isym;
1333 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1335 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1340 /* xgettext:c-format */
1341 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1342 " in section `%pA' failed"),
1343 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1344 bfd_set_error (bfd_error_bad_value);
1352 /* Rename some of the generic section flags to better document how they
1354 #define check_relocs_failed sec_flg0
1357 elf_x86_64_need_pic (struct bfd_link_info *info,
1358 bfd *input_bfd, asection *sec,
1359 struct elf_link_hash_entry *h,
1360 Elf_Internal_Shdr *symtab_hdr,
1361 Elf_Internal_Sym *isym,
1362 reloc_howto_type *howto)
1365 const char *und = "";
1366 const char *pic = "";
1372 name = h->root.root.string;
1373 switch (ELF_ST_VISIBILITY (h->other))
1376 v = _("hidden symbol ");
1379 v = _("internal symbol ");
1382 v = _("protected symbol ");
1385 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1386 v = _("protected symbol ");
1389 pic = _("; recompile with -fPIC");
1393 if (!h->def_regular && !h->def_dynamic)
1394 und = _("undefined ");
1398 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1399 pic = _("; recompile with -fPIC");
1402 if (bfd_link_dll (info))
1403 object = _("a shared object");
1404 else if (bfd_link_pie (info))
1405 object = _("a PIE object");
1407 object = _("a PDE object");
1409 /* xgettext:c-format */
1410 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1411 "not be used when making %s%s"),
1412 input_bfd, howto->name, und, v, name,
1414 bfd_set_error (bfd_error_bad_value);
1415 sec->check_relocs_failed = 1;
1419 /* With the local symbol, foo, we convert
1420 mov foo@GOTPCREL(%rip), %reg
1424 call/jmp *foo@GOTPCREL(%rip)
1426 nop call foo/jmp foo nop
1427 When PIC is false, convert
1428 test %reg, foo@GOTPCREL(%rip)
1432 binop foo@GOTPCREL(%rip), %reg
1435 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1439 elf_x86_64_convert_load_reloc (bfd *abfd,
1441 unsigned int *r_type_p,
1442 Elf_Internal_Rela *irel,
1443 struct elf_link_hash_entry *h,
1444 bfd_boolean *converted,
1445 struct bfd_link_info *link_info)
1447 struct elf_x86_link_hash_table *htab;
1449 bfd_boolean no_overflow;
1451 bfd_boolean to_reloc_pc32;
1453 bfd_signed_vma raddend;
1454 unsigned int opcode;
1456 unsigned int r_type = *r_type_p;
1457 unsigned int r_symndx;
1458 bfd_vma roff = irel->r_offset;
1460 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
1463 raddend = irel->r_addend;
1464 /* Addend for 32-bit PC-relative relocation must be -4. */
1468 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1469 is_pic = bfd_link_pic (link_info);
1471 relocx = (r_type == R_X86_64_GOTPCRELX
1472 || r_type == R_X86_64_REX_GOTPCRELX);
1474 /* TRUE if --no-relax is used. */
1475 no_overflow = link_info->disable_target_specific_optimizations > 1;
1477 r_symndx = htab->r_sym (irel->r_info);
1479 opcode = bfd_get_8 (abfd, contents + roff - 2);
1481 /* Convert mov to lea since it has been done for a while. */
1484 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
1485 for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
1486 test, xor instructions. */
1491 /* We convert only to R_X86_64_PC32:
1493 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1494 3. no_overflow is true.
1497 to_reloc_pc32 = (opcode == 0xff
1502 /* Get the symbol referred to by the reloc. */
1505 Elf_Internal_Sym *isym
1506 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx);
1508 /* Skip relocation against undefined symbols. */
1509 if (isym->st_shndx == SHN_UNDEF)
1512 if (isym->st_shndx == SHN_ABS)
1513 tsec = bfd_abs_section_ptr;
1514 else if (isym->st_shndx == SHN_COMMON)
1515 tsec = bfd_com_section_ptr;
1516 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1517 tsec = &_bfd_elf_large_com_section;
1519 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1523 /* Undefined weak symbol is only bound locally in executable
1524 and its reference is resolved as 0 without relocation
1525 overflow. We can only perform this optimization for
1526 GOTPCRELX relocations since we need to modify REX byte.
1527 It is OK convert mov with R_X86_64_GOTPCREL to
1529 bfd_boolean local_ref;
1530 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1532 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1533 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1534 if ((relocx || opcode == 0x8b)
1535 && (h->root.type == bfd_link_hash_undefweak
1541 /* Skip for branch instructions since R_X86_64_PC32
1548 /* For non-branch instructions, we can convert to
1549 R_X86_64_32/R_X86_64_32S since we know if there
1551 to_reloc_pc32 = FALSE;
1554 /* Since we don't know the current PC when PIC is true,
1555 we can't convert to R_X86_64_PC32. */
1556 if (to_reloc_pc32 && is_pic)
1561 /* Avoid optimizing GOTPCREL relocations againt _DYNAMIC since
1562 ld.so may use its link-time address. */
1563 else if (h->start_stop
1566 || h->root.type == bfd_link_hash_defined
1567 || h->root.type == bfd_link_hash_defweak)
1568 && h != htab->elf.hdynamic
1571 /* bfd_link_hash_new or bfd_link_hash_undefined is
1572 set by an assignment in a linker script in
1573 bfd_elf_record_link_assignment. start_stop is set
1574 on __start_SECNAME/__stop_SECNAME which mark section
1579 && (h->root.type == bfd_link_hash_new
1580 || h->root.type == bfd_link_hash_undefined
1581 || ((h->root.type == bfd_link_hash_defined
1582 || h->root.type == bfd_link_hash_defweak)
1583 && h->root.u.def.section == bfd_und_section_ptr))))
1585 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1590 tsec = h->root.u.def.section;
1596 /* Don't convert GOTPCREL relocation against large section. */
1597 if (elf_section_data (tsec) != NULL
1598 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1601 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1608 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1613 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1615 modrm = bfd_get_8 (abfd, contents + roff - 1);
1618 /* Convert to "jmp foo nop". */
1621 nop_offset = irel->r_offset + 3;
1622 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1623 irel->r_offset -= 1;
1624 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1628 struct elf_x86_link_hash_entry *eh
1629 = (struct elf_x86_link_hash_entry *) h;
1631 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1634 /* To support TLS optimization, always use addr32 prefix for
1635 "call *__tls_get_addr@GOTPCREL(%rip)". */
1636 if (eh && eh->tls_get_addr)
1639 nop_offset = irel->r_offset - 2;
1643 nop = link_info->call_nop_byte;
1644 if (link_info->call_nop_as_suffix)
1646 nop_offset = irel->r_offset + 3;
1647 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1648 irel->r_offset -= 1;
1649 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1652 nop_offset = irel->r_offset - 2;
1655 bfd_put_8 (abfd, nop, contents + nop_offset);
1656 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1657 r_type = R_X86_64_PC32;
1662 unsigned int rex_mask = REX_R;
1664 if (r_type == R_X86_64_REX_GOTPCRELX)
1665 rex = bfd_get_8 (abfd, contents + roff - 3);
1673 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1674 "lea foo(%rip), %reg". */
1676 r_type = R_X86_64_PC32;
1680 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1681 "mov $foo, %reg". */
1683 modrm = bfd_get_8 (abfd, contents + roff - 1);
1684 modrm = 0xc0 | (modrm & 0x38) >> 3;
1685 if ((rex & REX_W) != 0
1686 && ABI_64_P (link_info->output_bfd))
1688 /* Keep the REX_W bit in REX byte for LP64. */
1689 r_type = R_X86_64_32S;
1690 goto rewrite_modrm_rex;
1694 /* If the REX_W bit in REX byte isn't needed,
1695 use R_X86_64_32 and clear the W bit to avoid
1696 sign-extend imm32 to imm64. */
1697 r_type = R_X86_64_32;
1698 /* Clear the W bit in REX byte. */
1700 goto rewrite_modrm_rex;
1706 /* R_X86_64_PC32 isn't supported. */
1710 modrm = bfd_get_8 (abfd, contents + roff - 1);
1713 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1714 "test $foo, %reg". */
1715 modrm = 0xc0 | (modrm & 0x38) >> 3;
1720 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1721 "binop $foo, %reg". */
1722 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1726 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1727 overflow when sign-extending imm32 to imm64. */
1728 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
1731 bfd_put_8 (abfd, modrm, contents + roff - 1);
1735 /* Move the R bit to the B bit in REX byte. */
1736 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1737 bfd_put_8 (abfd, rex, contents + roff - 3);
1740 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1744 bfd_put_8 (abfd, opcode, contents + roff - 2);
1748 irel->r_info = htab->r_info (r_symndx,
1749 r_type | R_X86_64_converted_reloc_bit);
1756 /* Look through the relocs for a section during the first phase, and
1757 calculate needed space in the global offset table, procedure
1758 linkage table, and dynamic reloc sections. */
1761 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info,
1763 const Elf_Internal_Rela *relocs)
1765 struct elf_x86_link_hash_table *htab;
1766 Elf_Internal_Shdr *symtab_hdr;
1767 struct elf_link_hash_entry **sym_hashes;
1768 const Elf_Internal_Rela *rel;
1769 const Elf_Internal_Rela *rel_end;
1772 bfd_boolean converted;
1774 if (bfd_link_relocatable (info))
1777 /* Don't do anything special with non-loaded, non-alloced sections.
1778 In particular, any relocs in such sections should not affect GOT
1779 and PLT reference counting (ie. we don't allow them to create GOT
1780 or PLT entries), there's no possibility or desire to optimize TLS
1781 relocs, and there's not much point in propagating relocs to shared
1782 libs that the dynamic linker won't relocate. */
1783 if ((sec->flags & SEC_ALLOC) == 0)
1786 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1789 sec->check_relocs_failed = 1;
1793 BFD_ASSERT (is_x86_elf (abfd, htab));
1795 /* Get the section contents. */
1796 if (elf_section_data (sec)->this_hdr.contents != NULL)
1797 contents = elf_section_data (sec)->this_hdr.contents;
1798 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1800 sec->check_relocs_failed = 1;
1804 symtab_hdr = &elf_symtab_hdr (abfd);
1805 sym_hashes = elf_sym_hashes (abfd);
1811 rel_end = relocs + sec->reloc_count;
1812 for (rel = relocs; rel < rel_end; rel++)
1814 unsigned int r_type;
1815 unsigned int r_symndx;
1816 struct elf_link_hash_entry *h;
1817 struct elf_x86_link_hash_entry *eh;
1818 Elf_Internal_Sym *isym;
1820 bfd_boolean size_reloc;
1821 bfd_boolean converted_reloc;
1823 r_symndx = htab->r_sym (rel->r_info);
1824 r_type = ELF32_R_TYPE (rel->r_info);
1826 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
1828 /* xgettext:c-format */
1829 _bfd_error_handler (_("%pB: bad symbol index: %d"),
1834 if (r_symndx < symtab_hdr->sh_info)
1836 /* A local symbol. */
1837 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
1842 /* Check relocation against local STT_GNU_IFUNC symbol. */
1843 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
1845 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
1850 /* Fake a STT_GNU_IFUNC symbol. */
1851 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
1853 h->type = STT_GNU_IFUNC;
1856 h->forced_local = 1;
1857 h->root.type = bfd_link_hash_defined;
1865 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1866 while (h->root.type == bfd_link_hash_indirect
1867 || h->root.type == bfd_link_hash_warning)
1868 h = (struct elf_link_hash_entry *) h->root.u.i.link;
1871 /* Check invalid x32 relocations. */
1872 if (!ABI_64_P (abfd))
1878 case R_X86_64_DTPOFF64:
1879 case R_X86_64_TPOFF64:
1881 case R_X86_64_GOTOFF64:
1882 case R_X86_64_GOT64:
1883 case R_X86_64_GOTPCREL64:
1884 case R_X86_64_GOTPC64:
1885 case R_X86_64_GOTPLT64:
1886 case R_X86_64_PLTOFF64:
1889 name = h->root.root.string;
1891 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
1894 /* xgettext:c-format */
1895 (_("%pB: relocation %s against symbol `%s' isn't "
1896 "supported in x32 mode"), abfd,
1897 x86_64_elf_howto_table[r_type].name, name);
1898 bfd_set_error (bfd_error_bad_value);
1906 /* It is referenced by a non-shared object. */
1909 if (h->type == STT_GNU_IFUNC)
1910 elf_tdata (info->output_bfd)->has_gnu_symbols
1911 |= elf_gnu_symbol_ifunc;
1914 converted_reloc = FALSE;
1915 if ((r_type == R_X86_64_GOTPCREL
1916 || r_type == R_X86_64_GOTPCRELX
1917 || r_type == R_X86_64_REX_GOTPCRELX)
1918 && (h == NULL || h->type != STT_GNU_IFUNC))
1920 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
1921 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
1922 irel, h, &converted_reloc,
1926 if (converted_reloc)
1930 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
1931 symtab_hdr, sym_hashes,
1932 &r_type, GOT_UNKNOWN,
1933 rel, rel_end, h, r_symndx, FALSE))
1936 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
1937 if (h == htab->elf.hgot)
1938 htab->got_referenced = TRUE;
1940 eh = (struct elf_x86_link_hash_entry *) h;
1943 case R_X86_64_TLSLD:
1944 htab->tls_ld_or_ldm_got.refcount = 1;
1947 case R_X86_64_TPOFF32:
1948 if (!bfd_link_executable (info) && ABI_64_P (abfd))
1949 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
1950 &x86_64_elf_howto_table[r_type]);
1952 eh->zero_undefweak &= 0x2;
1955 case R_X86_64_GOTTPOFF:
1956 if (!bfd_link_executable (info))
1957 info->flags |= DF_STATIC_TLS;
1960 case R_X86_64_GOT32:
1961 case R_X86_64_GOTPCREL:
1962 case R_X86_64_GOTPCRELX:
1963 case R_X86_64_REX_GOTPCRELX:
1964 case R_X86_64_TLSGD:
1965 case R_X86_64_GOT64:
1966 case R_X86_64_GOTPCREL64:
1967 case R_X86_64_GOTPLT64:
1968 case R_X86_64_GOTPC32_TLSDESC:
1969 case R_X86_64_TLSDESC_CALL:
1970 /* This symbol requires a global offset table entry. */
1972 int tls_type, old_tls_type;
1976 default: tls_type = GOT_NORMAL; break;
1977 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break;
1978 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break;
1979 case R_X86_64_GOTPC32_TLSDESC:
1980 case R_X86_64_TLSDESC_CALL:
1981 tls_type = GOT_TLS_GDESC; break;
1986 h->got.refcount = 1;
1987 old_tls_type = eh->tls_type;
1991 bfd_signed_vma *local_got_refcounts;
1993 /* This is a global offset table entry for a local symbol. */
1994 local_got_refcounts = elf_local_got_refcounts (abfd);
1995 if (local_got_refcounts == NULL)
1999 size = symtab_hdr->sh_info;
2000 size *= sizeof (bfd_signed_vma)
2001 + sizeof (bfd_vma) + sizeof (char);
2002 local_got_refcounts = ((bfd_signed_vma *)
2003 bfd_zalloc (abfd, size));
2004 if (local_got_refcounts == NULL)
2006 elf_local_got_refcounts (abfd) = local_got_refcounts;
2007 elf_x86_local_tlsdesc_gotent (abfd)
2008 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info);
2009 elf_x86_local_got_tls_type (abfd)
2010 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info);
2012 local_got_refcounts[r_symndx] = 1;
2014 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2017 /* If a TLS symbol is accessed using IE at least once,
2018 there is no point to use dynamic model for it. */
2019 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2020 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2021 || tls_type != GOT_TLS_IE))
2023 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2024 tls_type = old_tls_type;
2025 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2026 && GOT_TLS_GD_ANY_P (tls_type))
2027 tls_type |= old_tls_type;
2031 name = h->root.root.string;
2033 name = bfd_elf_sym_name (abfd, symtab_hdr,
2036 /* xgettext:c-format */
2037 (_("%pB: '%s' accessed both as normal and"
2038 " thread local symbol"),
2040 bfd_set_error (bfd_error_bad_value);
2045 if (old_tls_type != tls_type)
2048 eh->tls_type = tls_type;
2050 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2055 case R_X86_64_GOTOFF64:
2056 case R_X86_64_GOTPC32:
2057 case R_X86_64_GOTPC64:
2060 eh->zero_undefweak &= 0x2;
2063 case R_X86_64_PLT32:
2064 case R_X86_64_PLT32_BND:
2065 /* This symbol requires a procedure linkage table entry. We
2066 actually build the entry in adjust_dynamic_symbol,
2067 because this might be a case of linking PIC code which is
2068 never referenced by a dynamic object, in which case we
2069 don't need to generate a procedure linkage table entry
2072 /* If this is a local symbol, we resolve it directly without
2073 creating a procedure linkage table entry. */
2077 eh->zero_undefweak &= 0x2;
2079 h->plt.refcount = 1;
2082 case R_X86_64_PLTOFF64:
2083 /* This tries to form the 'address' of a function relative
2084 to GOT. For global symbols we need a PLT entry. */
2088 h->plt.refcount = 1;
2092 case R_X86_64_SIZE32:
2093 case R_X86_64_SIZE64:
2098 if (!ABI_64_P (abfd))
2104 /* Check relocation overflow as these relocs may lead to
2105 run-time relocation overflow. Don't error out for
2106 sections we don't care about, such as debug sections or
2107 when relocation overflow check is disabled. */
2108 if (!info->no_reloc_overflow_check
2110 && (bfd_link_pic (info)
2111 || (bfd_link_executable (info)
2115 && (sec->flags & SEC_READONLY) == 0)))
2116 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2117 &x86_64_elf_howto_table[r_type]);
2123 case R_X86_64_PC32_BND:
2127 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2128 eh->zero_undefweak |= 0x2;
2129 /* We are called after all symbols have been resolved. Only
2130 relocation against STT_GNU_IFUNC symbol must go through
2133 && (bfd_link_executable (info)
2134 || h->type == STT_GNU_IFUNC))
2136 bfd_boolean func_pointer_ref = FALSE;
2138 if (r_type == R_X86_64_PC32)
2140 /* Since something like ".long foo - ." may be used
2141 as pointer, make sure that PLT is used if foo is
2142 a function defined in a shared library. */
2143 if ((sec->flags & SEC_CODE) == 0)
2145 h->pointer_equality_needed = 1;
2146 if (bfd_link_pie (info)
2147 && h->type == STT_FUNC
2152 h->plt.refcount = 1;
2156 else if (r_type != R_X86_64_PC32_BND
2157 && r_type != R_X86_64_PC64)
2159 h->pointer_equality_needed = 1;
2160 /* At run-time, R_X86_64_64 can be resolved for both
2161 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2162 can only be resolved for x32. */
2163 if ((sec->flags & SEC_READONLY) == 0
2164 && (r_type == R_X86_64_64
2165 || (!ABI_64_P (abfd)
2166 && (r_type == R_X86_64_32
2167 || r_type == R_X86_64_32S))))
2168 func_pointer_ref = TRUE;
2171 if (!func_pointer_ref)
2173 /* If this reloc is in a read-only section, we might
2174 need a copy reloc. We can't check reliably at this
2175 stage whether the section is read-only, as input
2176 sections have not yet been mapped to output sections.
2177 Tentatively set the flag for now, and correct in
2178 adjust_dynamic_symbol. */
2181 /* We may need a .plt entry if the symbol is a function
2182 defined in a shared lib or is a function referenced
2183 from the code or read-only section. */
2185 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2186 h->plt.refcount = 1;
2192 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type,
2193 htab->pointer_r_type))
2195 struct elf_dyn_relocs *p;
2196 struct elf_dyn_relocs **head;
2198 /* We must copy these reloc types into the output file.
2199 Create a reloc section in dynobj and make room for
2203 sreloc = _bfd_elf_make_dynamic_reloc_section
2204 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2,
2205 abfd, /*rela?*/ TRUE);
2211 /* If this is a global symbol, we count the number of
2212 relocations we need for this symbol. */
2214 head = &eh->dyn_relocs;
2217 /* Track dynamic relocs needed for local syms too.
2218 We really need local syms available to do this
2223 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
2228 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2232 /* Beware of type punned pointers vs strict aliasing
2234 vpp = &(elf_section_data (s)->local_dynrel);
2235 head = (struct elf_dyn_relocs **)vpp;
2239 if (p == NULL || p->sec != sec)
2241 bfd_size_type amt = sizeof *p;
2243 p = ((struct elf_dyn_relocs *)
2244 bfd_alloc (htab->elf.dynobj, amt));
2255 /* Count size relocation as PC-relative relocation. */
2256 if (X86_PCREL_TYPE_P (r_type) || size_reloc)
2261 /* This relocation describes the C++ object vtable hierarchy.
2262 Reconstruct it for later use during GC. */
2263 case R_X86_64_GNU_VTINHERIT:
2264 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2268 /* This relocation describes which C++ vtable entries are actually
2269 used. Record for later use during GC. */
2270 case R_X86_64_GNU_VTENTRY:
2271 BFD_ASSERT (h != NULL);
2273 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2282 if (elf_section_data (sec)->this_hdr.contents != contents)
2284 if (!converted && !info->keep_memory)
2288 /* Cache the section contents for elf_link_input_bfd if any
2289 load is converted or --no-keep-memory isn't used. */
2290 elf_section_data (sec)->this_hdr.contents = contents;
2294 /* Cache relocations if any load is converted. */
2295 if (elf_section_data (sec)->relocs != relocs && converted)
2296 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2301 if (elf_section_data (sec)->this_hdr.contents != contents)
2303 sec->check_relocs_failed = 1;
2307 /* Return the relocation value for @tpoff relocation
2308 if STT_TLS virtual address is ADDRESS. */
2311 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2313 struct elf_link_hash_table *htab = elf_hash_table (info);
2314 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2315 bfd_vma static_tls_size;
2317 /* If tls_segment is NULL, we should have signalled an error already. */
2318 if (htab->tls_sec == NULL)
2321 /* Consider special static TLS alignment requirements. */
2322 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
2323 return address - static_tls_size - htab->tls_sec->vma;
2326 /* Relocate an x86_64 ELF section. */
2329 elf_x86_64_relocate_section (bfd *output_bfd,
2330 struct bfd_link_info *info,
2332 asection *input_section,
2334 Elf_Internal_Rela *relocs,
2335 Elf_Internal_Sym *local_syms,
2336 asection **local_sections)
2338 struct elf_x86_link_hash_table *htab;
2339 Elf_Internal_Shdr *symtab_hdr;
2340 struct elf_link_hash_entry **sym_hashes;
2341 bfd_vma *local_got_offsets;
2342 bfd_vma *local_tlsdesc_gotents;
2343 Elf_Internal_Rela *rel;
2344 Elf_Internal_Rela *wrel;
2345 Elf_Internal_Rela *relend;
2346 unsigned int plt_entry_size;
2348 /* Skip if check_relocs failed. */
2349 if (input_section->check_relocs_failed)
2352 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2356 BFD_ASSERT (is_x86_elf (input_bfd, htab));
2358 plt_entry_size = htab->plt.plt_entry_size;
2359 symtab_hdr = &elf_symtab_hdr (input_bfd);
2360 sym_hashes = elf_sym_hashes (input_bfd);
2361 local_got_offsets = elf_local_got_offsets (input_bfd);
2362 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2364 _bfd_x86_elf_set_tls_module_base (info);
2366 rel = wrel = relocs;
2367 relend = relocs + input_section->reloc_count;
2368 for (; rel < relend; wrel++, rel++)
2370 unsigned int r_type, r_type_tls;
2371 reloc_howto_type *howto;
2372 unsigned long r_symndx;
2373 struct elf_link_hash_entry *h;
2374 struct elf_x86_link_hash_entry *eh;
2375 Elf_Internal_Sym *sym;
2377 bfd_vma off, offplt, plt_offset;
2379 bfd_boolean unresolved_reloc;
2380 bfd_reloc_status_type r;
2382 asection *base_got, *resolved_plt;
2384 bfd_boolean resolved_to_zero;
2385 bfd_boolean relative_reloc;
2386 bfd_boolean converted_reloc;
2387 bfd_boolean need_copy_reloc_in_pie;
2389 r_type = ELF32_R_TYPE (rel->r_info);
2390 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2391 || r_type == (int) R_X86_64_GNU_VTENTRY)
2398 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2399 r_type &= ~R_X86_64_converted_reloc_bit;
2401 if (r_type >= (int) R_X86_64_standard)
2402 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2404 if (r_type != (int) R_X86_64_32
2405 || ABI_64_P (output_bfd))
2406 howto = x86_64_elf_howto_table + r_type;
2408 howto = (x86_64_elf_howto_table
2409 + ARRAY_SIZE (x86_64_elf_howto_table) - 1);
2410 r_symndx = htab->r_sym (rel->r_info);
2414 unresolved_reloc = FALSE;
2415 if (r_symndx < symtab_hdr->sh_info)
2417 sym = local_syms + r_symndx;
2418 sec = local_sections[r_symndx];
2420 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2422 st_size = sym->st_size;
2424 /* Relocate against local STT_GNU_IFUNC symbol. */
2425 if (!bfd_link_relocatable (info)
2426 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2428 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2433 /* Set STT_GNU_IFUNC symbol value. */
2434 h->root.u.def.value = sym->st_value;
2435 h->root.u.def.section = sec;
2440 bfd_boolean warned ATTRIBUTE_UNUSED;
2441 bfd_boolean ignored ATTRIBUTE_UNUSED;
2443 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2444 r_symndx, symtab_hdr, sym_hashes,
2446 unresolved_reloc, warned, ignored);
2450 if (sec != NULL && discarded_section (sec))
2452 _bfd_clear_contents (howto, input_bfd, input_section,
2453 contents + rel->r_offset);
2454 wrel->r_offset = rel->r_offset;
2458 /* For ld -r, remove relocations in debug sections against
2459 sections defined in discarded sections. Not done for
2460 eh_frame editing code expects to be present. */
2461 if (bfd_link_relocatable (info)
2462 && (input_section->flags & SEC_DEBUGGING))
2468 if (bfd_link_relocatable (info))
2475 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2477 if (r_type == R_X86_64_64)
2479 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2480 zero-extend it to 64bit if addend is zero. */
2481 r_type = R_X86_64_32;
2482 memset (contents + rel->r_offset + 4, 0, 4);
2484 else if (r_type == R_X86_64_SIZE64)
2486 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2487 zero-extend it to 64bit if addend is zero. */
2488 r_type = R_X86_64_SIZE32;
2489 memset (contents + rel->r_offset + 4, 0, 4);
2493 eh = (struct elf_x86_link_hash_entry *) h;
2495 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2496 it here if it is defined in a non-shared object. */
2498 && h->type == STT_GNU_IFUNC
2504 if ((input_section->flags & SEC_ALLOC) == 0)
2506 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2507 STT_GNU_IFUNC symbol as STT_FUNC. */
2508 if (elf_section_type (input_section) == SHT_NOTE)
2510 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2511 sections because such sections are not SEC_ALLOC and
2512 thus ld.so will not process them. */
2513 if ((input_section->flags & SEC_DEBUGGING) != 0)
2523 case R_X86_64_GOTPCREL:
2524 case R_X86_64_GOTPCRELX:
2525 case R_X86_64_REX_GOTPCRELX:
2526 case R_X86_64_GOTPCREL64:
2527 base_got = htab->elf.sgot;
2528 off = h->got.offset;
2530 if (base_got == NULL)
2533 if (off == (bfd_vma) -1)
2535 /* We can't use h->got.offset here to save state, or
2536 even just remember the offset, as finish_dynamic_symbol
2537 would use that as offset into .got. */
2539 if (h->plt.offset == (bfd_vma) -1)
2542 if (htab->elf.splt != NULL)
2544 plt_index = (h->plt.offset / plt_entry_size
2545 - htab->plt.has_plt0);
2546 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2547 base_got = htab->elf.sgotplt;
2551 plt_index = h->plt.offset / plt_entry_size;
2552 off = plt_index * GOT_ENTRY_SIZE;
2553 base_got = htab->elf.igotplt;
2556 if (h->dynindx == -1
2560 /* This references the local defitionion. We must
2561 initialize this entry in the global offset table.
2562 Since the offset must always be a multiple of 8,
2563 we use the least significant bit to record
2564 whether we have initialized it already.
2566 When doing a dynamic link, we create a .rela.got
2567 relocation entry to initialize the value. This
2568 is done in the finish_dynamic_symbol routine. */
2573 bfd_put_64 (output_bfd, relocation,
2574 base_got->contents + off);
2575 /* Note that this is harmless for the GOTPLT64
2576 case, as -1 | 1 still is -1. */
2582 relocation = (base_got->output_section->vma
2583 + base_got->output_offset + off);
2588 if (h->plt.offset == (bfd_vma) -1)
2590 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2591 if (r_type == htab->pointer_r_type
2592 && (input_section->flags & SEC_CODE) == 0)
2593 goto do_ifunc_pointer;
2594 goto bad_ifunc_reloc;
2597 /* STT_GNU_IFUNC symbol must go through PLT. */
2598 if (htab->elf.splt != NULL)
2600 if (htab->plt_second != NULL)
2602 resolved_plt = htab->plt_second;
2603 plt_offset = eh->plt_second.offset;
2607 resolved_plt = htab->elf.splt;
2608 plt_offset = h->plt.offset;
2613 resolved_plt = htab->elf.iplt;
2614 plt_offset = h->plt.offset;
2617 relocation = (resolved_plt->output_section->vma
2618 + resolved_plt->output_offset + plt_offset);
2624 if (h->root.root.string)
2625 name = h->root.root.string;
2627 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2630 /* xgettext:c-format */
2631 (_("%pB: relocation %s against STT_GNU_IFUNC "
2632 "symbol `%s' isn't supported"), input_bfd,
2634 bfd_set_error (bfd_error_bad_value);
2638 if (bfd_link_pic (info))
2643 if (ABI_64_P (output_bfd))
2648 if (rel->r_addend != 0)
2650 if (h->root.root.string)
2651 name = h->root.root.string;
2653 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2656 /* xgettext:c-format */
2657 (_("%pB: relocation %s against STT_GNU_IFUNC "
2658 "symbol `%s' has non-zero addend: %" PRId64),
2659 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2660 bfd_set_error (bfd_error_bad_value);
2664 /* Generate dynamic relcoation only when there is a
2665 non-GOT reference in a shared object or there is no
2667 if ((bfd_link_pic (info) && h->non_got_ref)
2668 || h->plt.offset == (bfd_vma) -1)
2670 Elf_Internal_Rela outrel;
2673 /* Need a dynamic relocation to get the real function
2675 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2679 if (outrel.r_offset == (bfd_vma) -1
2680 || outrel.r_offset == (bfd_vma) -2)
2683 outrel.r_offset += (input_section->output_section->vma
2684 + input_section->output_offset);
2686 if (POINTER_LOCAL_IFUNC_P (info, h))
2688 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2689 h->root.root.string,
2690 h->root.u.def.section->owner);
2692 /* This symbol is resolved locally. */
2693 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2694 outrel.r_addend = (h->root.u.def.value
2695 + h->root.u.def.section->output_section->vma
2696 + h->root.u.def.section->output_offset);
2700 outrel.r_info = htab->r_info (h->dynindx, r_type);
2701 outrel.r_addend = 0;
2704 /* Dynamic relocations are stored in
2705 1. .rela.ifunc section in PIC object.
2706 2. .rela.got section in dynamic executable.
2707 3. .rela.iplt section in static executable. */
2708 if (bfd_link_pic (info))
2709 sreloc = htab->elf.irelifunc;
2710 else if (htab->elf.splt != NULL)
2711 sreloc = htab->elf.srelgot;
2713 sreloc = htab->elf.irelplt;
2714 elf_append_rela (output_bfd, sreloc, &outrel);
2716 /* If this reloc is against an external symbol, we
2717 do not want to fiddle with the addend. Otherwise,
2718 we need to include the symbol value so that it
2719 becomes an addend for the dynamic reloc. For an
2720 internal symbol, we have updated addend. */
2725 case R_X86_64_PC32_BND:
2727 case R_X86_64_PLT32:
2728 case R_X86_64_PLT32_BND:
2734 resolved_to_zero = (eh != NULL
2735 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
2737 /* When generating a shared object, the relocations handled here are
2738 copied into the output file to be resolved at run time. */
2741 case R_X86_64_GOT32:
2742 case R_X86_64_GOT64:
2743 /* Relocation is to the entry for this symbol in the global
2745 case R_X86_64_GOTPCREL:
2746 case R_X86_64_GOTPCRELX:
2747 case R_X86_64_REX_GOTPCRELX:
2748 case R_X86_64_GOTPCREL64:
2749 /* Use global offset table entry as symbol value. */
2750 case R_X86_64_GOTPLT64:
2751 /* This is obsolete and treated the same as GOT64. */
2752 base_got = htab->elf.sgot;
2754 if (htab->elf.sgot == NULL)
2757 relative_reloc = FALSE;
2760 off = h->got.offset;
2762 && h->plt.offset != (bfd_vma)-1
2763 && off == (bfd_vma)-1)
2765 /* We can't use h->got.offset here to save
2766 state, or even just remember the offset, as
2767 finish_dynamic_symbol would use that as offset into
2769 bfd_vma plt_index = (h->plt.offset / plt_entry_size
2770 - htab->plt.has_plt0);
2771 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2772 base_got = htab->elf.sgotplt;
2775 if (RESOLVED_LOCALLY_P (info, h, htab))
2777 /* We must initialize this entry in the global offset
2778 table. Since the offset must always be a multiple
2779 of 8, we use the least significant bit to record
2780 whether we have initialized it already.
2782 When doing a dynamic link, we create a .rela.got
2783 relocation entry to initialize the value. This is
2784 done in the finish_dynamic_symbol routine. */
2789 bfd_put_64 (output_bfd, relocation,
2790 base_got->contents + off);
2791 /* Note that this is harmless for the GOTPLT64 case,
2792 as -1 | 1 still is -1. */
2795 if (GENERATE_RELATIVE_RELOC_P (info, h))
2797 /* If this symbol isn't dynamic in PIC,
2798 generate R_X86_64_RELATIVE here. */
2799 eh->no_finish_dynamic_symbol = 1;
2800 relative_reloc = TRUE;
2805 unresolved_reloc = FALSE;
2809 if (local_got_offsets == NULL)
2812 off = local_got_offsets[r_symndx];
2814 /* The offset must always be a multiple of 8. We use
2815 the least significant bit to record whether we have
2816 already generated the necessary reloc. */
2821 bfd_put_64 (output_bfd, relocation,
2822 base_got->contents + off);
2823 local_got_offsets[r_symndx] |= 1;
2825 if (bfd_link_pic (info))
2826 relative_reloc = TRUE;
2833 Elf_Internal_Rela outrel;
2835 /* We need to generate a R_X86_64_RELATIVE reloc
2836 for the dynamic linker. */
2837 s = htab->elf.srelgot;
2841 outrel.r_offset = (base_got->output_section->vma
2842 + base_got->output_offset
2844 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
2845 outrel.r_addend = relocation;
2846 elf_append_rela (output_bfd, s, &outrel);
2849 if (off >= (bfd_vma) -2)
2852 relocation = base_got->output_section->vma
2853 + base_got->output_offset + off;
2854 if (r_type != R_X86_64_GOTPCREL
2855 && r_type != R_X86_64_GOTPCRELX
2856 && r_type != R_X86_64_REX_GOTPCRELX
2857 && r_type != R_X86_64_GOTPCREL64)
2858 relocation -= htab->elf.sgotplt->output_section->vma
2859 - htab->elf.sgotplt->output_offset;
2863 case R_X86_64_GOTOFF64:
2864 /* Relocation is relative to the start of the global offset
2867 /* Check to make sure it isn't a protected function or data
2868 symbol for shared library since it may not be local when
2869 used as function address or with copy relocation. We also
2870 need to make sure that a symbol is referenced locally. */
2871 if (bfd_link_pic (info) && h)
2873 if (!h->def_regular)
2877 switch (ELF_ST_VISIBILITY (h->other))
2880 v = _("hidden symbol");
2883 v = _("internal symbol");
2886 v = _("protected symbol");
2894 /* xgettext:c-format */
2895 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
2896 " `%s' can not be used when making a shared object"),
2897 input_bfd, v, h->root.root.string);
2898 bfd_set_error (bfd_error_bad_value);
2901 else if (!bfd_link_executable (info)
2902 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
2903 && (h->type == STT_FUNC
2904 || h->type == STT_OBJECT)
2905 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
2908 /* xgettext:c-format */
2909 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
2910 " `%s' can not be used when making a shared object"),
2912 h->type == STT_FUNC ? "function" : "data",
2913 h->root.root.string);
2914 bfd_set_error (bfd_error_bad_value);
2919 /* Note that sgot is not involved in this
2920 calculation. We always want the start of .got.plt. If we
2921 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
2922 permitted by the ABI, we might have to change this
2924 relocation -= htab->elf.sgotplt->output_section->vma
2925 + htab->elf.sgotplt->output_offset;
2928 case R_X86_64_GOTPC32:
2929 case R_X86_64_GOTPC64:
2930 /* Use global offset table as symbol value. */
2931 relocation = htab->elf.sgotplt->output_section->vma
2932 + htab->elf.sgotplt->output_offset;
2933 unresolved_reloc = FALSE;
2936 case R_X86_64_PLTOFF64:
2937 /* Relocation is PLT entry relative to GOT. For local
2938 symbols it's the symbol itself relative to GOT. */
2940 /* See PLT32 handling. */
2941 && (h->plt.offset != (bfd_vma) -1
2942 || eh->plt_got.offset != (bfd_vma) -1)
2943 && htab->elf.splt != NULL)
2945 if (eh->plt_got.offset != (bfd_vma) -1)
2947 /* Use the GOT PLT. */
2948 resolved_plt = htab->plt_got;
2949 plt_offset = eh->plt_got.offset;
2951 else if (htab->plt_second != NULL)
2953 resolved_plt = htab->plt_second;
2954 plt_offset = eh->plt_second.offset;
2958 resolved_plt = htab->elf.splt;
2959 plt_offset = h->plt.offset;
2962 relocation = (resolved_plt->output_section->vma
2963 + resolved_plt->output_offset
2965 unresolved_reloc = FALSE;
2968 relocation -= htab->elf.sgotplt->output_section->vma
2969 + htab->elf.sgotplt->output_offset;
2972 case R_X86_64_PLT32:
2973 case R_X86_64_PLT32_BND:
2974 /* Relocation is to the entry for this symbol in the
2975 procedure linkage table. */
2977 /* Resolve a PLT32 reloc against a local symbol directly,
2978 without using the procedure linkage table. */
2982 if ((h->plt.offset == (bfd_vma) -1
2983 && eh->plt_got.offset == (bfd_vma) -1)
2984 || htab->elf.splt == NULL)
2986 /* We didn't make a PLT entry for this symbol. This
2987 happens when statically linking PIC code, or when
2988 using -Bsymbolic. */
2993 if (h->plt.offset != (bfd_vma) -1)
2995 if (htab->plt_second != NULL)
2997 resolved_plt = htab->plt_second;
2998 plt_offset = eh->plt_second.offset;
3002 resolved_plt = htab->elf.splt;
3003 plt_offset = h->plt.offset;
3008 /* Use the GOT PLT. */
3009 resolved_plt = htab->plt_got;
3010 plt_offset = eh->plt_got.offset;
3013 relocation = (resolved_plt->output_section->vma
3014 + resolved_plt->output_offset
3016 unresolved_reloc = FALSE;
3019 case R_X86_64_SIZE32:
3020 case R_X86_64_SIZE64:
3021 /* Set to symbol size. */
3022 relocation = st_size;
3028 case R_X86_64_PC32_BND:
3029 /* Don't complain about -fPIC if the symbol is undefined when
3030 building executable unless it is unresolved weak symbol,
3031 references a dynamic definition in PIE or -z nocopyreloc
3033 if ((input_section->flags & SEC_ALLOC) != 0
3034 && (input_section->flags & SEC_READONLY) != 0
3036 && ((bfd_link_executable (info)
3037 && ((h->root.type == bfd_link_hash_undefweak
3038 && !resolved_to_zero)
3039 || (bfd_link_pie (info)
3042 || ((info->nocopyreloc
3043 || (eh->def_protected
3044 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3046 && !(h->root.u.def.section->flags & SEC_CODE))))
3047 || bfd_link_dll (info)))
3049 bfd_boolean fail = FALSE;
3050 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3052 /* Symbol is referenced locally. Make sure it is
3054 fail = !(h->def_regular || ELF_COMMON_DEF_P (h));
3056 else if (!(bfd_link_pie (info)
3057 && (h->needs_copy || eh->needs_copy)))
3059 /* Symbol doesn't need copy reloc and isn't referenced
3060 locally. Address of protected function may not be
3061 reachable at run-time. */
3062 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3063 || (ELF_ST_VISIBILITY (h->other) == STV_PROTECTED
3064 && h->type == STT_FUNC));
3068 return elf_x86_64_need_pic (info, input_bfd, input_section,
3069 h, NULL, NULL, howto);
3071 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3072 as function address. */
3074 && (input_section->flags & SEC_CODE) == 0
3075 && bfd_link_pie (info)
3076 && h->type == STT_FUNC
3087 /* FIXME: The ABI says the linker should make sure the value is
3088 the same when it's zeroextended to 64 bit. */
3091 if ((input_section->flags & SEC_ALLOC) == 0)
3094 need_copy_reloc_in_pie = (bfd_link_pie (info)
3099 == bfd_link_hash_undefined))
3100 && (X86_PCREL_TYPE_P (r_type)
3101 || X86_SIZE_TYPE_P (r_type)));
3103 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type,
3104 need_copy_reloc_in_pie,
3105 resolved_to_zero, FALSE))
3107 Elf_Internal_Rela outrel;
3108 bfd_boolean skip, relocate;
3111 /* When generating a shared object, these relocations
3112 are copied into the output file to be resolved at run
3118 _bfd_elf_section_offset (output_bfd, info, input_section,
3120 if (outrel.r_offset == (bfd_vma) -1)
3122 else if (outrel.r_offset == (bfd_vma) -2)
3123 skip = TRUE, relocate = TRUE;
3125 outrel.r_offset += (input_section->output_section->vma
3126 + input_section->output_offset);
3129 memset (&outrel, 0, sizeof outrel);
3131 else if (COPY_INPUT_RELOC_P (info, h, r_type))
3133 outrel.r_info = htab->r_info (h->dynindx, r_type);
3134 outrel.r_addend = rel->r_addend;
3138 /* This symbol is local, or marked to become local.
3139 When relocation overflow check is disabled, we
3140 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3141 if (r_type == htab->pointer_r_type
3142 || (r_type == R_X86_64_32
3143 && info->no_reloc_overflow_check))
3146 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3147 outrel.r_addend = relocation + rel->r_addend;
3149 else if (r_type == R_X86_64_64
3150 && !ABI_64_P (output_bfd))
3153 outrel.r_info = htab->r_info (0,
3154 R_X86_64_RELATIVE64);
3155 outrel.r_addend = relocation + rel->r_addend;
3156 /* Check addend overflow. */
3157 if ((outrel.r_addend & 0x80000000)
3158 != (rel->r_addend & 0x80000000))
3161 int addend = rel->r_addend;
3162 if (h && h->root.root.string)
3163 name = h->root.root.string;
3165 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3168 /* xgettext:c-format */
3169 (_("%pB: addend %s%#x in relocation %s against "
3170 "symbol `%s' at %#" PRIx64
3171 " in section `%pA' is out of range"),
3172 input_bfd, addend < 0 ? "-" : "", addend,
3173 howto->name, name, (uint64_t) rel->r_offset,
3175 bfd_set_error (bfd_error_bad_value);
3183 if (bfd_is_abs_section (sec))
3185 else if (sec == NULL || sec->owner == NULL)
3187 bfd_set_error (bfd_error_bad_value);
3194 /* We are turning this relocation into one
3195 against a section symbol. It would be
3196 proper to subtract the symbol's value,
3197 osec->vma, from the emitted reloc addend,
3198 but ld.so expects buggy relocs. */
3199 osec = sec->output_section;
3200 sindx = elf_section_data (osec)->dynindx;
3203 asection *oi = htab->elf.text_index_section;
3204 sindx = elf_section_data (oi)->dynindx;
3206 BFD_ASSERT (sindx != 0);
3209 outrel.r_info = htab->r_info (sindx, r_type);
3210 outrel.r_addend = relocation + rel->r_addend;
3214 sreloc = elf_section_data (input_section)->sreloc;
3216 if (sreloc == NULL || sreloc->contents == NULL)
3218 r = bfd_reloc_notsupported;
3219 goto check_relocation_error;
3222 elf_append_rela (output_bfd, sreloc, &outrel);
3224 /* If this reloc is against an external symbol, we do
3225 not want to fiddle with the addend. Otherwise, we
3226 need to include the symbol value so that it becomes
3227 an addend for the dynamic reloc. */
3234 case R_X86_64_TLSGD:
3235 case R_X86_64_GOTPC32_TLSDESC:
3236 case R_X86_64_TLSDESC_CALL:
3237 case R_X86_64_GOTTPOFF:
3238 tls_type = GOT_UNKNOWN;
3239 if (h == NULL && local_got_offsets)
3240 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3242 tls_type = elf_x86_hash_entry (h)->tls_type;
3244 r_type_tls = r_type;
3245 if (! elf_x86_64_tls_transition (info, input_bfd,
3246 input_section, contents,
3247 symtab_hdr, sym_hashes,
3248 &r_type_tls, tls_type, rel,
3249 relend, h, r_symndx, TRUE))
3252 if (r_type_tls == R_X86_64_TPOFF32)
3254 bfd_vma roff = rel->r_offset;
3256 BFD_ASSERT (! unresolved_reloc);
3258 if (r_type == R_X86_64_TLSGD)
3260 /* GD->LE transition. For 64bit, change
3261 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3262 .word 0x6666; rex64; call __tls_get_addr@PLT
3264 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3266 call *__tls_get_addr@GOTPCREL(%rip)
3267 which may be converted to
3268 addr32 call __tls_get_addr
3271 leaq foo@tpoff(%rax), %rax
3273 leaq foo@tlsgd(%rip), %rdi
3274 .word 0x6666; rex64; call __tls_get_addr@PLT
3276 leaq foo@tlsgd(%rip), %rdi
3278 call *__tls_get_addr@GOTPCREL(%rip)
3279 which may be converted to
3280 addr32 call __tls_get_addr
3283 leaq foo@tpoff(%rax), %rax
3284 For largepic, change:
3285 leaq foo@tlsgd(%rip), %rdi
3286 movabsq $__tls_get_addr@pltoff, %rax
3291 leaq foo@tpoff(%rax), %rax
3292 nopw 0x0(%rax,%rax,1) */
3294 if (ABI_64_P (output_bfd))
3296 if (contents[roff + 5] == 0xb8)
3298 memcpy (contents + roff - 3,
3299 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3300 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3304 memcpy (contents + roff - 4,
3305 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3309 memcpy (contents + roff - 3,
3310 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3312 bfd_put_32 (output_bfd,
3313 elf_x86_64_tpoff (info, relocation),
3314 contents + roff + 8 + largepic);
3315 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3316 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3321 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3323 /* GDesc -> LE transition.
3324 It's originally something like:
3325 leaq x@tlsdesc(%rip), %rax
3328 movl $x@tpoff, %rax. */
3330 unsigned int val, type;
3332 type = bfd_get_8 (input_bfd, contents + roff - 3);
3333 val = bfd_get_8 (input_bfd, contents + roff - 1);
3334 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1),
3335 contents + roff - 3);
3336 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3337 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3338 contents + roff - 1);
3339 bfd_put_32 (output_bfd,
3340 elf_x86_64_tpoff (info, relocation),
3344 else if (r_type == R_X86_64_TLSDESC_CALL)
3346 /* GDesc -> LE transition.
3351 bfd_put_8 (output_bfd, 0x66, contents + roff);
3352 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3355 else if (r_type == R_X86_64_GOTTPOFF)
3357 /* IE->LE transition:
3358 For 64bit, originally it can be one of:
3359 movq foo@gottpoff(%rip), %reg
3360 addq foo@gottpoff(%rip), %reg
3363 leaq foo(%reg), %reg
3365 For 32bit, originally it can be one of:
3366 movq foo@gottpoff(%rip), %reg
3367 addl foo@gottpoff(%rip), %reg
3370 leal foo(%reg), %reg
3373 unsigned int val, type, reg;
3376 val = bfd_get_8 (input_bfd, contents + roff - 3);
3379 type = bfd_get_8 (input_bfd, contents + roff - 2);
3380 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3386 bfd_put_8 (output_bfd, 0x49,
3387 contents + roff - 3);
3388 else if (!ABI_64_P (output_bfd) && val == 0x44)
3389 bfd_put_8 (output_bfd, 0x41,
3390 contents + roff - 3);
3391 bfd_put_8 (output_bfd, 0xc7,
3392 contents + roff - 2);
3393 bfd_put_8 (output_bfd, 0xc0 | reg,
3394 contents + roff - 1);
3398 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3401 bfd_put_8 (output_bfd, 0x49,
3402 contents + roff - 3);
3403 else if (!ABI_64_P (output_bfd) && val == 0x44)
3404 bfd_put_8 (output_bfd, 0x41,
3405 contents + roff - 3);
3406 bfd_put_8 (output_bfd, 0x81,
3407 contents + roff - 2);
3408 bfd_put_8 (output_bfd, 0xc0 | reg,
3409 contents + roff - 1);
3413 /* addq/addl -> leaq/leal */
3415 bfd_put_8 (output_bfd, 0x4d,
3416 contents + roff - 3);
3417 else if (!ABI_64_P (output_bfd) && val == 0x44)
3418 bfd_put_8 (output_bfd, 0x45,
3419 contents + roff - 3);
3420 bfd_put_8 (output_bfd, 0x8d,
3421 contents + roff - 2);
3422 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3423 contents + roff - 1);
3425 bfd_put_32 (output_bfd,
3426 elf_x86_64_tpoff (info, relocation),
3434 if (htab->elf.sgot == NULL)
3439 off = h->got.offset;
3440 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3444 if (local_got_offsets == NULL)
3447 off = local_got_offsets[r_symndx];
3448 offplt = local_tlsdesc_gotents[r_symndx];
3455 Elf_Internal_Rela outrel;
3459 if (htab->elf.srelgot == NULL)
3462 indx = h && h->dynindx != -1 ? h->dynindx : 0;
3464 if (GOT_TLS_GDESC_P (tls_type))
3466 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3467 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3468 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3469 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3470 + htab->elf.sgotplt->output_offset
3472 + htab->sgotplt_jump_table_size);
3473 sreloc = htab->elf.srelplt;
3475 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3477 outrel.r_addend = 0;
3478 elf_append_rela (output_bfd, sreloc, &outrel);
3481 sreloc = htab->elf.srelgot;
3483 outrel.r_offset = (htab->elf.sgot->output_section->vma
3484 + htab->elf.sgot->output_offset + off);
3486 if (GOT_TLS_GD_P (tls_type))
3487 dr_type = R_X86_64_DTPMOD64;
3488 else if (GOT_TLS_GDESC_P (tls_type))
3491 dr_type = R_X86_64_TPOFF64;
3493 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3494 outrel.r_addend = 0;
3495 if ((dr_type == R_X86_64_TPOFF64
3496 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3497 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3498 outrel.r_info = htab->r_info (indx, dr_type);
3500 elf_append_rela (output_bfd, sreloc, &outrel);
3502 if (GOT_TLS_GD_P (tls_type))
3506 BFD_ASSERT (! unresolved_reloc);
3507 bfd_put_64 (output_bfd,
3508 relocation - _bfd_x86_elf_dtpoff_base (info),
3509 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3513 bfd_put_64 (output_bfd, 0,
3514 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3515 outrel.r_info = htab->r_info (indx,
3517 outrel.r_offset += GOT_ENTRY_SIZE;
3518 elf_append_rela (output_bfd, sreloc,
3527 local_got_offsets[r_symndx] |= 1;
3530 if (off >= (bfd_vma) -2
3531 && ! GOT_TLS_GDESC_P (tls_type))
3533 if (r_type_tls == r_type)
3535 if (r_type == R_X86_64_GOTPC32_TLSDESC
3536 || r_type == R_X86_64_TLSDESC_CALL)
3537 relocation = htab->elf.sgotplt->output_section->vma
3538 + htab->elf.sgotplt->output_offset
3539 + offplt + htab->sgotplt_jump_table_size;
3541 relocation = htab->elf.sgot->output_section->vma
3542 + htab->elf.sgot->output_offset + off;
3543 unresolved_reloc = FALSE;
3547 bfd_vma roff = rel->r_offset;
3549 if (r_type == R_X86_64_TLSGD)
3551 /* GD->IE transition. For 64bit, change
3552 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3553 .word 0x6666; rex64; call __tls_get_addr@PLT
3555 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3557 call *__tls_get_addr@GOTPCREL(%rip
3558 which may be converted to
3559 addr32 call __tls_get_addr
3562 addq foo@gottpoff(%rip), %rax
3564 leaq foo@tlsgd(%rip), %rdi
3565 .word 0x6666; rex64; call __tls_get_addr@PLT
3567 leaq foo@tlsgd(%rip), %rdi
3569 call *__tls_get_addr@GOTPCREL(%rip)
3570 which may be converted to
3571 addr32 call __tls_get_addr
3574 addq foo@gottpoff(%rip), %rax
3575 For largepic, change:
3576 leaq foo@tlsgd(%rip), %rdi
3577 movabsq $__tls_get_addr@pltoff, %rax
3582 addq foo@gottpoff(%rax), %rax
3583 nopw 0x0(%rax,%rax,1) */
3585 if (ABI_64_P (output_bfd))
3587 if (contents[roff + 5] == 0xb8)
3589 memcpy (contents + roff - 3,
3590 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
3591 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3595 memcpy (contents + roff - 4,
3596 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3600 memcpy (contents + roff - 3,
3601 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
3604 relocation = (htab->elf.sgot->output_section->vma
3605 + htab->elf.sgot->output_offset + off
3608 - input_section->output_section->vma
3609 - input_section->output_offset
3611 bfd_put_32 (output_bfd, relocation,
3612 contents + roff + 8 + largepic);
3613 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
3618 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3620 /* GDesc -> IE transition.
3621 It's originally something like:
3622 leaq x@tlsdesc(%rip), %rax
3625 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */
3627 /* Now modify the instruction as appropriate. To
3628 turn a leaq into a movq in the form we use it, it
3629 suffices to change the second byte from 0x8d to
3631 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
3633 bfd_put_32 (output_bfd,
3634 htab->elf.sgot->output_section->vma
3635 + htab->elf.sgot->output_offset + off
3637 - input_section->output_section->vma
3638 - input_section->output_offset
3643 else if (r_type == R_X86_64_TLSDESC_CALL)
3645 /* GDesc -> IE transition.
3652 bfd_put_8 (output_bfd, 0x66, contents + roff);
3653 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3661 case R_X86_64_TLSLD:
3662 if (! elf_x86_64_tls_transition (info, input_bfd,
3663 input_section, contents,
3664 symtab_hdr, sym_hashes,
3665 &r_type, GOT_UNKNOWN, rel,
3666 relend, h, r_symndx, TRUE))
3669 if (r_type != R_X86_64_TLSLD)
3671 /* LD->LE transition:
3672 leaq foo@tlsld(%rip), %rdi
3673 call __tls_get_addr@PLT
3674 For 64bit, we change it into:
3675 .word 0x6666; .byte 0x66; movq %fs:0, %rax
3676 For 32bit, we change it into:
3677 nopl 0x0(%rax); movl %fs:0, %eax
3679 leaq foo@tlsld(%rip), %rdi;
3680 call *__tls_get_addr@GOTPCREL(%rip)
3681 which may be converted to
3682 addr32 call __tls_get_addr
3683 For 64bit, we change it into:
3684 .word 0x6666; .word 0x6666; movq %fs:0, %rax
3685 For 32bit, we change it into:
3686 nopw 0x0(%rax); movl %fs:0, %eax
3687 For largepic, change:
3688 leaq foo@tlsgd(%rip), %rdi
3689 movabsq $__tls_get_addr@pltoff, %rax
3693 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
3696 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
3697 if (ABI_64_P (output_bfd))
3699 if (contents[rel->r_offset + 5] == 0xb8)
3700 memcpy (contents + rel->r_offset - 3,
3701 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
3702 "\x64\x48\x8b\x04\x25\0\0\0", 22);
3703 else if (contents[rel->r_offset + 4] == 0xff
3704 || contents[rel->r_offset + 4] == 0x67)
3705 memcpy (contents + rel->r_offset - 3,
3706 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
3709 memcpy (contents + rel->r_offset - 3,
3710 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
3714 if (contents[rel->r_offset + 4] == 0xff)
3715 memcpy (contents + rel->r_offset - 3,
3716 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
3719 memcpy (contents + rel->r_offset - 3,
3720 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
3722 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
3723 and R_X86_64_PLTOFF64. */
3729 if (htab->elf.sgot == NULL)
3732 off = htab->tls_ld_or_ldm_got.offset;
3737 Elf_Internal_Rela outrel;
3739 if (htab->elf.srelgot == NULL)
3742 outrel.r_offset = (htab->elf.sgot->output_section->vma
3743 + htab->elf.sgot->output_offset + off);
3745 bfd_put_64 (output_bfd, 0,
3746 htab->elf.sgot->contents + off);
3747 bfd_put_64 (output_bfd, 0,
3748 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
3749 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
3750 outrel.r_addend = 0;
3751 elf_append_rela (output_bfd, htab->elf.srelgot,
3753 htab->tls_ld_or_ldm_got.offset |= 1;
3755 relocation = htab->elf.sgot->output_section->vma
3756 + htab->elf.sgot->output_offset + off;
3757 unresolved_reloc = FALSE;
3760 case R_X86_64_DTPOFF32:
3761 if (!bfd_link_executable (info)
3762 || (input_section->flags & SEC_CODE) == 0)
3763 relocation -= _bfd_x86_elf_dtpoff_base (info);
3765 relocation = elf_x86_64_tpoff (info, relocation);
3768 case R_X86_64_TPOFF32:
3769 case R_X86_64_TPOFF64:
3770 BFD_ASSERT (bfd_link_executable (info));
3771 relocation = elf_x86_64_tpoff (info, relocation);
3774 case R_X86_64_DTPOFF64:
3775 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
3776 relocation -= _bfd_x86_elf_dtpoff_base (info);
3783 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
3784 because such sections are not SEC_ALLOC and thus ld.so will
3785 not process them. */
3786 if (unresolved_reloc
3787 && !((input_section->flags & SEC_DEBUGGING) != 0
3789 && _bfd_elf_section_offset (output_bfd, info, input_section,
3790 rel->r_offset) != (bfd_vma) -1)
3795 sec = h->root.u.def.section;
3796 if ((info->nocopyreloc
3797 || (eh->def_protected
3798 && elf_has_no_copy_on_protected (h->root.u.def.section->owner)))
3799 && !(h->root.u.def.section->flags & SEC_CODE))
3800 return elf_x86_64_need_pic (info, input_bfd, input_section,
3801 h, NULL, NULL, howto);
3806 /* xgettext:c-format */
3807 (_("%pB(%pA+%#" PRIx64 "): "
3808 "unresolvable %s relocation against symbol `%s'"),
3811 (uint64_t) rel->r_offset,
3813 h->root.root.string);
3819 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
3820 contents, rel->r_offset,
3821 relocation, rel->r_addend);
3823 check_relocation_error:
3824 if (r != bfd_reloc_ok)
3829 name = h->root.root.string;
3832 name = bfd_elf_string_from_elf_section (input_bfd,
3833 symtab_hdr->sh_link,
3838 name = bfd_section_name (input_bfd, sec);
3841 if (r == bfd_reloc_overflow)
3843 if (converted_reloc)
3845 info->callbacks->einfo
3846 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n"));
3849 (*info->callbacks->reloc_overflow)
3850 (info, (h ? &h->root : NULL), name, howto->name,
3851 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
3856 /* xgettext:c-format */
3857 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
3858 input_bfd, input_section,
3859 (uint64_t) rel->r_offset, name, (int) r);
3870 Elf_Internal_Shdr *rel_hdr;
3871 size_t deleted = rel - wrel;
3873 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
3874 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3875 if (rel_hdr->sh_size == 0)
3877 /* It is too late to remove an empty reloc section. Leave
3879 ??? What is wrong with an empty section??? */
3880 rel_hdr->sh_size = rel_hdr->sh_entsize;
3883 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
3884 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
3885 input_section->reloc_count -= deleted;
3891 /* Finish up dynamic symbol handling. We set the contents of various
3892 dynamic sections here. */
3895 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
3896 struct bfd_link_info *info,
3897 struct elf_link_hash_entry *h,
3898 Elf_Internal_Sym *sym)
3900 struct elf_x86_link_hash_table *htab;
3901 bfd_boolean use_plt_second;
3902 struct elf_x86_link_hash_entry *eh;
3903 bfd_boolean local_undefweak;
3905 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
3909 /* Use the second PLT section only if there is .plt section. */
3910 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
3912 eh = (struct elf_x86_link_hash_entry *) h;
3913 if (eh->no_finish_dynamic_symbol)
3916 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
3917 resolved undefined weak symbols in executable so that their
3918 references have value 0 at run-time. */
3919 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
3921 if (h->plt.offset != (bfd_vma) -1)
3924 bfd_vma got_offset, plt_offset;
3925 Elf_Internal_Rela rela;
3927 asection *plt, *gotplt, *relplt, *resolved_plt;
3928 const struct elf_backend_data *bed;
3929 bfd_vma plt_got_pcrel_offset;
3931 /* When building a static executable, use .iplt, .igot.plt and
3932 .rela.iplt sections for STT_GNU_IFUNC symbols. */
3933 if (htab->elf.splt != NULL)
3935 plt = htab->elf.splt;
3936 gotplt = htab->elf.sgotplt;
3937 relplt = htab->elf.srelplt;
3941 plt = htab->elf.iplt;
3942 gotplt = htab->elf.igotplt;
3943 relplt = htab->elf.irelplt;
3946 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
3948 /* Get the index in the procedure linkage table which
3949 corresponds to this symbol. This is the index of this symbol
3950 in all the symbols for which we are making plt entries. The
3951 first entry in the procedure linkage table is reserved.
3953 Get the offset into the .got table of the entry that
3954 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
3955 bytes. The first three are reserved for the dynamic linker.
3957 For static executables, we don't reserve anything. */
3959 if (plt == htab->elf.splt)
3961 got_offset = (h->plt.offset / htab->plt.plt_entry_size
3962 - htab->plt.has_plt0);
3963 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
3967 got_offset = h->plt.offset / htab->plt.plt_entry_size;
3968 got_offset = got_offset * GOT_ENTRY_SIZE;
3971 /* Fill in the entry in the procedure linkage table. */
3972 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
3973 htab->plt.plt_entry_size);
3976 memcpy (htab->plt_second->contents + eh->plt_second.offset,
3977 htab->non_lazy_plt->plt_entry,
3978 htab->non_lazy_plt->plt_entry_size);
3980 resolved_plt = htab->plt_second;
3981 plt_offset = eh->plt_second.offset;
3986 plt_offset = h->plt.offset;
3989 /* Insert the relocation positions of the plt section. */
3991 /* Put offset the PC-relative instruction referring to the GOT entry,
3992 subtracting the size of that instruction. */
3993 plt_got_pcrel_offset = (gotplt->output_section->vma
3994 + gotplt->output_offset
3996 - resolved_plt->output_section->vma
3997 - resolved_plt->output_offset
3999 - htab->plt.plt_got_insn_size);
4001 /* Check PC-relative offset overflow in PLT entry. */
4002 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4003 /* xgettext:c-format */
4004 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4005 output_bfd, h->root.root.string);
4007 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4008 (resolved_plt->contents + plt_offset
4009 + htab->plt.plt_got_offset));
4011 /* Fill in the entry in the global offset table, initially this
4012 points to the second part of the PLT entry. Leave the entry
4013 as zero for undefined weak symbol in PIE. No PLT relocation
4014 against undefined weak symbol in PIE. */
4015 if (!local_undefweak)
4017 if (htab->plt.has_plt0)
4018 bfd_put_64 (output_bfd, (plt->output_section->vma
4019 + plt->output_offset
4021 + htab->lazy_plt->plt_lazy_offset),
4022 gotplt->contents + got_offset);
4024 /* Fill in the entry in the .rela.plt section. */
4025 rela.r_offset = (gotplt->output_section->vma
4026 + gotplt->output_offset
4028 if (PLT_LOCAL_IFUNC_P (info, h))
4030 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4031 h->root.root.string,
4032 h->root.u.def.section->owner);
4034 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4035 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4036 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4037 rela.r_addend = (h->root.u.def.value
4038 + h->root.u.def.section->output_section->vma
4039 + h->root.u.def.section->output_offset);
4040 /* R_X86_64_IRELATIVE comes last. */
4041 plt_index = htab->next_irelative_index--;
4045 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4047 plt_index = htab->next_jump_slot_index++;
4050 /* Don't fill the second and third slots in PLT entry for
4051 static executables nor without PLT0. */
4052 if (plt == htab->elf.splt && htab->plt.has_plt0)
4055 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4057 /* Put relocation index. */
4058 bfd_put_32 (output_bfd, plt_index,
4059 (plt->contents + h->plt.offset
4060 + htab->lazy_plt->plt_reloc_offset));
4062 /* Put offset for jmp .PLT0 and check for overflow. We don't
4063 check relocation index for overflow since branch displacement
4064 will overflow first. */
4065 if (plt0_offset > 0x80000000)
4066 /* xgettext:c-format */
4067 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4068 output_bfd, h->root.root.string);
4069 bfd_put_32 (output_bfd, - plt0_offset,
4070 (plt->contents + h->plt.offset
4071 + htab->lazy_plt->plt_plt_offset));
4074 bed = get_elf_backend_data (output_bfd);
4075 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4076 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4079 else if (eh->plt_got.offset != (bfd_vma) -1)
4081 bfd_vma got_offset, plt_offset;
4082 asection *plt, *got;
4083 bfd_boolean got_after_plt;
4084 int32_t got_pcrel_offset;
4086 /* Set the entry in the GOT procedure linkage table. */
4087 plt = htab->plt_got;
4088 got = htab->elf.sgot;
4089 got_offset = h->got.offset;
4091 if (got_offset == (bfd_vma) -1
4092 || (h->type == STT_GNU_IFUNC && h->def_regular)
4097 /* Use the non-lazy PLT entry template for the GOT PLT since they
4098 are the identical. */
4099 /* Fill in the entry in the GOT procedure linkage table. */
4100 plt_offset = eh->plt_got.offset;
4101 memcpy (plt->contents + plt_offset,
4102 htab->non_lazy_plt->plt_entry,
4103 htab->non_lazy_plt->plt_entry_size);
4105 /* Put offset the PC-relative instruction referring to the GOT
4106 entry, subtracting the size of that instruction. */
4107 got_pcrel_offset = (got->output_section->vma
4108 + got->output_offset
4110 - plt->output_section->vma
4111 - plt->output_offset
4113 - htab->non_lazy_plt->plt_got_insn_size);
4115 /* Check PC-relative offset overflow in GOT PLT entry. */
4116 got_after_plt = got->output_section->vma > plt->output_section->vma;
4117 if ((got_after_plt && got_pcrel_offset < 0)
4118 || (!got_after_plt && got_pcrel_offset > 0))
4119 /* xgettext:c-format */
4120 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4121 output_bfd, h->root.root.string);
4123 bfd_put_32 (output_bfd, got_pcrel_offset,
4124 (plt->contents + plt_offset
4125 + htab->non_lazy_plt->plt_got_offset));
4128 if (!local_undefweak
4130 && (h->plt.offset != (bfd_vma) -1
4131 || eh->plt_got.offset != (bfd_vma) -1))
4133 /* Mark the symbol as undefined, rather than as defined in
4134 the .plt section. Leave the value if there were any
4135 relocations where pointer equality matters (this is a clue
4136 for the dynamic linker, to make function pointer
4137 comparisons work between an application and shared
4138 library), otherwise set it to zero. If a function is only
4139 called from a binary, there is no need to slow down
4140 shared libraries because of that. */
4141 sym->st_shndx = SHN_UNDEF;
4142 if (!h->pointer_equality_needed)
4146 /* Don't generate dynamic GOT relocation against undefined weak
4147 symbol in executable. */
4148 if (h->got.offset != (bfd_vma) -1
4149 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4150 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4151 && !local_undefweak)
4153 Elf_Internal_Rela rela;
4154 asection *relgot = htab->elf.srelgot;
4156 /* This symbol has an entry in the global offset table. Set it
4158 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4161 rela.r_offset = (htab->elf.sgot->output_section->vma
4162 + htab->elf.sgot->output_offset
4163 + (h->got.offset &~ (bfd_vma) 1));
4165 /* If this is a static link, or it is a -Bsymbolic link and the
4166 symbol is defined locally or was forced to be local because
4167 of a version file, we just want to emit a RELATIVE reloc.
4168 The entry in the global offset table will already have been
4169 initialized in the relocate_section function. */
4171 && h->type == STT_GNU_IFUNC)
4173 if (h->plt.offset == (bfd_vma) -1)
4175 /* STT_GNU_IFUNC is referenced without PLT. */
4176 if (htab->elf.splt == NULL)
4178 /* use .rel[a].iplt section to store .got relocations
4179 in static executable. */
4180 relgot = htab->elf.irelplt;
4182 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4184 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4185 h->root.root.string,
4186 h->root.u.def.section->owner);
4188 rela.r_info = htab->r_info (0,
4189 R_X86_64_IRELATIVE);
4190 rela.r_addend = (h->root.u.def.value
4191 + h->root.u.def.section->output_section->vma
4192 + h->root.u.def.section->output_offset);
4197 else if (bfd_link_pic (info))
4199 /* Generate R_X86_64_GLOB_DAT. */
4207 if (!h->pointer_equality_needed)
4210 /* For non-shared object, we can't use .got.plt, which
4211 contains the real function addres if we need pointer
4212 equality. We load the GOT entry with the PLT entry. */
4213 if (htab->plt_second != NULL)
4215 plt = htab->plt_second;
4216 plt_offset = eh->plt_second.offset;
4220 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4221 plt_offset = h->plt.offset;
4223 bfd_put_64 (output_bfd, (plt->output_section->vma
4224 + plt->output_offset
4226 htab->elf.sgot->contents + h->got.offset);
4230 else if (bfd_link_pic (info)
4231 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4233 if (!(h->def_regular || ELF_COMMON_DEF_P (h)))
4235 BFD_ASSERT((h->got.offset & 1) != 0);
4236 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4237 rela.r_addend = (h->root.u.def.value
4238 + h->root.u.def.section->output_section->vma
4239 + h->root.u.def.section->output_offset);
4243 BFD_ASSERT((h->got.offset & 1) == 0);
4245 bfd_put_64 (output_bfd, (bfd_vma) 0,
4246 htab->elf.sgot->contents + h->got.offset);
4247 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4251 elf_append_rela (output_bfd, relgot, &rela);
4256 Elf_Internal_Rela rela;
4259 /* This symbol needs a copy reloc. Set it up. */
4260 VERIFY_COPY_RELOC (h, htab)
4262 rela.r_offset = (h->root.u.def.value
4263 + h->root.u.def.section->output_section->vma
4264 + h->root.u.def.section->output_offset);
4265 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4267 if (h->root.u.def.section == htab->elf.sdynrelro)
4268 s = htab->elf.sreldynrelro;
4270 s = htab->elf.srelbss;
4271 elf_append_rela (output_bfd, s, &rela);
4277 /* Finish up local dynamic symbol handling. We set the contents of
4278 various dynamic sections here. */
/* htab_traverse callback: SLOT holds a local-IFUNC hash entry, INF the
   link info; delegates to elf_x86_64_finish_dynamic_symbol.  (Trailing
   arguments of the call are elided in this excerpt.)  */
4281 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4283 struct elf_link_hash_entry *h
4284 = (struct elf_link_hash_entry *) *slot;
4285 struct bfd_link_info *info
4286 = (struct bfd_link_info *) inf;
4288 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4292 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4293 here since undefined weak symbol may not be dynamic and may not be
4294 called for elf_x86_64_finish_dynamic_symbol. */
/* bfd_hash_traverse callback: skips anything that is not a non-dynamic
   undefined weak symbol, then reuses the regular finisher.  */
4297 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4300 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4301 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4303 if (h->root.type != bfd_link_hash_undefweak
4304 || h->dynindx != -1)
4307 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4311 /* Used to decide how to sort relocs in an optimal manner for the
4312 dynamic linker, before writing them out. */
4314 static enum elf_reloc_type_class
4315 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4316 const asection *rel_sec ATTRIBUTE_UNUSED,
4317 const Elf_Internal_Rela *rela)
4319 bfd *abfd = info->output_bfd;
4320 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4321 struct elf_x86_link_hash_table *htab
4322 = elf_x86_hash_table (info, X86_64_ELF_DATA);
/* If .dynsym contents are available, classify by the target symbol:
   anything against an STT_GNU_IFUNC symbol sorts as ifunc.  */
4324 if (htab->elf.dynsym != NULL
4325 && htab->elf.dynsym->contents != NULL)
4327 /* Check relocation against STT_GNU_IFUNC symbol if there are
4329 unsigned long r_symndx = htab->r_sym (rela->r_info);
4330 if (r_symndx != STN_UNDEF)
4332 Elf_Internal_Sym sym;
4333 if (!bed->s->swap_symbol_in (abfd,
4334 (htab->elf.dynsym->contents
4335 + r_symndx * bed->s->sizeof_sym),
4339 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4340 return reloc_class_ifunc;
/* Otherwise classify by relocation type.  (The case label before the
   reloc_class_copy return is elided in this excerpt — presumably
   R_X86_64_COPY; verify against the full source.)  */
4344 switch ((int) ELF32_R_TYPE (rela->r_info))
4346 case R_X86_64_IRELATIVE:
4347 return reloc_class_ifunc;
4348 case R_X86_64_RELATIVE:
4349 case R_X86_64_RELATIVE64:
4350 return reloc_class_relative;
4351 case R_X86_64_JUMP_SLOT:
4352 return reloc_class_plt;
4354 return reloc_class_copy;
4356 return reloc_class_normal;
4360 /* Finish up the dynamic sections. */
/* NOTE(review): lines are elided in this excerpt; some braces and
   intermediate expressions of this function are not visible.  */
4363 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4364 struct bfd_link_info *info)
4366 struct elf_x86_link_hash_table *htab;
4368 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4372 if (! htab->elf.dynamic_sections_created)
4375 if (htab->elf.splt && htab->elf.splt->size > 0)
4377 elf_section_data (htab->elf.splt->output_section)
4378 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4380 if (htab->plt.has_plt0)
4382 /* Fill in the special first entry in the procedure linkage
4384 memcpy (htab->elf.splt->contents,
4385 htab->lazy_plt->plt0_entry,
4386 htab->lazy_plt->plt0_entry_size);
4387 /* Add offset for pushq GOT+8(%rip), since the instruction
4388 uses 6 bytes subtract this value. */
4389 bfd_put_32 (output_bfd,
4390 (htab->elf.sgotplt->output_section->vma
4391 + htab->elf.sgotplt->output_offset
4393 - htab->elf.splt->output_section->vma
4394 - htab->elf.splt->output_offset
4396 (htab->elf.splt->contents
4397 + htab->lazy_plt->plt0_got1_offset));
4398 /* Add offset for the PC-relative instruction accessing
4399 GOT+16, subtracting the offset to the end of that
4401 bfd_put_32 (output_bfd,
4402 (htab->elf.sgotplt->output_section->vma
4403 + htab->elf.sgotplt->output_offset
4405 - htab->elf.splt->output_section->vma
4406 - htab->elf.splt->output_offset
4407 - htab->lazy_plt->plt0_got2_insn_end),
4408 (htab->elf.splt->contents
4409 + htab->lazy_plt->plt0_got2_offset));
4412 if (htab->tlsdesc_plt)
4414 /* The TLSDESC entry in a lazy procedure linkage table. */
4415 static const bfd_byte tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
4417 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
4418 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
4419 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
/* Reserve the tlsdesc GOT slot (cleared to zero), then install the
   TLSDESC PLT entry and patch its two PC-relative displacements.  */
4422 bfd_put_64 (output_bfd, (bfd_vma) 0,
4423 htab->elf.sgot->contents + htab->tlsdesc_got);
4425 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt,
4426 tlsdesc_plt_entry, LAZY_PLT_ENTRY_SIZE);
4428 /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4
4429 bytes and the instruction uses 6 bytes, subtract these
4431 bfd_put_32 (output_bfd,
4432 (htab->elf.sgotplt->output_section->vma
4433 + htab->elf.sgotplt->output_offset
4435 - htab->elf.splt->output_section->vma
4436 - htab->elf.splt->output_offset
4439 (htab->elf.splt->contents
4442 /* Add offset for indirect branch via GOT+TDG, where TDG
4443 stands for htab->tlsdesc_got, subtracting the offset
4444 to the end of that instruction. */
4445 bfd_put_32 (output_bfd,
4446 (htab->elf.sgot->output_section->vma
4447 + htab->elf.sgot->output_offset
4449 - htab->elf.splt->output_section->vma
4450 - htab->elf.splt->output_offset
4453 (htab->elf.splt->contents
4454 + htab->tlsdesc_plt + 4 + 6 + 2));
4458 /* Fill PLT entries for undefined weak symbols in PIE. */
4459 if (bfd_link_pie (info))
4460 bfd_hash_traverse (&info->hash->table,
4461 elf_x86_64_pie_finish_undefweak_symbol,
4467 /* Fill PLT/GOT entries and allocate dynamic relocations for local
4468 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
4469 It has to be done before elf_link_sort_relocs is called so that
4470 dynamic relocations are properly sorted. */
/* Backend hook elf_backend_output_arch_local_syms; FUNC is unused here —
   the whole job is walking the local-IFUNC hash table.  */
4473 elf_x86_64_output_arch_local_syms
4474 (bfd *output_bfd ATTRIBUTE_UNUSED,
4475 struct bfd_link_info *info,
4476 void *flaginfo ATTRIBUTE_UNUSED,
4477 int (*func) (void *, const char *,
4480 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
4482 struct elf_x86_link_hash_table *htab
4483 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4487 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
4488 htab_traverse (htab->loc_hash_table,
4489 elf_x86_64_finish_local_dynamic_symbol,
4495 /* Forward declaration. */
4496 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt;
4498 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
4499 dynamic relocations. */
/* NOTE(review): this excerpt elides lines, so several parameters, braces
   and else-branches of this function are not visible here.  The visible
   logic: probe each known PLT section, detect its flavor (lazy, BND,
   IBT, non-lazy, second) by comparing contents against the known entry
   templates, then hand the per-section descriptions to the generic
   _bfd_x86_elf_get_synthetic_symtab.  */
4502 elf_x86_64_get_synthetic_symtab (bfd *abfd,
4503 long symcount ATTRIBUTE_UNUSED,
4504 asymbol **syms ATTRIBUTE_UNUSED,
4511 bfd_byte *plt_contents;
4513 const struct elf_x86_lazy_plt_layout *lazy_plt;
4514 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
4515 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
4516 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
4517 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
4518 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
4520 enum elf_x86_plt_type plt_type;
4521 struct elf_x86_plt plts[] =
4523 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
4524 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
4525 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
4526 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
4527 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
/* Only dynamic objects and executables can have PLTs worth scanning.  */
4532 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
4535 if (dynsymcount <= 0)
4538 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
/* Select the candidate PLT templates for this target/ABI.  */
4542 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl)
4544 lazy_plt = &elf_x86_64_lazy_plt;
4545 non_lazy_plt = &elf_x86_64_non_lazy_plt;
4546 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
4547 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
4548 if (ABI_64_P (abfd))
4550 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4551 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4555 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4556 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4561 lazy_plt = &elf_x86_64_nacl_plt;
4562 non_lazy_plt = NULL;
4563 lazy_bnd_plt = NULL;
4564 non_lazy_bnd_plt = NULL;
4565 lazy_ibt_plt = NULL;
4566 non_lazy_ibt_plt = NULL;
4570 for (j = 0; plts[j].name != NULL; j++)
4572 plt = bfd_get_section_by_name (abfd, plts[j].name);
4573 if (plt == NULL || plt->size == 0)
4576 /* Get the PLT section contents. */
4577 plt_contents = (bfd_byte *) bfd_malloc (plt->size);
4578 if (plt_contents == NULL)
4580 if (!bfd_get_section_contents (abfd, (asection *) plt,
4581 plt_contents, 0, plt->size))
4583 free (plt_contents);
4587 /* Check what kind of PLT it is. */
4588 plt_type = plt_unknown;
4589 if (plts[j].type == plt_unknown
4590 && (plt->size >= (lazy_plt->plt_entry_size
4591 + lazy_plt->plt_entry_size)))
4593 /* Match lazy PLT first. Need to check the first two
4595 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
4596 lazy_plt->plt0_got1_offset) == 0)
4597 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
4599 plt_type = plt_lazy;
4600 else if (lazy_bnd_plt != NULL
4601 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
4602 lazy_bnd_plt->plt0_got1_offset) == 0)
4603 && (memcmp (plt_contents + 6,
4604 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
4606 plt_type = plt_lazy | plt_second;
4607 /* The fist entry in the lazy IBT PLT is the same as the
4609 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
4610 lazy_ibt_plt->plt_entry,
4611 lazy_ibt_plt->plt_got_offset) == 0))
4612 lazy_plt = lazy_ibt_plt;
4614 lazy_plt = lazy_bnd_plt;
4618 if (non_lazy_plt != NULL
4619 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
4620 && plt->size >= non_lazy_plt->plt_entry_size)
4622 /* Match non-lazy PLT. */
4623 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
4624 non_lazy_plt->plt_got_offset) == 0)
4625 plt_type = plt_non_lazy;
4628 if (plt_type == plt_unknown || plt_type == plt_second)
4630 if (non_lazy_bnd_plt != NULL
4631 && plt->size >= non_lazy_bnd_plt->plt_entry_size
4632 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
4633 non_lazy_bnd_plt->plt_got_offset) == 0))
4635 /* Match BND PLT. */
4636 plt_type = plt_second;
4637 non_lazy_plt = non_lazy_bnd_plt;
4639 else if (non_lazy_ibt_plt != NULL
4640 && plt->size >= non_lazy_ibt_plt->plt_entry_size
4641 && (memcmp (plt_contents,
4642 non_lazy_ibt_plt->plt_entry,
4643 non_lazy_ibt_plt->plt_got_offset) == 0))
4645 /* Match IBT PLT. */
4646 plt_type = plt_second;
4647 non_lazy_plt = non_lazy_ibt_plt;
/* Unrecognized PLT layout: release the buffer and (presumably) skip
   or fail — continuation elided in this excerpt.  */
4651 if (plt_type == plt_unknown)
4653 free (plt_contents);
4658 plts[j].type = plt_type;
4660 if ((plt_type & plt_lazy))
4662 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
4663 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
4664 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
4665 /* Skip PLT0 in lazy PLT. */
4670 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
4671 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
4672 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
4676 /* Skip lazy PLT when the second PLT is used. */
4677 if (plt_type == (plt_lazy | plt_second))
4681 n = plt->size / plts[j].plt_entry_size;
4686 plts[j].contents = plt_contents;
4689 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
4690 (bfd_vma) 0, plts, dynsyms,
4694 /* Handle an x86-64 specific section when reading an object file. This
4695 is called when elfcode.h finds a section with an unknown type. */
/* Accepts only SHT_X86_64_UNWIND sections, wrapping them via the generic
   section builder.  (Return statements are elided in this excerpt.)  */
4698 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
4699 const char *name, int shindex)
4701 if (hdr->sh_type != SHT_X86_64_UNWIND)
4704 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
4710 /* Hook called by the linker routine which adds symbols from an object
4711 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
4715 elf_x86_64_add_symbol_hook (bfd *abfd,
4716 struct bfd_link_info *info ATTRIBUTE_UNUSED,
4717 Elf_Internal_Sym *sym,
4718 const char **namep ATTRIBUTE_UNUSED,
4719 flagword *flagsp ATTRIBUTE_UNUSED,
4725 switch (sym->st_shndx)
4727 case SHN_X86_64_LCOMMON:
/* Large-common symbols go into a dedicated LARGE_COMMON section,
   created on first use with SHF_X86_64_LARGE set.  */
4728 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
4731 lcomm = bfd_make_section_with_flags (abfd,
4735 | SEC_LINKER_CREATED));
4738 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
/* For common symbols the value reported to the linker is the size.  */
4741 *valp = sym->st_size;
4749 /* Given a BFD section, try to locate the corresponding ELF section
/* Maps the generic large-common BFD section back to the x86-64 special
   section index SHN_X86_64_LCOMMON.  */
4753 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
4754 asection *sec, int *index_return)
4756 if (sec == &_bfd_elf_large_com_section)
4758 *index_return = SHN_X86_64_LCOMMON;
4764 /* Process a symbol. */
/* Rewrites SHN_X86_64_LCOMMON symbols so generic code sees them as
   large-common: section, value (= size), and flags are adjusted.  */
4767 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
4770 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
4772 switch (elfsym->internal_elf_sym.st_shndx)
4774 case SHN_X86_64_LCOMMON:
4775 asym->section = &_bfd_elf_large_com_section;
4776 asym->value = elfsym->internal_elf_sym.st_size;
4777 /* Common symbol doesn't set BSF_GLOBAL. */
4778 asym->flags &= ~BSF_GLOBAL;
/* Return whether SYM is a (normal or x86-64 large) common symbol.  */
4784 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
4786 return (sym->st_shndx == SHN_COMMON
4787 || sym->st_shndx == SHN_X86_64_LCOMMON);
/* Section index for a common symbol: SHN_X86_64_LCOMMON for large
   sections, otherwise the value returned before this (elided) line.  */
4791 elf_x86_64_common_section_index (asection *sec)
4793 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4796 return SHN_X86_64_LCOMMON;
/* Common section for SEC: the ordinary COMMON section unless SEC is
   flagged SHF_X86_64_LARGE, in which case the large-common section.  */
4800 elf_x86_64_common_section (asection *sec)
4802 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
4803 return bfd_com_section_ptr;
4805 return &_bfd_elf_large_com_section;
/* elf_backend_merge_symbol hook.  (Several parameters and conditions of
   this function are elided in this excerpt.)  */
4809 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
4810 const Elf_Internal_Sym *sym,
4815 const asection *oldsec)
4817 /* A normal common symbol and a large common symbol result in a
4818 normal common symbol. We turn the large common symbol into a
4821 && h->root.type == bfd_link_hash_common
4823 && bfd_is_com_section (*psec)
4826 if (sym->st_shndx == SHN_COMMON
4827 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
/* Old definition was large-common, new one is normal common: demote
   the existing entry to an ordinary COMMON section.  */
4829 h->root.u.c.p->section
4830 = bfd_make_section_old_way (oldbfd, "COMMON");
4831 h->root.u.c.p->section->flags = SEC_ALLOC;
4833 else if (sym->st_shndx == SHN_X86_64_LCOMMON
4834 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
4835 *psec = bfd_com_section_ptr;
/* Count extra program headers needed for the large read-only and large
   data sections.  (The counter updates are elided in this excerpt.)  */
4842 elf_x86_64_additional_program_headers (bfd *abfd,
4843 struct bfd_link_info *info ATTRIBUTE_UNUSED)
4848 /* Check to see if we need a large readonly segment. */
4849 s = bfd_get_section_by_name (abfd, ".lrodata");
4850 if (s && (s->flags & SEC_LOAD))
4853 /* Check to see if we need a large data segment. Since .lbss sections
4854 is placed right after the .bss section, there should be no need for
4855 a large data segment just because of .lbss. */
4856 s = bfd_get_section_by_name (abfd, ".ldata");
4857 if (s && (s->flags & SEC_LOAD))
4863 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
/* Both targets must share the same ELF class (64 vs x32) on top of the
   generic compatibility check.  */
4866 elf_x86_64_relocs_compatible (const bfd_target *input,
4867 const bfd_target *output)
4869 return ((xvec_get_elf_backend_data (input)->s->elfclass
4870 == xvec_get_elf_backend_data (output)->s->elfclass)
4871 && _bfd_elf_relocs_compatible (input, output));
4874 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
4875 with GNU properties if found. Otherwise, return NULL. */
4878 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
4880 struct elf_x86_init_table init_table;
/* Compile-time sanity check: the converted-reloc marker bit must not
   collide with any real relocation number.  */
4882 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
4883 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
4884 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
4885 != (int) R_X86_64_GNU_VTINHERIT)
4886 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
4887 != (int) R_X86_64_GNU_VTENTRY))
4890 /* This is unused for x86-64. */
4891 init_table.plt0_pad_byte = 0x90;
/* Pick PLT templates by target OS and ABI; the condition selecting the
   BND templates is elided in this excerpt.  */
4893 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl)
4897 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
4898 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
4902 init_table.lazy_plt = &elf_x86_64_lazy_plt;
4903 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
4906 if (ABI_64_P (info->output_bfd))
4908 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
4909 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
4913 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
4914 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
4919 init_table.lazy_plt = &elf_x86_64_nacl_plt;
4920 init_table.non_lazy_plt = NULL;
4921 init_table.lazy_ibt_plt = NULL;
4922 init_table.non_lazy_ibt_plt = NULL;
/* 64-bit vs x32 r_info/r_sym packing helpers.  */
4925 if (ABI_64_P (info->output_bfd))
4927 init_table.r_info = elf64_r_info;
4928 init_table.r_sym = elf64_r_sym;
4932 init_table.r_info = elf32_r_info;
4933 init_table.r_sym = elf32_r_sym;
4936 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
/* Special-section table: the x86-64 "large model" sections, all carrying
   SHF_X86_64_LARGE on top of the usual flags.  */
4939 static const struct bfd_elf_special_section
4940 elf_x86_64_special_sections[]=
4942 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4943 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4944 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
4945 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4946 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
4947 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
4948 { NULL, 0, 0, 0, 0 }
/* Target vector configuration for the primary 64-bit x86-64 ELF target
   ("elf64-x86-64").  These macros parameterize the generic ELF backend
   code instantiated by the "elf64-target.h" include at the end of this
   group.
   NOTE(review): this copy of the file carries stray leading line-number
   tokens and appears to have lines elided (see the unbalanced #if
   below) -- verify against upstream binutils before building.  */
4951 #define TARGET_LITTLE_SYM		    x86_64_elf64_vec
4952 #define TARGET_LITTLE_NAME		    "elf64-x86-64"
4953 #define ELF_ARCH			    bfd_arch_i386
4954 #define ELF_TARGET_ID			    X86_64_ELF_DATA
4955 #define ELF_MACHINE_CODE		    EM_X86_64
/* Maximum page size: 4K when -z separate-code is the configured default,
   otherwise the traditional 2M.
   NOTE(review): the #else/#endif of this conditional are missing from
   this copy (original line numbers jump 4957 -> 4959) -- restore them.  */
4956 #if DEFAULT_LD_Z_SEPARATE_CODE
4957 # define ELF_MAXPAGESIZE		    0x1000
4958 # define ELF_MAXPAGESIZE		    0x200000
4961 #define ELF_MINPAGESIZE		    0x1000
4962 #define ELF_COMMONPAGESIZE		    0x1000
/* Generic-backend capability flags and layout parameters.  */
4964 #define elf_backend_can_gc_sections	    1
4965 #define elf_backend_can_refcount	    1
4966 #define elf_backend_want_got_plt	    1
4967 #define elf_backend_plt_readonly	    1
4968 #define elf_backend_want_plt_sym	    0
/* Three reserved GOT slots (GOT_ENTRY_SIZE*3); slot semantics are
   defined by the psABI -- see the dynamic-linking section there.  */
4969 #define elf_backend_got_header_size	    (GOT_ENTRY_SIZE*3)
4970 #define elf_backend_rela_normal		    1
4971 #define elf_backend_plt_alignment	    4
4972 #define elf_backend_extern_protected_data   1
4973 #define elf_backend_caches_rawsize	    1
4974 #define elf_backend_dtrel_excludes_plt	    1
4975 #define elf_backend_want_dynrelro	    1
/* Hook x86-64 specific implementations into the generic ELF backend.  */
4977 #define elf_info_to_howto		    elf_x86_64_info_to_howto
4979 #define bfd_elf64_bfd_reloc_type_lookup	    elf_x86_64_reloc_type_lookup
4980 #define bfd_elf64_bfd_reloc_name_lookup \
4981   elf_x86_64_reloc_name_lookup
4983 #define elf_backend_relocs_compatible	    elf_x86_64_relocs_compatible
4984 #define elf_backend_check_relocs	    elf_x86_64_check_relocs
4985 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
4986 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
4987 #define elf_backend_finish_dynamic_symbol   elf_x86_64_finish_dynamic_symbol
4988 #define elf_backend_output_arch_local_syms  elf_x86_64_output_arch_local_syms
/* Core-file note handling (prstatus/psinfo grokking and writing).  */
4989 #define elf_backend_grok_prstatus	    elf_x86_64_grok_prstatus
4990 #define elf_backend_grok_psinfo		    elf_x86_64_grok_psinfo
4992 #define elf_backend_write_core_note	    elf_x86_64_write_core_note
4994 #define elf_backend_reloc_type_class	    elf_x86_64_reloc_type_class
4995 #define elf_backend_relocate_section	    elf_x86_64_relocate_section
4996 #define elf_backend_init_index_section	    _bfd_elf_init_1_index_section
4997 #define elf_backend_object_p		    elf64_x86_64_elf_object_p
4998 #define bfd_elf64_get_synthetic_symtab	    elf_x86_64_get_synthetic_symtab
5000 #define elf_backend_section_from_shdr \
5001  elf_x86_64_section_from_shdr
5003 #define elf_backend_section_from_bfd_section \
5004  elf_x86_64_elf_section_from_bfd_section
/* Hooks for SHN_X86_64_LCOMMON / large-common-section handling.  */
5005 #define elf_backend_add_symbol_hook \
5006  elf_x86_64_add_symbol_hook
5007 #define elf_backend_symbol_processing \
5008  elf_x86_64_symbol_processing
5009 #define elf_backend_common_section_index \
5010  elf_x86_64_common_section_index
5011 #define elf_backend_common_section \
5012  elf_x86_64_common_section
5013 #define elf_backend_common_definition \
5014  elf_x86_64_common_definition
5015 #define elf_backend_merge_symbol \
5016  elf_x86_64_merge_symbol
5017 #define elf_backend_special_sections \
5018  elf_x86_64_special_sections
5019 #define elf_backend_additional_program_headers \
5020  elf_x86_64_additional_program_headers
5021 #define elf_backend_setup_gnu_properties \
5022  elf_x86_64_link_setup_gnu_properties
5023 #define elf_backend_hide_symbol \
5024  _bfd_x86_elf_hide_symbol
/* Instantiate the elf64-x86-64 target vector from the macros above.  */
5026 #include "elf64-target.h"
5028 /* CloudABI support.  */
/* Same backend, different target name/vector and OSABI byte in e_ident.
   NOTE(review): the #undef lines for ELF_OSABI and elf64_bed that
   normally precede these #defines appear elided in this copy (line
   numbers jump 5033 -> 5036 -> 5039) -- verify against upstream.  */
5030 #undef  TARGET_LITTLE_SYM
5031 #define TARGET_LITTLE_SYM		    x86_64_elf64_cloudabi_vec
5032 #undef  TARGET_LITTLE_NAME
5033 #define TARGET_LITTLE_NAME		    "elf64-x86-64-cloudabi"
5036 #define ELF_OSABI			    ELFOSABI_CLOUDABI
5039 #define elf64_bed elf64_x86_64_cloudabi_bed
/* Instantiate the CloudABI target vector.  */
5041 #include "elf64-target.h"
5043 /* FreeBSD support.  */
/* As above: only the vector name and ELFOSABI_FREEBSD marker differ.  */
5045 #undef  TARGET_LITTLE_SYM
5046 #define TARGET_LITTLE_SYM		    x86_64_elf64_fbsd_vec
5047 #undef  TARGET_LITTLE_NAME
5048 #define TARGET_LITTLE_NAME		    "elf64-x86-64-freebsd"
5051 #define ELF_OSABI			    ELFOSABI_FREEBSD
5054 #define elf64_bed elf64_x86_64_fbsd_bed
/* Instantiate the FreeBSD target vector.  */
5056 #include "elf64-target.h"
5058 /* Solaris 2 support.  */
5060 #undef  TARGET_LITTLE_SYM
5061 #define TARGET_LITTLE_SYM		    x86_64_elf64_sol2_vec
5062 #undef  TARGET_LITTLE_NAME
5063 #define TARGET_LITTLE_NAME		    "elf64-x86-64-sol2"
/* Solaris-specific backend data overriding the default arch bed.
   NOTE(review): the initializer body of this struct (original lines
   ~5066-5069) is missing from this copy -- restore from upstream.  */
5065 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed =
5070 #undef	elf_backend_arch_data
5071 #define elf_backend_arch_data		    &elf_x86_64_solaris_arch_bed
5073 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5074    objects won't be recognized.  */
5078 #define elf64_bed			    elf64_x86_64_sol2_bed
/* NOTE(review): the next comment is unterminated in this copy (its
   closing line, original ~5081 "boundary.  */" etc., is elided); as
   written it swallows the following directives -- fix before use.  */
5080 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5082 #undef  elf_backend_static_tls_alignment
5083 #define elf_backend_static_tls_alignment    16
5085 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5087    Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5089 #undef  elf_backend_want_plt_sym
5090 #define elf_backend_want_plt_sym	    1
5092 #undef  elf_backend_strtab_flags
5093 #define elf_backend_strtab_flags	SHF_STRINGS
/* Copy hook for Solaris-specific section header fields.
   NOTE(review): the return-type line, braces, and return statement of
   this function (original lines 5095, 5100, 5103-5105) are elided in
   this copy -- the fragment below is only the signature and body
   comment.  */
5096 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5097 						  bfd *obfd ATTRIBUTE_UNUSED,
5098 						  const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5099 						  Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5101   /* PR 19938: FIXME: Need to add code for setting the sh_info
5102      and sh_link fields of Solaris specific section types.  */
5106 #undef  elf_backend_copy_special_section_fields
5107 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
/* Instantiate the Solaris 2 target vector.  */
5109 #include "elf64-target.h"
5111 /* Native Client support.  */
/* object_p hook: tag NaCl objects with the NaCl machine variant.
   NOTE(review): the return-type line, braces, and "return TRUE;" of
   this function are elided in this copy (original line numbers jump
   around 5113-5119) -- restore from upstream.  */
5114 elf64_x86_64_nacl_elf_object_p (bfd *abfd)
5116   /* Set the right machine number for a NaCl x86-64 ELF64 file.  */
5117   bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl);
5121 #undef	TARGET_LITTLE_SYM
5122 #define TARGET_LITTLE_SYM		x86_64_elf64_nacl_vec
5123 #undef	TARGET_LITTLE_NAME
5124 #define TARGET_LITTLE_NAME		"elf64-x86-64-nacl"
5126 #define elf64_bed			elf64_x86_64_nacl_bed
/* NaCl requires 64K pages.  */
5128 #undef	ELF_MAXPAGESIZE
5129 #undef	ELF_MINPAGESIZE
5130 #undef	ELF_COMMONPAGESIZE
5131 #define ELF_MAXPAGESIZE			0x10000
5132 #define ELF_MINPAGESIZE			0x10000
5133 #define ELF_COMMONPAGESIZE		0x10000
5135 /* Restore defaults.  */
5137 #undef	elf_backend_static_tls_alignment
5138 #undef	elf_backend_want_plt_sym
5139 #define elf_backend_want_plt_sym	0
5140 #undef	elf_backend_strtab_flags
5141 #undef	elf_backend_copy_special_section_fields
5143 /* NaCl uses substantially different PLT entries for the same effects.  */
5145 #undef	elf_backend_plt_alignment
5146 #define elf_backend_plt_alignment	5
5147 #define NACL_PLT_ENTRY_SIZE		64
5148 #define	NACLMASK			0xe0            /* 32-byte alignment mask.  */
/* Initial (PLT0) stub: pushes GOT+8, masks and jumps through the slot at
   GOT+16 via %r11, obeying the NaCl indirect-branch sandboxing sequence
   (and $-32 / add %r15 / jmp).
   NOTE(review): the opening "{" of this initializer and its closing
   "};" are elided in this copy.  */
5150 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] =
5152   0xff, 0x35, 8, 0, 0, 0,             /* pushq GOT+8(%rip)		*/
5153   0x4c, 0x8b, 0x1d, 16, 0, 0, 0,      /* mov GOT+16(%rip), %r11	*/
5154   0x41, 0x83, 0xe3, NACLMASK,         /* and $-32, %r11d		*/
5155   0x4d, 0x01, 0xfb,                   /* add %r15, %r11		*/
5156   0x41, 0xff, 0xe3,                   /* jmpq *%r11			*/
5158   /* 9-byte nop sequence to pad out to the next 32-byte boundary.  */
5159   0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1)	*/
5161   /* 32 bytes of nop to pad out to the standard size.  */
5162   0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes	*/
5163   0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1)	*/
5164   0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes	*/
5165   0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1)	*/
5166   0x66,                               /* excess data16 prefix	*/
/* Per-symbol PLT entry: loads the GOT slot, applies the same sandboxed
   indirect jump, with the lazy-resolution push/jmp at offset 32.
   NOTE(review): opening "{" and closing "};" elided in this copy.  */
5170 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] =
5172   0x4c, 0x8b, 0x1d, 0, 0, 0, 0,	/* mov name@GOTPCREL(%rip),%r11	*/
5173   0x41, 0x83, 0xe3, NACLMASK,         /* and $-32, %r11d		*/
5174   0x4d, 0x01, 0xfb,                   /* add %r15, %r11		*/
5175   0x41, 0xff, 0xe3,                   /* jmpq *%r11			*/
5177   /* 15-byte nop sequence to pad out to the next 32-byte boundary.  */
5178   0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes	*/
5179   0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1)	*/
5181   /* Lazy GOT entries point here (32-byte aligned).  */
5182   0x68,                 /* pushq immediate */
5183   0, 0, 0, 0,           /* replaced with index into relocation table.  */
5184   0xe9,                 /* jmp relative */
5185   0, 0, 0, 0,           /* replaced with offset to start of .plt0.  */
5187   /* 22 bytes of nop to pad out to the standard size.  */
5188   0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes	*/
5189   0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1)	*/
5190   0x0f, 0x1f, 0x80, 0, 0, 0, 0,       /* nopl 0x0(%rax)		*/
5193 /* .eh_frame covering the .plt section.  */
/* Hand-built CIE + FDE describing unwinding through the NaCl PLT; the
   FDE's PC32 address and size fields are filled in at link time.
   NOTE(review): the opening "{", the #endif of the parameter check,
   and the closing "};" are elided in this copy.  */
5195 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] =
5197 #if (PLT_CIE_LENGTH != 20				\
5198      || PLT_FDE_LENGTH != 36				\
5199      || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8	\
5200      || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12)
5201 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!"
5203   PLT_CIE_LENGTH, 0, 0, 0,	/* CIE length */
5204   0, 0, 0, 0,			/* CIE ID */
5205   1,				/* CIE version */
5206   'z', 'R', 0,			/* Augmentation string */
5207   1,				/* Code alignment factor */
5208   0x78,				/* Data alignment factor */
5209   16,				/* Return address column */
5210   1,				/* Augmentation size */
5211   DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
5212   DW_CFA_def_cfa, 7, 8,	/* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
5213   DW_CFA_offset + 16, 1,	/* DW_CFA_offset: r16 (rip) at cfa-8 */
5214   DW_CFA_nop, DW_CFA_nop,
5216   PLT_FDE_LENGTH, 0, 0, 0,	/* FDE length */
5217   PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */
5218   0, 0, 0, 0,			/* R_X86_64_PC32 .plt goes here */
5219   0, 0, 0, 0,			/* .plt size goes here */
5220   0,				/* Augmentation size */
5221   DW_CFA_def_cfa_offset, 16,	/* DW_CFA_def_cfa_offset: 16 */
5222   DW_CFA_advance_loc + 6,	/* DW_CFA_advance_loc: 6 to __PLT__+6 */
5223   DW_CFA_def_cfa_offset, 24,	/* DW_CFA_def_cfa_offset: 24 */
5224   DW_CFA_advance_loc + 58,	/* DW_CFA_advance_loc: 58 to __PLT__+64 */
5225   DW_CFA_def_cfa_expression,	/* DW_CFA_def_cfa_expression */
5226   13,				/* Block length */
5227   DW_OP_breg7, 8,		/* DW_OP_breg7 (rsp): 8 */
5228   DW_OP_breg16, 0,		/* DW_OP_breg16 (rip): 0 */
5229   DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge,
5230   DW_OP_lit3, DW_OP_shl, DW_OP_plus,
5231   DW_CFA_nop, DW_CFA_nop
/* Lazy PLT layout descriptor: stub templates above plus the byte
   offsets at which the linker patches GOT/relocation/PLT operands.
   NOTE(review): opening "{" and closing "};" elided in this copy.  */
5234 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt =
5236     elf_x86_64_nacl_plt0_entry,		/* plt0_entry */
5237     NACL_PLT_ENTRY_SIZE,		/* plt0_entry_size */
5238     elf_x86_64_nacl_plt_entry,		/* plt_entry */
5239     NACL_PLT_ENTRY_SIZE,		/* plt_entry_size */
5240     2,					/* plt0_got1_offset */
5241     9,					/* plt0_got2_offset */
5242     13,				/* plt0_got2_insn_end */
5243     3,					/* plt_got_offset */
5244     33,				/* plt_reloc_offset */
5245     38,				/* plt_plt_offset */
5246     7,					/* plt_got_insn_size */
5247     42,				/* plt_plt_insn_end */
5248     32,				/* plt_lazy_offset */
5249     elf_x86_64_nacl_plt0_entry,		/* pic_plt0_entry */
5250     elf_x86_64_nacl_plt_entry,		/* pic_plt_entry */
5251     elf_x86_64_nacl_eh_frame_plt,	/* eh_frame_plt */
5252     sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */
/* NaCl backend data; NOTE(review): initializer body (original lines
   ~5256-5259) elided in this copy.  */
5255 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed =
5260 #undef	elf_backend_arch_data
5261 #define elf_backend_arch_data	&elf_x86_64_nacl_arch_bed
/* NaCl-specific hooks for object recognition and segment layout.  */
5263 #undef	elf_backend_object_p
5264 #define elf_backend_object_p			elf64_x86_64_nacl_elf_object_p
5265 #undef	elf_backend_modify_segment_map
5266 #define	elf_backend_modify_segment_map		nacl_modify_segment_map
5267 #undef	elf_backend_modify_program_headers
5268 #define	elf_backend_modify_program_headers	nacl_modify_program_headers
5269 #undef	elf_backend_final_write_processing
5270 #define elf_backend_final_write_processing	nacl_final_write_processing
/* Instantiate the NaCl 64-bit target vector.  */
5272 #include "elf64-target.h"
5274 /* Native Client x32 support.  */
/* object_p hook for the ILP32 NaCl variant.
   NOTE(review): return-type line, braces and "return TRUE;" of this
   function are elided in this copy -- restore from upstream.  */
5277 elf32_x86_64_nacl_elf_object_p (bfd *abfd)
5279   /* Set the right machine number for a NaCl x86-64 ELF32 file.  */
5280   bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl);
5284 #undef  TARGET_LITTLE_SYM
5285 #define TARGET_LITTLE_SYM		x86_64_elf32_nacl_vec
5286 #undef  TARGET_LITTLE_NAME
5287 #define TARGET_LITTLE_NAME		"elf32-x86-64-nacl"
5289 #define elf32_bed			elf32_x86_64_nacl_bed
/* x32 reuses the shared 64-bit reloc lookup routines under the
   32-bit entry-point names.  */
5291 #define bfd_elf32_bfd_reloc_type_lookup	\
5292   elf_x86_64_reloc_type_lookup
5293 #define bfd_elf32_bfd_reloc_name_lookup \
5294   elf_x86_64_reloc_name_lookup
5295 #define bfd_elf32_get_synthetic_symtab \
5296   elf_x86_64_get_synthetic_symtab
5298 #undef  elf_backend_object_p
5299 #define elf_backend_object_p \
5300   elf32_x86_64_nacl_elf_object_p
5302 #undef elf_backend_bfd_from_remote_memory
5303 #define elf_backend_bfd_from_remote_memory \
5304   _bfd_elf32_bfd_from_remote_memory
5306 #undef elf_backend_size_info
5307 #define elf_backend_size_info \
5308   _bfd_elf32_size_info
/* Instantiate the NaCl x32 target vector (32-bit ELF container).  */
5310 #include "elf32-target.h"
5312 /* Restore defaults.  */
/* Undo all NaCl-specific overrides so the following targets start from
   the standard elf64-x86-64 configuration again.  */
5313 #undef	elf_backend_object_p
5314 #define elf_backend_object_p		    elf64_x86_64_elf_object_p
5315 #undef elf_backend_bfd_from_remote_memory
5316 #undef elf_backend_size_info
5317 #undef	elf_backend_modify_segment_map
5318 #undef	elf_backend_modify_program_headers
5319 #undef	elf_backend_final_write_processing
5321 /* Intel L1OM support.  */
/* object_p hook tagging L1OM objects.
   NOTE(review): return-type line, braces and "return TRUE;" of this
   function are elided in this copy -- restore from upstream.  */
5324 elf64_l1om_elf_object_p (bfd *abfd)
5326   /* Set the right machine number for an L1OM elf64 file.  */
5327   bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om);
5331 #undef  TARGET_LITTLE_SYM
5332 #define TARGET_LITTLE_SYM		    l1om_elf64_vec
5333 #undef  TARGET_LITTLE_NAME
5334 #define TARGET_LITTLE_NAME		    "elf64-l1om"
/* NOTE(review): the #undef ELF_ARCH normally preceding this #define
   (line numbers jump 5334 -> 5336) appears elided in this copy.  */
5336 #define ELF_ARCH			    bfd_arch_l1om
5338 #undef	ELF_MACHINE_CODE
5339 #define ELF_MACHINE_CODE		    EM_L1OM
5344 #define elf64_bed			    elf64_l1om_bed
5346 #undef elf_backend_object_p
5347 #define elf_backend_object_p		    elf64_l1om_elf_object_p
5349 /* Restore defaults.  */
/* Reinstate the standard x86-64 page sizes, PLT alignment and arch bed
   after the NaCl overrides.
   NOTE(review): #else/#endif of this conditional elided in this copy
   (line numbers jump 5354 -> 5356 -> 5358).  */
5350 #undef	ELF_MAXPAGESIZE
5351 #undef	ELF_MINPAGESIZE
5352 #undef	ELF_COMMONPAGESIZE
5353 #if DEFAULT_LD_Z_SEPARATE_CODE
5354 # define ELF_MAXPAGESIZE		0x1000
5356 # define ELF_MAXPAGESIZE		0x200000
5358 #define ELF_MINPAGESIZE			0x1000
5359 #define ELF_COMMONPAGESIZE		0x1000
5360 #undef	elf_backend_plt_alignment
5361 #define elf_backend_plt_alignment	4
5362 #undef	elf_backend_arch_data
5363 #define	elf_backend_arch_data		&elf_x86_64_arch_bed
/* Instantiate the L1OM target vector.  */
5365 #include "elf64-target.h"
5367 /* FreeBSD L1OM support.  */
5369 #undef  TARGET_LITTLE_SYM
5370 #define TARGET_LITTLE_SYM		    l1om_elf64_fbsd_vec
5371 #undef  TARGET_LITTLE_NAME
5372 #define TARGET_LITTLE_NAME		    "elf64-l1om-freebsd"
5375 #define	ELF_OSABI			    ELFOSABI_FREEBSD
5378 #define elf64_bed			    elf64_l1om_fbsd_bed
/* Instantiate the FreeBSD L1OM target vector.  */
5380 #include "elf64-target.h"
5382 /* Intel K1OM support.  */
/* object_p hook tagging K1OM (Knights Corner) objects.
   NOTE(review): return-type line, braces and "return TRUE;" of this
   function are elided in this copy -- restore from upstream.  */
5385 elf64_k1om_elf_object_p (bfd *abfd)
5387   /* Set the right machine number for an K1OM elf64 file.  */
5388   bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om);
5392 #undef  TARGET_LITTLE_SYM
5393 #define TARGET_LITTLE_SYM		    k1om_elf64_vec
5394 #undef  TARGET_LITTLE_NAME
5395 #define TARGET_LITTLE_NAME		    "elf64-k1om"
/* NOTE(review): the #undef ELF_ARCH normally preceding this #define
   (line numbers jump 5395 -> 5397) appears elided in this copy.  */
5397 #define ELF_ARCH			    bfd_arch_k1om
5399 #undef	ELF_MACHINE_CODE
5400 #define ELF_MACHINE_CODE		    EM_K1OM
5405 #define elf64_bed			    elf64_k1om_bed
5407 #undef elf_backend_object_p
5408 #define elf_backend_object_p		    elf64_k1om_elf_object_p
/* Drop any lingering Solaris overrides for this target.  */
5410 #undef  elf_backend_static_tls_alignment
5412 #undef elf_backend_want_plt_sym
5413 #define elf_backend_want_plt_sym	    0
/* Instantiate the K1OM target vector.  */
5415 #include "elf64-target.h"
5417 /* FreeBSD K1OM support.  */
5419 #undef  TARGET_LITTLE_SYM
5420 #define TARGET_LITTLE_SYM		    k1om_elf64_fbsd_vec
5421 #undef  TARGET_LITTLE_NAME
5422 #define TARGET_LITTLE_NAME		    "elf64-k1om-freebsd"
5425 #define	ELF_OSABI			    ELFOSABI_FREEBSD
5428 #define elf64_bed			    elf64_k1om_fbsd_bed
/* Instantiate the FreeBSD K1OM target vector.  */
5430 #include "elf64-target.h"
5432 /* 32bit x86-64 support.  */
/* The x32 (ILP32) ABI: EM_X86_64 machine code inside a 32-bit ELF
   container; shares the i386 BFD arch with the default machine.
   NOTE(review): the #undef lines normally preceding ELF_ARCH and the
   TARGET_LITTLE_SYM group (line-number gaps around 5438-5441 and
   5445-5448) appear elided in this copy -- verify against upstream.  */
5434 #undef  TARGET_LITTLE_SYM
5435 #define TARGET_LITTLE_SYM		    x86_64_elf32_vec
5436 #undef  TARGET_LITTLE_NAME
5437 #define TARGET_LITTLE_NAME		    "elf32-x86-64"
5441 #define ELF_ARCH			    bfd_arch_i386
5443 #undef	ELF_MACHINE_CODE
5444 #define ELF_MACHINE_CODE		    EM_X86_64
5448 #undef	elf_backend_object_p
5449 #define elf_backend_object_p \
5450   elf32_x86_64_elf_object_p
/* 32-bit ELF entry points for reading from remote memory and for the
   size-info table.  */
5452 #undef elf_backend_bfd_from_remote_memory
5453 #define elf_backend_bfd_from_remote_memory \
5454   _bfd_elf32_bfd_from_remote_memory
5456 #undef elf_backend_size_info
5457 #define elf_backend_size_info \
5458   _bfd_elf32_size_info
/* Instantiate the elf32-x86-64 (x32) target vector.  */
5460 #include "elf32-target.h"