1 /* AArch64-specific support for ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
22 #include "elfxx-aarch64.h"
/* Bit mask covering the low N bits.  N must be < 32, otherwise the
   shift of the 32-bit literal is undefined; all uses in this file pass
   field widths of at most 26.  */
#define MASK(n) ((1u << (n)) - 1)
/* Decode the 26-bit offset of unconditional branch: the immediate
   occupies bits [25:0] of the instruction word.  */
static inline uint32_t
decode_branch_ofs_26 (uint32_t insn)
{
  return insn & 0x03ffffffu;
}
/* Decode the 19-bit offset of conditional branch and compare & branch:
   the immediate occupies bits [23:5].  */
static inline uint32_t
decode_cond_branch_ofs_19 (uint32_t insn)
{
  uint32_t field = insn >> 5;
  return field & 0x0007ffffu;
}
/* Decode the 19-bit offset of load literal: the immediate occupies
   bits [23:5], exactly like the conditional-branch form.  */
static inline uint32_t
decode_ld_lit_ofs_19 (uint32_t insn)
{
  return (insn >> 5) & 0x0007ffffu;
}
/* Decode the 14-bit offset of test & branch (TBZ/TBNZ): the immediate
   occupies bits [18:5].  */
static inline uint32_t
decode_tst_branch_ofs_14 (uint32_t insn)
{
  uint32_t shifted = insn >> 5;
  return shifted & 0x3fffu;
}
/* Decode the 16-bit imm of move wide: the immediate occupies
   bits [20:5].  */
static inline uint32_t
decode_movw_imm (uint32_t insn)
{
  return (insn >> 5) & 0xffffu;
}
/* Decode the 12-bit imm of add immediate: the immediate occupies
   bits [21:10].  */
static inline uint32_t
decode_add_imm (uint32_t insn)
{
  uint32_t field = insn >> 10;
  return field & 0x0fffu;
}
/* Reencode the imm field of add immediate: replace bits [21:10] of
   INSN with the low 12 bits of IMM, leaving all other bits intact.  */
static inline uint32_t
reencode_add_imm (uint32_t insn, uint32_t imm)
{
  uint32_t field = (imm & 0x0fffu) << 10;
  return (insn & ~(0x0fffu << 10)) | field;
}
/* Reencode the imm field of adr.  ADR/ADRP split the 21-bit immediate
   into immlo (imm bits [1:0], instruction bits [30:29]) and immhi
   (imm bits [20:2], instruction bits [23:5]).  */
static inline uint32_t
reencode_adr_imm (uint32_t insn, uint32_t imm)
{
  uint32_t immlo = (imm & 0x3u) << 29;
  uint32_t immhi = ((imm >> 2) & 0x0007ffffu) << 5;
  return (insn & ~0x60ffffe0u) | immlo | immhi;
}
/* Reencode the imm field of ld/st pos immediate: replace bits [21:10]
   of INSN with the low 12 bits of IMM.  */
static inline uint32_t
reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(0x0fffu << 10)) | ((imm & 0x0fffu) << 10);
}
/* Encode the 26-bit offset of unconditional branch: replace
   bits [25:0] of INSN with the low 26 bits of OFS.  */
static inline uint32_t
reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
{
  uint32_t field = ofs & 0x03ffffffu;
  return (insn & ~0x03ffffffu) | field;
}
/* Encode the 19-bit offset of conditional branch and compare & branch:
   replace bits [23:5] of INSN with the low 19 bits of OFS.  */
static inline uint32_t
reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
{
  uint32_t field = (ofs & 0x0007ffffu) << 5;
  return (insn & ~(0x0007ffffu << 5)) | field;
}
/* Encode the 19-bit offset of load literal: replace bits [23:5] of
   INSN with the low 19 bits of OFS.  (The previous comment said
   "Decode" — a copy-paste from decode_ld_lit_ofs_19 — but this
   function re-encodes.)  */
static inline uint32_t
reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
{
  return (insn & ~(0x0007ffffu << 5)) | ((ofs & 0x0007ffffu) << 5);
}
/* Encode the 14-bit offset of test & branch: replace bits [18:5] of
   INSN with the low 14 bits of OFS.  */
static inline uint32_t
reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
{
  uint32_t field = (ofs & 0x3fffu) << 5;
  return (insn & ~(0x3fffu << 5)) | field;
}
/* Reencode the imm field of move wide: replace bits [20:5] of INSN
   with the low 16 bits of IMM.  */
static inline uint32_t
reencode_movw_imm (uint32_t insn, uint32_t imm)
{
  return (insn & ~(0xffffu << 5)) | ((imm & 0xffffu) << 5);
}
/* Reencode mov[zn] to movz: bit 30 distinguishes MOVZ (1) from
   MOVN (0), so force it set.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000u;
}
/* Reencode mov[zn] to movn: clear bit 30 (the MOVZ/MOVN selector).  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~0x40000000u;
}
/* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by an unsigned number with the indicated number of
   BITS.
   NOTE(review): this listing appears to have lines elided — the
   declaration of `lim`, the comparison of VALUE against it, the body
   braces and the success `return bfd_reloc_ok;` are missing; only the
   visible statements are annotated below.  */
static bfd_reloc_status_type
aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
/* A field as wide as bfd_vma can never overflow; also guards the
   shift below against UB when bits >= the type width.  */
if (bits >= sizeof (bfd_vma) * 8)
/* Smallest value that does NOT fit in BITS unsigned bits.  */
lim = (bfd_vma) 1 << bits;
return bfd_reloc_overflow;
/* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by a signed number with the indicated number of
   BITS.
   NOTE(review): lines appear elided from this listing (the `lim`
   declaration, braces, and the success return).  */
static bfd_reloc_status_type
aarch64_signed_overflow (bfd_vma value, unsigned int bits)
/* Reinterpret the raw bits as a signed quantity for range checks.  */
bfd_signed_vma svalue = (bfd_signed_vma) value;
/* A field as wide as bfd_vma can never overflow; also guards the
   shift below.  */
if (bits >= sizeof (bfd_vma) * 8)
/* Signed BITS-bit range is [-lim, lim).  */
lim = (bfd_signed_vma) 1 << (bits - 1);
if (svalue < -lim || svalue >= lim)
return bfd_reloc_overflow;
/* Insert the addend/value into the instruction or data object being
   relocated.  Reads the object at ADDRESS, range-checks ADDEND per
   HOWTO, packs ADDEND into the field selected by R_TYPE, and writes
   the result back.  Returns bfd_reloc_ok, bfd_reloc_overflow, or
   bfd_reloc_notsupported.
   NOTE(review): this listing has lines elided (braces, `break`s,
   variable declarations for `size` and `contents`, the size switch
   labels); only the visible statements are annotated.  */
bfd_reloc_status_type
_bfd_aarch64_elf_put_addend (bfd *abfd,
bfd_byte *address, bfd_reloc_code_real_type r_type,
reloc_howto_type *howto, bfd_signed_vma addend)
/* Assume success; the overflow checks below may replace this.  */
bfd_reloc_status_type status = bfd_reloc_ok;
/* Keep the unshifted addend for the alignment checks further down.  */
bfd_signed_vma old_addend = addend;
/* Object size in bytes selects how `contents` is fetched/stored.  */
size = bfd_get_reloc_size (howto);
contents = bfd_get_16 (abfd, address);
/* Full-word src_mask means raw data; otherwise an instruction.  */
if (howto->src_mask != 0xffffffff)
/* Must be 32-bit instruction, always little-endian. */
contents = bfd_getl32 (address);
/* Must be 32-bit data (endianness dependent). */
contents = bfd_get_32 (abfd, address);
contents = bfd_get_64 (abfd, address);
/* Range-check ADDEND before it is shifted into the field; the check
   covers bitsize + rightshift bits, i.e. the pre-shift value.  */
switch (howto->complain_on_overflow)
case complain_overflow_dont:
case complain_overflow_signed:
status = aarch64_signed_overflow (addend,
howto->bitsize + howto->rightshift);
case complain_overflow_unsigned:
status = aarch64_unsigned_overflow (addend,
howto->bitsize + howto->rightshift);
case complain_overflow_bitfield:
/* Discard the low bits the field does not store (e.g. the low 2
   bits of a word-aligned branch offset).  */
addend >>= howto->rightshift;
/* Pack the (shifted) addend into the instruction field selected by
   the relocation type.  */
case BFD_RELOC_AARCH64_JUMP26:
case BFD_RELOC_AARCH64_CALL26:
contents = reencode_branch_ofs_26 (contents, addend);
case BFD_RELOC_AARCH64_BRANCH19:
contents = reencode_cond_branch_ofs_19 (contents, addend);
case BFD_RELOC_AARCH64_TSTBR14:
contents = reencode_tst_branch_ofs_14 (contents, addend);
case BFD_RELOC_AARCH64_LD_LO19_PCREL:
case BFD_RELOC_AARCH64_GOT_LD_PREL19:
case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
/* Load-literal offsets must be aligned: reject an addend with any
   of the discarded low bits set.  */
if (old_addend & ((1 << howto->rightshift) - 1))
return bfd_reloc_overflow;
contents = reencode_ld_lit_ofs_19 (contents, addend);
case BFD_RELOC_AARCH64_TLSDESC_CALL:
case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
contents = reencode_adr_imm (contents, addend);
case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
case BFD_RELOC_AARCH64_ADD_LO12:
/* Corresponds to: add rd, rn, #uimm12 to provide the low order
   12 bits of the page offset following
   BFD_RELOC_AARCH64_ADR_HI21_PCREL which computes the
   (pc-relative) page base. */
contents = reencode_add_imm (contents, addend);
case BFD_RELOC_AARCH64_LDST8_LO12:
case BFD_RELOC_AARCH64_LDST16_LO12:
case BFD_RELOC_AARCH64_LDST32_LO12:
case BFD_RELOC_AARCH64_LDST64_LO12:
case BFD_RELOC_AARCH64_LDST128_LO12:
case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
/* Scaled ld/st offsets must be a multiple of the access size.  */
if (old_addend & ((1 << howto->rightshift) - 1))
return bfd_reloc_overflow;
/* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
   12 bits of the page offset following BFD_RELOC_AARCH64_ADR_HI21_PCREL
   which computes the (pc-relative) page base. */
contents = reencode_ldst_pos_imm (contents, addend);
/* Group relocations to create high bits of a 16, 32, 48 or 64
   bit signed data or abs address inline. Will change
   instruction to MOVN or MOVZ depending on sign of calculated
   value.  */
case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
case BFD_RELOC_AARCH64_MOVW_G0_S:
case BFD_RELOC_AARCH64_MOVW_G1_S:
case BFD_RELOC_AARCH64_MOVW_G2_S:
/* NOTE: We can only come here with movz or movn. */
/* NOTE(review): the sign test selecting between the two forms is
   elided from this listing.  */
/* Force use of MOVN. */
contents = reencode_movzn_to_movn (contents);
/* Force use of MOVZ. */
contents = reencode_movzn_to_movz (contents);
/* Group relocations to create a 16, 32, 48 or 64 bit unsigned
   data or abs address inline. */
case BFD_RELOC_AARCH64_MOVW_G0:
case BFD_RELOC_AARCH64_MOVW_G0_NC:
case BFD_RELOC_AARCH64_MOVW_G1:
case BFD_RELOC_AARCH64_MOVW_G1_NC:
case BFD_RELOC_AARCH64_MOVW_G2:
case BFD_RELOC_AARCH64_MOVW_G2_NC:
case BFD_RELOC_AARCH64_MOVW_G3:
contents = reencode_movw_imm (contents, addend);
/* Repack simple data */
/* dst_mask must be a contiguous low-bit mask (power of two minus
   one) for the generic repack below to be meaningful.  */
if (howto->dst_mask & (howto->dst_mask + 1))
return bfd_reloc_notsupported;
contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
/* Write the updated object back, mirroring the reads above.  */
bfd_put_16 (abfd, contents, address);
if (howto->dst_mask != 0xffffffff)
/* must be 32-bit instruction, always little-endian */
bfd_putl32 (contents, address);
/* must be 32-bit data (endianness dependent) */
bfd_put_32 (abfd, contents, address);
bfd_put_64 (abfd, contents, address);
/* Compute the final value of a relocation of type R_TYPE: combine the
   symbol VALUE, the ADDEND, and the relocation site PLACE according to
   the relocation's semantics (absolute, pc-relative, page-relative, or
   a masked group field).
   NOTE(review): the return-type line, braces, `break`s, and the use of
   WEAK_UNDEF_P appear elided from this listing; PG/PG_OFFSET are page
   base / page offset helpers defined elsewhere — confirm against the
   full source.  */
_bfd_aarch64_elf_resolve_relocation (bfd_reloc_code_real_type r_type,
bfd_vma place, bfd_vma value,
bfd_vma addend, bfd_boolean weak_undef_p)
/* No computation needed for these.  */
case BFD_RELOC_AARCH64_TLSDESC_CALL:
case BFD_RELOC_AARCH64_NONE:
/* Plain pc-relative: S + A - P.  */
case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
case BFD_RELOC_AARCH64_BRANCH19:
case BFD_RELOC_AARCH64_LD_LO19_PCREL:
case BFD_RELOC_AARCH64_16_PCREL:
case BFD_RELOC_AARCH64_32_PCREL:
case BFD_RELOC_AARCH64_64_PCREL:
case BFD_RELOC_AARCH64_TSTBR14:
value = value + addend - place;
case BFD_RELOC_AARCH64_CALL26:
case BFD_RELOC_AARCH64_JUMP26:
value = value + addend - place;
/* Absolute: S + A.  */
case BFD_RELOC_AARCH64_16:
case BFD_RELOC_AARCH64_32:
case BFD_RELOC_AARCH64_MOVW_G0_S:
case BFD_RELOC_AARCH64_MOVW_G1_S:
case BFD_RELOC_AARCH64_MOVW_G2_S:
case BFD_RELOC_AARCH64_MOVW_G0:
case BFD_RELOC_AARCH64_MOVW_G0_NC:
case BFD_RELOC_AARCH64_MOVW_G1:
case BFD_RELOC_AARCH64_MOVW_G1_NC:
case BFD_RELOC_AARCH64_MOVW_G2:
case BFD_RELOC_AARCH64_MOVW_G2_NC:
case BFD_RELOC_AARCH64_MOVW_G3:
value = value + addend;
/* Page-relative (ADRP): Page(S + A) - Page(P).  */
case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
value = PG (value + addend) - PG (place);
case BFD_RELOC_AARCH64_GOT_LD_PREL19:
value = value + addend - place;
case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
value = PG (value + addend) - PG (place);
/* Low 12 bits of the page offset.  */
case BFD_RELOC_AARCH64_ADD_LO12:
case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
case BFD_RELOC_AARCH64_LDST8_LO12:
case BFD_RELOC_AARCH64_LDST16_LO12:
case BFD_RELOC_AARCH64_LDST32_LO12:
case BFD_RELOC_AARCH64_LDST64_LO12:
case BFD_RELOC_AARCH64_LDST128_LO12:
case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
case BFD_RELOC_AARCH64_TLSDESC_ADD:
case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
case BFD_RELOC_AARCH64_TLSDESC_LDR:
case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
value = PG_OFFSET (value + addend);
/* TPREL group fields: isolate the 16-bit slice each MOVW fills.  */
case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
value = (value + addend) & (bfd_vma) 0xffff0000;
case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
/* Mask off low 12 bits, keep all other high bits, so that the later
   generic code can check whether there is overflow.  */
value = (value + addend) & ~(bfd_vma) 0xfff;
case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
value = (value + addend) & (bfd_vma) 0xffff;
case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
/* Keep only bits 32+ of both operands before differencing.  */
value = (value + addend) & ~(bfd_vma) 0xffffffff;
value -= place & ~(bfd_vma) 0xffffffff;
/* Hook called by the linker routine which adds symbols from an object
   file: when a GNU-extension symbol (STT_GNU_IFUNC or STB_GNU_UNIQUE)
   is seen in a non-dynamic input feeding an ELF output, mark the
   output BFD's tdata so later code knows GNU symbols are present.
   NOTE(review): the return-type line, braces, and the final return
   appear elided from this listing.  */
_bfd_aarch64_elf_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
Elf_Internal_Sym *sym,
const char **namep ATTRIBUTE_UNUSED,
flagword *flagsp ATTRIBUTE_UNUSED,
asection **secp ATTRIBUTE_UNUSED,
bfd_vma *valp ATTRIBUTE_UNUSED)
/* Only relocatable (non-DYNAMIC) inputs and ELF-flavoured outputs
   are considered.  */
if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
|| ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE)
&& (abfd->flags & DYNAMIC) == 0
&& bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
/* Support for core dump NOTE sections. */
/* Parse an NT_PRSTATUS core note: extract the signal and LWP id into
   the BFD's core tdata and expose the register block as a ".reg"
   pseudosection.
   NOTE(review): the return-type line, the default case, and the
   assignments to `size` and `offset` (the register-block extent)
   appear elided from this listing.  */
_bfd_aarch64_elf_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
/* Only a descriptor of the exact Linux/arm64 layout is accepted.  */
switch (note->descsz)
case 392: /* sizeof(struct elf_prstatus) on Linux/arm64. */
/* pr_cursig lives at byte offset 12 of the descriptor.  */
elf_tdata (abfd)->core->signal
= bfd_get_16 (abfd, note->descdata + 12);
/* pr_pid lives at byte offset 32.  */
elf_tdata (abfd)->core->lwpid
= bfd_get_32 (abfd, note->descdata + 32);
/* Make a ".reg/999" section. */
return _bfd_elfcore_make_pseudosection (abfd, ".reg",
size, note->descpos + offset);
/* Parse an NT_PRPSINFO core note: extract the pid, the short program
   name (16 bytes at offset 40) and the command line (80 bytes at
   offset 56) into the BFD's core tdata.
   NOTE(review): the return-type line, the default case, braces, and
   the final return appear elided from this listing.  */
_bfd_aarch64_elf_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
switch (note->descsz)
case 136: /* This is sizeof(struct elf_prpsinfo) on Linux/aarch64. */
elf_tdata (abfd)->core->pid = bfd_get_32 (abfd, note->descdata + 24);
elf_tdata (abfd)->core->program
= _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
elf_tdata (abfd)->core->command
= _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
/* Note that for some reason, a spurious space is tacked
   onto the end of the args in some (at least one anyway)
   implementations, so strip it off if it exists. */
char *command = elf_tdata (abfd)->core->command;
int n = strlen (command);
if (0 < n && command[n - 1] == ' ')
command[n - 1] = '\0';
/* Build a core-dump note of type NOTE_TYPE into BUF, taking the
   note-specific payload from varargs, and hand it to
   elfcore_write_note.
   NOTE(review): the return-type line, the switch on NOTE_TYPE, the
   `data`/`va_list` declarations, `va_end` calls, braces, and the
   function's closing brace appear elided from this listing; the two
   visible sequences look like the NT_PRPSINFO and NT_PRSTATUS
   branches — confirm against the full source.  */
_bfd_aarch64_elf_write_core_note (bfd *abfd, char *buf, int *bufsiz, int note_type,
/* (Apparent NT_PRPSINFO branch: fname at offset 40, psargs at 56,
   matching the offsets read back in grok_psinfo above.)  */
va_start (ap, note_type);
memset (data, 0, sizeof (data));
/* strncpy here fills fixed-width fields; the memset above guarantees
   NUL padding even when the source string is exactly field-sized.  */
strncpy (data + 40, va_arg (ap, const char *), 16);
strncpy (data + 56, va_arg (ap, const char *), 80);
return elfcore_write_note (abfd, buf, bufsiz, "CORE",
note_type, data, sizeof (data));
/* (Apparent NT_PRSTATUS branch: pid at offset 32, cursig at 12,
   general registers at 112 — the offsets grok_prstatus reads.)  */
va_start (ap, note_type);
memset (data, 0, sizeof (data));
pid = va_arg (ap, long);
bfd_put_32 (abfd, pid, data + 32);
cursig = va_arg (ap, int);
bfd_put_16 (abfd, cursig, data + 12);
greg = va_arg (ap, const void *);
memcpy (data + 112, greg, 272);
return elfcore_write_note (abfd, buf, bufsiz, "CORE",
note_type, data, sizeof (data));