1 /* AArch64-specific support for ELF.
2 Copyright (C) 2009-2014 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
22 #include "elfxx-aarch64.h"
24 #define MASK(n) ((1u << (n)) - 1)
/* Decode the 26-bit offset of unconditional branch.  */
static inline uint32_t
decode_branch_ofs_26 (uint32_t insn)
{
  /* imm26 occupies bits [25:0] of B/BL.  */
  return insn & ((1u << 26) - 1);
}
/* Decode the 19-bit offset of conditional branch and compare & branch.  */
static inline uint32_t
decode_cond_branch_ofs_19 (uint32_t insn)
{
  /* imm19 occupies bits [23:5].  */
  return (insn >> 5) & ((1u << 19) - 1);
}
/* Decode the 19-bit offset of load literal.  */
static inline uint32_t
decode_ld_lit_ofs_19 (uint32_t insn)
{
  /* imm19 occupies bits [23:5], same field as conditional branch.  */
  return (insn >> 5) & ((1u << 19) - 1);
}
/* Decode the 14-bit offset of test & branch.  */
static inline uint32_t
decode_tst_branch_ofs_14 (uint32_t insn)
{
  /* imm14 occupies bits [18:5] of TBZ/TBNZ.  */
  return (insn >> 5) & ((1u << 14) - 1);
}
/* Decode the 16-bit imm of move wide.  */
static inline uint32_t
decode_movw_imm (uint32_t insn)
{
  /* imm16 occupies bits [20:5] of MOVZ/MOVN/MOVK.  */
  return (insn >> 5) & 0xffffu;
}
/* Decode the 12-bit imm of add immediate.  */
static inline uint32_t
decode_add_imm (uint32_t insn)
{
  /* imm12 occupies bits [21:10] of ADD (immediate).  */
  return (insn >> 10) & 0xfffu;
}
/* Reencode the imm field of add immediate.  */
static inline uint32_t
reencode_add_imm (uint32_t insn, uint32_t imm)
{
  /* imm12 occupies bits [21:10]; any higher bits of IMM are dropped.  */
  const uint32_t field = 0xfffu << 10;
  return (insn & ~field) | ((imm << 10) & field);
}
/* Reencode the imm field of adr.  */
static inline uint32_t
reencode_adr_imm (uint32_t insn, uint32_t imm)
{
  /* ADR splits its 21-bit immediate: immlo = imm[1:0] at bits [30:29],
     immhi = imm[20:2] at bits [23:5].  */
  uint32_t immlo = (imm & 0x3u) << 29;
  uint32_t immhi = (imm & (0x7ffffu << 2)) << 3;
  return (insn & ~((0x3u << 29) | (0x7ffffu << 5))) | immlo | immhi;
}
/* Reencode the imm field of ld/st pos immediate.  */
static inline uint32_t
reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
{
  /* Scaled imm12 occupies bits [21:10], as in ADD (immediate).  */
  const uint32_t field = 0xfffu << 10;
  return (insn & ~field) | ((imm << 10) & field);
}
/* Encode the 26-bit offset of unconditional branch.  */
static inline uint32_t
reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
{
  /* imm26 occupies bits [25:0] of B/BL.  */
  const uint32_t field = (1u << 26) - 1;
  return (insn & ~field) | (ofs & field);
}
/* Encode the 19-bit offset of conditional branch and compare & branch.  */
static inline uint32_t
reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
{
  /* imm19 occupies bits [23:5].  */
  const uint32_t field = ((1u << 19) - 1) << 5;
  return (insn & ~field) | ((ofs << 5) & field);
}
/* Encode the 19-bit offset of load literal.  (The original comment said
   "Decode", but this function inserts the offset — it is the encoder.)  */
static inline uint32_t
reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
{
  /* imm19 occupies bits [23:5], same field as conditional branch.  */
  return (insn & ~(((1u << 19) - 1) << 5)) | ((ofs & ((1u << 19) - 1)) << 5);
}
/* Encode the 14-bit offset of test & branch.  */
static inline uint32_t
reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
{
  /* imm14 occupies bits [18:5] of TBZ/TBNZ.  */
  const uint32_t field = ((1u << 14) - 1) << 5;
  return (insn & ~field) | ((ofs << 5) & field);
}
/* Reencode the imm field of move wide.  */
static inline uint32_t
reencode_movw_imm (uint32_t insn, uint32_t imm)
{
  /* imm16 occupies bits [20:5] of MOVZ/MOVN/MOVK.  */
  const uint32_t field = 0xffffu << 5;
  return (insn & ~field) | ((imm << 5) & field);
}
/* Reencode mov[zn] to movz.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  /* Setting bit 30 selects MOVZ within the move-wide group; a MOVZ
     input passes through unchanged.  */
  return opcode | (1u << 30);
}
/* Reencode mov[zn] to movn.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  /* Clearing bit 30 selects MOVN within the move-wide group; a MOVN
     input passes through unchanged.  */
  return opcode & ~(1u << 30);
}
139 /* Return non-zero if the indicated VALUE has overflowed the maximum
140 range expressible by a unsigned number with the indicated number of
143 static bfd_reloc_status_type
144 aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
147 if (bits >= sizeof (bfd_vma) * 8)
149 lim = (bfd_vma) 1 << bits;
151 return bfd_reloc_overflow;
155 /* Return non-zero if the indicated VALUE has overflowed the maximum
156 range expressible by an signed number with the indicated number of
159 static bfd_reloc_status_type
160 aarch64_signed_overflow (bfd_vma value, unsigned int bits)
162 bfd_signed_vma svalue = (bfd_signed_vma) value;
165 if (bits >= sizeof (bfd_vma) * 8)
167 lim = (bfd_signed_vma) 1 << (bits - 1);
168 if (svalue < -lim || svalue >= lim)
169 return bfd_reloc_overflow;
173 /* Insert the addend/value into the instruction or data object being
175 bfd_reloc_status_type
176 _bfd_aarch64_elf_put_addend (bfd *abfd,
177 bfd_byte *address, bfd_reloc_code_real_type r_type,
178 reloc_howto_type *howto, bfd_signed_vma addend)
180 bfd_reloc_status_type status = bfd_reloc_ok;
181 bfd_signed_vma old_addend = addend;
185 size = bfd_get_reloc_size (howto);
189 contents = bfd_get_16 (abfd, address);
192 if (howto->src_mask != 0xffffffff)
193 /* Must be 32-bit instruction, always little-endian. */
194 contents = bfd_getl32 (address);
196 /* Must be 32-bit data (endianness dependent). */
197 contents = bfd_get_32 (abfd, address);
200 contents = bfd_get_64 (abfd, address);
206 switch (howto->complain_on_overflow)
208 case complain_overflow_dont:
210 case complain_overflow_signed:
211 status = aarch64_signed_overflow (addend,
212 howto->bitsize + howto->rightshift);
214 case complain_overflow_unsigned:
215 status = aarch64_unsigned_overflow (addend,
216 howto->bitsize + howto->rightshift);
218 case complain_overflow_bitfield:
223 addend >>= howto->rightshift;
227 case BFD_RELOC_AARCH64_JUMP26:
228 case BFD_RELOC_AARCH64_CALL26:
229 contents = reencode_branch_ofs_26 (contents, addend);
232 case BFD_RELOC_AARCH64_BRANCH19:
233 contents = reencode_cond_branch_ofs_19 (contents, addend);
236 case BFD_RELOC_AARCH64_TSTBR14:
237 contents = reencode_tst_branch_ofs_14 (contents, addend);
240 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
241 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
242 if (old_addend & ((1 << howto->rightshift) - 1))
243 return bfd_reloc_overflow;
244 contents = reencode_ld_lit_ofs_19 (contents, addend);
247 case BFD_RELOC_AARCH64_TLSDESC_CALL:
250 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
251 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
252 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
253 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
254 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
255 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
256 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
257 contents = reencode_adr_imm (contents, addend);
260 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
261 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
262 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
263 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
264 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
265 case BFD_RELOC_AARCH64_ADD_LO12:
266 /* Corresponds to: add rd, rn, #uimm12 to provide the low order
267 12 bits of the page offset following
268 BFD_RELOC_AARCH64_ADR_HI21_PCREL which computes the
269 (pc-relative) page base. */
270 contents = reencode_add_imm (contents, addend);
273 case BFD_RELOC_AARCH64_LDST8_LO12:
274 case BFD_RELOC_AARCH64_LDST16_LO12:
275 case BFD_RELOC_AARCH64_LDST32_LO12:
276 case BFD_RELOC_AARCH64_LDST64_LO12:
277 case BFD_RELOC_AARCH64_LDST128_LO12:
278 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
279 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
280 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
281 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
282 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
283 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
284 if (old_addend & ((1 << howto->rightshift) - 1))
285 return bfd_reloc_overflow;
286 /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
287 12 bits of the page offset following BFD_RELOC_AARCH64_ADR_HI21_PCREL
288 which computes the (pc-relative) page base. */
289 contents = reencode_ldst_pos_imm (contents, addend);
292 /* Group relocations to create high bits of a 16, 32, 48 or 64
293 bit signed data or abs address inline. Will change
294 instruction to MOVN or MOVZ depending on sign of calculated
297 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
298 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
299 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
300 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
301 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
302 case BFD_RELOC_AARCH64_MOVW_G0_S:
303 case BFD_RELOC_AARCH64_MOVW_G1_S:
304 case BFD_RELOC_AARCH64_MOVW_G2_S:
305 /* NOTE: We can only come here with movz or movn. */
308 /* Force use of MOVN. */
310 contents = reencode_movzn_to_movn (contents);
314 /* Force use of MOVZ. */
315 contents = reencode_movzn_to_movz (contents);
319 /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
320 data or abs address inline. */
322 case BFD_RELOC_AARCH64_MOVW_G0:
323 case BFD_RELOC_AARCH64_MOVW_G0_NC:
324 case BFD_RELOC_AARCH64_MOVW_G1:
325 case BFD_RELOC_AARCH64_MOVW_G1_NC:
326 case BFD_RELOC_AARCH64_MOVW_G2:
327 case BFD_RELOC_AARCH64_MOVW_G2_NC:
328 case BFD_RELOC_AARCH64_MOVW_G3:
329 contents = reencode_movw_imm (contents, addend);
333 /* Repack simple data */
334 if (howto->dst_mask & (howto->dst_mask + 1))
335 return bfd_reloc_notsupported;
337 contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
344 bfd_put_16 (abfd, contents, address);
347 if (howto->dst_mask != 0xffffffff)
348 /* must be 32-bit instruction, always little-endian */
349 bfd_putl32 (contents, address);
351 /* must be 32-bit data (endianness dependent) */
352 bfd_put_32 (abfd, contents, address);
355 bfd_put_64 (abfd, contents, address);
365 _bfd_aarch64_elf_resolve_relocation (bfd_reloc_code_real_type r_type,
366 bfd_vma place, bfd_vma value,
367 bfd_vma addend, bfd_boolean weak_undef_p)
371 case BFD_RELOC_AARCH64_TLSDESC_CALL:
372 case BFD_RELOC_AARCH64_NONE:
375 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
376 case BFD_RELOC_AARCH64_BRANCH19:
377 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
378 case BFD_RELOC_AARCH64_16_PCREL:
379 case BFD_RELOC_AARCH64_32_PCREL:
380 case BFD_RELOC_AARCH64_64_PCREL:
381 case BFD_RELOC_AARCH64_TSTBR14:
384 value = value + addend - place;
387 case BFD_RELOC_AARCH64_CALL26:
388 case BFD_RELOC_AARCH64_JUMP26:
389 value = value + addend - place;
392 case BFD_RELOC_AARCH64_16:
393 case BFD_RELOC_AARCH64_32:
394 case BFD_RELOC_AARCH64_MOVW_G0_S:
395 case BFD_RELOC_AARCH64_MOVW_G1_S:
396 case BFD_RELOC_AARCH64_MOVW_G2_S:
397 case BFD_RELOC_AARCH64_MOVW_G0:
398 case BFD_RELOC_AARCH64_MOVW_G0_NC:
399 case BFD_RELOC_AARCH64_MOVW_G1:
400 case BFD_RELOC_AARCH64_MOVW_G1_NC:
401 case BFD_RELOC_AARCH64_MOVW_G2:
402 case BFD_RELOC_AARCH64_MOVW_G2_NC:
403 case BFD_RELOC_AARCH64_MOVW_G3:
404 value = value + addend;
407 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
408 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
411 value = PG (value + addend) - PG (place);
414 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
415 value = value + addend - place;
418 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
419 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
420 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
421 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
422 value = PG (value + addend) - PG (place);
425 case BFD_RELOC_AARCH64_ADD_LO12:
426 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
427 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
428 case BFD_RELOC_AARCH64_LDST8_LO12:
429 case BFD_RELOC_AARCH64_LDST16_LO12:
430 case BFD_RELOC_AARCH64_LDST32_LO12:
431 case BFD_RELOC_AARCH64_LDST64_LO12:
432 case BFD_RELOC_AARCH64_LDST128_LO12:
433 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
434 case BFD_RELOC_AARCH64_TLSDESC_ADD:
435 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
436 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
437 case BFD_RELOC_AARCH64_TLSDESC_LDR:
438 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
439 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
440 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
441 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
442 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
443 value = PG_OFFSET (value + addend);
446 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
447 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
448 value = (value + addend) & (bfd_vma) 0xffff0000;
450 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
451 value = (value + addend) & (bfd_vma) 0xfff000;
454 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
455 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
456 value = (value + addend) & (bfd_vma) 0xffff;
459 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
460 value = (value + addend) & ~(bfd_vma) 0xffffffff;
461 value -= place & ~(bfd_vma) 0xffffffff;
471 /* Hook called by the linker routine which adds symbols from an object
475 _bfd_aarch64_elf_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
476 Elf_Internal_Sym *sym,
477 const char **namep ATTRIBUTE_UNUSED,
478 flagword *flagsp ATTRIBUTE_UNUSED,
479 asection **secp ATTRIBUTE_UNUSED,
480 bfd_vma *valp ATTRIBUTE_UNUSED)
482 if ((abfd->flags & DYNAMIC) == 0
483 && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
484 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
485 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
490 /* Support for core dump NOTE sections. */
493 _bfd_aarch64_elf_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
498 switch (note->descsz)
503 case 392: /* sizeof(struct elf_prstatus) on Linux/arm64. */
505 elf_tdata (abfd)->core->signal
506 = bfd_get_16 (abfd, note->descdata + 12);
509 elf_tdata (abfd)->core->lwpid
510 = bfd_get_32 (abfd, note->descdata + 32);
519 /* Make a ".reg/999" section. */
520 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
521 size, note->descpos + offset);