1 /* ELF support for AArch64.
2 Copyright 2009-2013 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 /* Notes on implementation:
23 Thread Local Store (TLS)
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
   adrp x0, :tlsgd:foo          R_AARCH64_TLSGD_ADR_PAGE21(foo)
   add  x0, x0, :tlsgd_lo12:foo R_AARCH64_TLSGD_ADD_LO12_NC(foo)
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
   traditional TLS mechanism.
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
   global and local TLS symbols.  Note that this is different from
   non-TLS local objects, which do not need a GOT entry.
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD64
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL64 relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
   linker fixes up the offset.
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
95 aarch64_check_relocs()
97 This function is invoked for each relocation.
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. One time creation of local symbol data structures are
102 created when the first local symbol is seen.
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
107 elf64_aarch64_allocate_dynrelocs ()
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT, for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
115 elf64_aarch64_size_dynamic_sections ()
   Iterate over all input BFDs, look in the local symbol data structure
118 constructed earlier for local TLS symbols and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
122 elf64_aarch64_relocate_section ()
124 Calls elf64_aarch64_final_link_relocate ()
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
134 elf64_aarch64_final_link_relocate ()
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
140 #include "libiberty.h"
142 #include "bfd_stdint.h"
145 #include "elf/aarch64.h"
/* Forward declaration: install ADDEND into the relocated field described
   by HOWTO inside ABFD.  Being 'static', the definition must appear
   later in this file.  NOTE(review): at least one parameter line (the
   buffer address) appears to be missing from this extract -- confirm
   the full prototype against the complete source.  */
147 static bfd_reloc_status_type
148 bfd_elf_aarch64_put_addend (bfd *abfd,
150 reloc_howto_type *howto, bfd_signed_vma addend);
/* Non-zero iff R_TYPE is any AArch64 TLS relocation understood by this
   backend: general-dynamic (TLSGD_*), initial-exec (TLSIE_*),
   local-exec (TLSLE_*), the dynamic TLS relocations (DTPMOD64,
   DTPREL64, TPREL64), or any TLS-descriptor relocation (see
   IS_AARCH64_TLSDESC_RELOC below).  */
152 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
153 ((R_TYPE) == R_AARCH64_TLSGD_ADR_PAGE21 \
154 || (R_TYPE) == R_AARCH64_TLSGD_ADD_LO12_NC \
155 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
156 || (R_TYPE) == R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
157 || (R_TYPE) == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
158 || (R_TYPE) == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
159 || (R_TYPE) == R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
160 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12 \
161 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_HI12 \
162 || (R_TYPE) == R_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
163 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G2 \
164 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1 \
165 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
166 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0 \
167 || (R_TYPE) == R_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
168 || (R_TYPE) == R_AARCH64_TLS_DTPMOD64 \
169 || (R_TYPE) == R_AARCH64_TLS_DTPREL64 \
170 || (R_TYPE) == R_AARCH64_TLS_TPREL64 \
171 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
/* Non-zero iff R_TYPE is one of the AArch64 TLS-descriptor relocations.
   Referenced by IS_AARCH64_TLS_RELOC above; being a macro, it only
   needs to be defined before that macro is expanded, not before it is
   defined.  */
173 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
174 ((R_TYPE) == R_AARCH64_TLSDESC_LD64_PREL19 \
175 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PREL21 \
176 || (R_TYPE) == R_AARCH64_TLSDESC_ADR_PAGE \
177 || (R_TYPE) == R_AARCH64_TLSDESC_ADD_LO12_NC \
178 || (R_TYPE) == R_AARCH64_TLSDESC_LD64_LO12_NC \
179 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G1 \
180 || (R_TYPE) == R_AARCH64_TLSDESC_OFF_G0_NC \
181 || (R_TYPE) == R_AARCH64_TLSDESC_LDR \
182 || (R_TYPE) == R_AARCH64_TLSDESC_ADD \
183 || (R_TYPE) == R_AARCH64_TLSDESC_CALL \
184 || (R_TYPE) == R_AARCH64_TLSDESC)
/* Zero: this backend keeps COPY relocations.  Presumably a switch for
   logic that would avoid emitting R_AARCH64_COPY relocs -- confirm at
   the use sites, which are not visible here.  */
186 #define ELIMINATE_COPY_RELOCS 0
/* Return the relocation section name associated with NAME, in the
   "rel" or "rela" flavour as selected by the hash table's use_rel
   flag.  HTAB is the bfd's elf64_aarch64 link hash table (the original
   comment's "link_hash_entry" looks like a typo: ->use_rel is a
   table-level member).  */
190 #define RELOC_SECTION(HTAB, NAME) \
191 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
/* Size of one external relocation record.  HTAB is accepted for
   symmetry with RELOC_SECTION but ignored: the size is always that of
   an Elf64 RELA record.  NOTE(review): if use_rel can really be set
   (see RELOC_SECTION above), a REL record would be smaller -- verify
   whether use_rel is ever true for this backend.  */
195 #define RELOC_SIZE(HTAB) (sizeof (Elf64_External_Rela))
/* Function used to swap a relocation in.  HTAB is ignored; always the
   RELA variant.  */
199 #define SWAP_RELOC_IN(HTAB) (bfd_elf64_swap_reloca_in)
/* Function used to swap a relocation out.  HTAB is ignored; always the
   RELA variant.  */
203 #define SWAP_RELOC_OUT(HTAB) (bfd_elf64_swap_reloca_out)
205 /* GOT Entry size - 8 bytes. */
206 #define GOT_ENTRY_SIZE (8)
/* 8 instructions of 4 bytes: see elf64_aarch64_small_plt0_entry.  */
207 #define PLT_ENTRY_SIZE (32)
/* 4 instructions of 4 bytes: see elf64_aarch64_small_plt_entry.  */
208 #define PLT_SMALL_ENTRY_SIZE (16)
/* 8 instructions of 4 bytes: see elf64_aarch64_tlsdesc_small_plt_entry.  */
209 #define PLT_TLSDESC_ENTRY_SIZE (32)
/* Take the PAGE component of an address or offset (AArch64 relocation
   "pages" are 4 KiB).  The mask is cast to bfd_vma so the complement is
   formed at full bfd_vma width rather than at plain 'int' width,
   making the macros immune to integer-promotion surprises regardless of
   the argument's type (matches the later upstream hardening of these
   macros).  */
#define PG(x) ((x) & ~ (bfd_vma) 0xfff)
/* Take the within-page offset component of an address or offset.  */
#define PG_OFFSET(x) ((x) & (bfd_vma) 0xfff)
/* Encoding of the NOP instruction, matching the 0x1f 0x20 0x03 0xd5
   little-endian padding bytes used in the PLT stubs below.  */
216 #define INSN_NOP 0xd503201f
/* Number of bytes of GOT space implied by the PLT jump table: one
   GOT_ENTRY_SIZE slot per relocation in .rela.plt, or zero when there
   is no PLT relocation section.  */
218 #define aarch64_compute_jump_table_size(htab) \
219 (((htab)->root.srelplt == NULL) ? 0 \
220 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
/* The first entry in a procedure linkage table looks like this if the
   distance between the PLTGOT and the PLT is < 4GB; use these PLT
   entries.  Note that the dynamic linker gets &PLTGOT[2] in x16 and
   needs to work out PLTGOT[1] by using an address of (PLTGOT + 16).  */
/* PLT0 stub: save x16/x30, load PLTGOT[2] into x17 via ADRP+LDR,
   leave &PLTGOT[2] in x16, then branch to x17; padded with NOPs to
   PLT_ENTRY_SIZE.  NOTE(review): the initializer's braces are not
   visible in this extract -- verify against the full source.  */
227 static const bfd_byte elf64_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
229 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
230 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
231 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
232 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
233 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
234 0x1f, 0x20, 0x03, 0xd5, /* nop */
235 0x1f, 0x20, 0x03, 0xd5, /* nop */
236 0x1f, 0x20, 0x03, 0xd5, /* nop */
239 /* Per function entry in a procedure linkage table looks like this
240 if the distance between the PLTGOT and the PLT is < 4GB use
241 these PLT entries. */
/* Loads the target's PLTGOT slot into x17 and branches to it, leaving
   the slot's address in x16.  The ADRP/LDR/ADD immediates are filled in
   per-entry at final link.  NOTE(review): the initializer's braces are
   not visible in this extract.  */
242 static const bfd_byte elf64_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
244 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
245 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
246 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
247 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
/* TLS-descriptor PLT stub: preserves x2/x3, loads one address into x2
   and another into x3 via ADRP-relative addressing (immediates fixed up
   at final link -- presumably the resolver and the descriptor argument;
   confirm against the relocation fix-ups), then branches to x2.  Padded
   with NOPs to PLT_TLSDESC_ENTRY_SIZE.  NOTE(review): the initializer's
   braces are not visible in this extract.  */
250 static const bfd_byte
251 elf64_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
253 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
254 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
255 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
256 0x42, 0x08, 0x40, 0xF9, /* ldr x2, [x2, #0] */
257 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
258 0x40, 0x00, 0x1F, 0xD6, /* br x2 */
259 0x1f, 0x20, 0x03, 0xd5, /* nop */
260 0x1f, 0x20, 0x03, 0xd5, /* nop */
/* Route the generic ELF info-to-howto hooks (both the RELA and REL
   variants) to this backend's single converter.  */
263 #define elf_info_to_howto elf64_aarch64_info_to_howto
264 #define elf_info_to_howto_rel elf64_aarch64_info_to_howto
/* ABI version numbers recorded for this backend.  */
266 #define AARCH64_ELF_ABI_VERSION 0
267 #define AARCH64_ELF_OS_ABI_VERSION 0
269 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
270 #define ALL_ONES (~ (bfd_vma) 0)
/* Dummy howto for R_AARCH64_NONE: byte-sized, not PC-relative, no
   overflow checking -- a no-op relocation.  NOTE(review): several HOWTO
   argument lines (rightshift, bitsize, bitpos, masks) are not visible
   in this extract; verify the initializer against the full source.  */
272 static reloc_howto_type elf64_aarch64_howto_none =
273 HOWTO (R_AARCH64_NONE, /* type */
275 0, /* size (0 = byte, 1 = short, 2 = long) */
277 FALSE, /* pc_relative */
279 complain_overflow_dont,/* complain_on_overflow */
280 bfd_elf_generic_reloc, /* special_function */
281 "R_AARCH64_NONE", /* name */
282 FALSE, /* partial_inplace */
285 FALSE); /* pcrel_offset */
/* Howtos for the dynamic relocations emitted by this backend: COPY,
   GLOB_DAT, JUMP_SLOT, RELATIVE and the TLS dynamic relocations
   (DTPMOD64, DTPREL64, TPREL64, TLSDESC).  NOTE(review): many HOWTO
   argument lines (rightshift, bitsize, bitpos and some masks) plus the
   array braces are not visible in this extract; verify each
   initializer against the full source.  */
287 static reloc_howto_type elf64_aarch64_howto_dynrelocs[] =
289 HOWTO (R_AARCH64_COPY, /* type */
291 2, /* size (0 = byte, 1 = short, 2 = long) */
293 FALSE, /* pc_relative */
295 complain_overflow_bitfield, /* complain_on_overflow */
296 bfd_elf_generic_reloc, /* special_function */
297 "R_AARCH64_COPY", /* name */
298 TRUE, /* partial_inplace */
299 0xffffffff, /* src_mask */
300 0xffffffff, /* dst_mask */
301 FALSE), /* pcrel_offset */
303 HOWTO (R_AARCH64_GLOB_DAT, /* type */
305 2, /* size (0 = byte, 1 = short, 2 = long) */
307 FALSE, /* pc_relative */
309 complain_overflow_bitfield, /* complain_on_overflow */
310 bfd_elf_generic_reloc, /* special_function */
311 "R_AARCH64_GLOB_DAT", /* name */
312 TRUE, /* partial_inplace */
313 0xffffffff, /* src_mask */
314 0xffffffff, /* dst_mask */
315 FALSE), /* pcrel_offset */
317 HOWTO (R_AARCH64_JUMP_SLOT, /* type */
319 2, /* size (0 = byte, 1 = short, 2 = long) */
321 FALSE, /* pc_relative */
323 complain_overflow_bitfield, /* complain_on_overflow */
324 bfd_elf_generic_reloc, /* special_function */
325 "R_AARCH64_JUMP_SLOT", /* name */
326 TRUE, /* partial_inplace */
327 0xffffffff, /* src_mask */
328 0xffffffff, /* dst_mask */
329 FALSE), /* pcrel_offset */
331 HOWTO (R_AARCH64_RELATIVE, /* type */
333 2, /* size (0 = byte, 1 = short, 2 = long) */
335 FALSE, /* pc_relative */
337 complain_overflow_bitfield, /* complain_on_overflow */
338 bfd_elf_generic_reloc, /* special_function */
339 "R_AARCH64_RELATIVE", /* name */
340 TRUE, /* partial_inplace */
341 ALL_ONES, /* src_mask */
342 ALL_ONES, /* dst_mask */
343 FALSE), /* pcrel_offset */
345 HOWTO (R_AARCH64_TLS_DTPMOD64, /* type */
347 2, /* size (0 = byte, 1 = short, 2 = long) */
349 FALSE, /* pc_relative */
351 complain_overflow_dont, /* complain_on_overflow */
352 bfd_elf_generic_reloc, /* special_function */
353 "R_AARCH64_TLS_DTPMOD64", /* name */
354 FALSE, /* partial_inplace */
356 ALL_ONES, /* dst_mask */
357 FALSE), /* pcrel_offset */
359 HOWTO (R_AARCH64_TLS_DTPREL64, /* type */
361 2, /* size (0 = byte, 1 = short, 2 = long) */
363 FALSE, /* pc_relative */
365 complain_overflow_dont, /* complain_on_overflow */
366 bfd_elf_generic_reloc, /* special_function */
367 "R_AARCH64_TLS_DTPREL64", /* name */
368 FALSE, /* partial_inplace */
370 ALL_ONES, /* dst_mask */
371 FALSE), /* pcrel_offset */
373 HOWTO (R_AARCH64_TLS_TPREL64, /* type */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
377 FALSE, /* pc_relative */
379 complain_overflow_dont, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_AARCH64_TLS_TPREL64", /* name */
382 FALSE, /* partial_inplace */
384 ALL_ONES, /* dst_mask */
385 FALSE), /* pcrel_offset */
387 HOWTO (R_AARCH64_TLSDESC, /* type */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
391 FALSE, /* pc_relative */
393 complain_overflow_dont, /* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_AARCH64_TLSDESC", /* name */
396 FALSE, /* partial_inplace */
398 ALL_ONES, /* dst_mask */
399 FALSE), /* pcrel_offset */
/* Note: code such as elf64_aarch64_reloc_type_lookup expects to use
   e.g. R_AARCH64_PREL64 as an index into this table, and to find the
   R_AARCH64_PREL64 HOWTO in that slot.  */
407 static reloc_howto_type elf64_aarch64_howto_table[] =
409 /* Basic data relocations. */
411 HOWTO (R_AARCH64_NULL, /* type */
413 0, /* size (0 = byte, 1 = short, 2 = long) */
415 FALSE, /* pc_relative */
417 complain_overflow_dont, /* complain_on_overflow */
418 bfd_elf_generic_reloc, /* special_function */
419 "R_AARCH64_NULL", /* name */
420 FALSE, /* partial_inplace */
423 FALSE), /* pcrel_offset */
426 HOWTO (R_AARCH64_ABS64, /* type */
428 4, /* size (4 = long long) */
430 FALSE, /* pc_relative */
432 complain_overflow_unsigned, /* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_AARCH64_ABS64", /* name */
435 FALSE, /* partial_inplace */
436 ALL_ONES, /* src_mask */
437 ALL_ONES, /* dst_mask */
438 FALSE), /* pcrel_offset */
441 HOWTO (R_AARCH64_ABS32, /* type */
443 2, /* size (0 = byte, 1 = short, 2 = long) */
445 FALSE, /* pc_relative */
447 complain_overflow_unsigned, /* complain_on_overflow */
448 bfd_elf_generic_reloc, /* special_function */
449 "R_AARCH64_ABS32", /* name */
450 FALSE, /* partial_inplace */
451 0xffffffff, /* src_mask */
452 0xffffffff, /* dst_mask */
453 FALSE), /* pcrel_offset */
456 HOWTO (R_AARCH64_ABS16, /* type */
458 1, /* size (0 = byte, 1 = short, 2 = long) */
460 FALSE, /* pc_relative */
462 complain_overflow_unsigned, /* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_AARCH64_ABS16", /* name */
465 FALSE, /* partial_inplace */
466 0xffff, /* src_mask */
467 0xffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
470 /* .xword: (S+A-P) */
471 HOWTO (R_AARCH64_PREL64, /* type */
473 4, /* size (4 = long long) */
475 TRUE, /* pc_relative */
477 complain_overflow_signed, /* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_AARCH64_PREL64", /* name */
480 FALSE, /* partial_inplace */
481 ALL_ONES, /* src_mask */
482 ALL_ONES, /* dst_mask */
483 TRUE), /* pcrel_offset */
486 HOWTO (R_AARCH64_PREL32, /* type */
488 2, /* size (0 = byte, 1 = short, 2 = long) */
490 TRUE, /* pc_relative */
492 complain_overflow_signed, /* complain_on_overflow */
493 bfd_elf_generic_reloc, /* special_function */
494 "R_AARCH64_PREL32", /* name */
495 FALSE, /* partial_inplace */
496 0xffffffff, /* src_mask */
497 0xffffffff, /* dst_mask */
498 TRUE), /* pcrel_offset */
501 HOWTO (R_AARCH64_PREL16, /* type */
503 1, /* size (0 = byte, 1 = short, 2 = long) */
505 TRUE, /* pc_relative */
507 complain_overflow_signed, /* complain_on_overflow */
508 bfd_elf_generic_reloc, /* special_function */
509 "R_AARCH64_PREL16", /* name */
510 FALSE, /* partial_inplace */
511 0xffff, /* src_mask */
512 0xffff, /* dst_mask */
513 TRUE), /* pcrel_offset */
515 /* Group relocations to create a 16, 32, 48 or 64 bit
516 unsigned data or abs address inline. */
518 /* MOVZ: ((S+A) >> 0) & 0xffff */
519 HOWTO (R_AARCH64_MOVW_UABS_G0, /* type */
521 2, /* size (0 = byte, 1 = short, 2 = long) */
523 FALSE, /* pc_relative */
525 complain_overflow_unsigned, /* complain_on_overflow */
526 bfd_elf_generic_reloc, /* special_function */
527 "R_AARCH64_MOVW_UABS_G0", /* name */
528 FALSE, /* partial_inplace */
529 0xffff, /* src_mask */
530 0xffff, /* dst_mask */
531 FALSE), /* pcrel_offset */
533 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
534 HOWTO (R_AARCH64_MOVW_UABS_G0_NC, /* type */
536 2, /* size (0 = byte, 1 = short, 2 = long) */
538 FALSE, /* pc_relative */
540 complain_overflow_dont, /* complain_on_overflow */
541 bfd_elf_generic_reloc, /* special_function */
542 "R_AARCH64_MOVW_UABS_G0_NC", /* name */
543 FALSE, /* partial_inplace */
544 0xffff, /* src_mask */
545 0xffff, /* dst_mask */
546 FALSE), /* pcrel_offset */
548 /* MOVZ: ((S+A) >> 16) & 0xffff */
549 HOWTO (R_AARCH64_MOVW_UABS_G1, /* type */
551 2, /* size (0 = byte, 1 = short, 2 = long) */
553 FALSE, /* pc_relative */
555 complain_overflow_unsigned, /* complain_on_overflow */
556 bfd_elf_generic_reloc, /* special_function */
557 "R_AARCH64_MOVW_UABS_G1", /* name */
558 FALSE, /* partial_inplace */
559 0xffff, /* src_mask */
560 0xffff, /* dst_mask */
561 FALSE), /* pcrel_offset */
563 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
564 HOWTO (R_AARCH64_MOVW_UABS_G1_NC, /* type */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
568 FALSE, /* pc_relative */
570 complain_overflow_dont, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_AARCH64_MOVW_UABS_G1_NC", /* name */
573 FALSE, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE), /* pcrel_offset */
578 /* MOVZ: ((S+A) >> 32) & 0xffff */
579 HOWTO (R_AARCH64_MOVW_UABS_G2, /* type */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
583 FALSE, /* pc_relative */
585 complain_overflow_unsigned, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 "R_AARCH64_MOVW_UABS_G2", /* name */
588 FALSE, /* partial_inplace */
589 0xffff, /* src_mask */
590 0xffff, /* dst_mask */
591 FALSE), /* pcrel_offset */
593 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
594 HOWTO (R_AARCH64_MOVW_UABS_G2_NC, /* type */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
598 FALSE, /* pc_relative */
600 complain_overflow_dont, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_AARCH64_MOVW_UABS_G2_NC", /* name */
603 FALSE, /* partial_inplace */
604 0xffff, /* src_mask */
605 0xffff, /* dst_mask */
606 FALSE), /* pcrel_offset */
608 /* MOVZ: ((S+A) >> 48) & 0xffff */
609 HOWTO (R_AARCH64_MOVW_UABS_G3, /* type */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
613 FALSE, /* pc_relative */
615 complain_overflow_unsigned, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 "R_AARCH64_MOVW_UABS_G3", /* name */
618 FALSE, /* partial_inplace */
619 0xffff, /* src_mask */
620 0xffff, /* dst_mask */
621 FALSE), /* pcrel_offset */
623 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
624 signed data or abs address inline. Will change instruction
625 to MOVN or MOVZ depending on sign of calculated value. */
627 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
628 HOWTO (R_AARCH64_MOVW_SABS_G0, /* type */
630 2, /* size (0 = byte, 1 = short, 2 = long) */
632 FALSE, /* pc_relative */
634 complain_overflow_signed, /* complain_on_overflow */
635 bfd_elf_generic_reloc, /* special_function */
636 "R_AARCH64_MOVW_SABS_G0", /* name */
637 FALSE, /* partial_inplace */
638 0xffff, /* src_mask */
639 0xffff, /* dst_mask */
640 FALSE), /* pcrel_offset */
642 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
643 HOWTO (R_AARCH64_MOVW_SABS_G1, /* type */
645 2, /* size (0 = byte, 1 = short, 2 = long) */
647 FALSE, /* pc_relative */
649 complain_overflow_signed, /* complain_on_overflow */
650 bfd_elf_generic_reloc, /* special_function */
651 "R_AARCH64_MOVW_SABS_G1", /* name */
652 FALSE, /* partial_inplace */
653 0xffff, /* src_mask */
654 0xffff, /* dst_mask */
655 FALSE), /* pcrel_offset */
657 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
658 HOWTO (R_AARCH64_MOVW_SABS_G2, /* type */
660 2, /* size (0 = byte, 1 = short, 2 = long) */
662 FALSE, /* pc_relative */
664 complain_overflow_signed, /* complain_on_overflow */
665 bfd_elf_generic_reloc, /* special_function */
666 "R_AARCH64_MOVW_SABS_G2", /* name */
667 FALSE, /* partial_inplace */
668 0xffff, /* src_mask */
669 0xffff, /* dst_mask */
670 FALSE), /* pcrel_offset */
672 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
673 addresses: PG(x) is (x & ~0xfff). */
675 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
676 HOWTO (R_AARCH64_LD_PREL_LO19, /* type */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
680 TRUE, /* pc_relative */
682 complain_overflow_signed, /* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_AARCH64_LD_PREL_LO19", /* name */
685 FALSE, /* partial_inplace */
686 0x7ffff, /* src_mask */
687 0x7ffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
690 /* ADR: (S+A-P) & 0x1fffff */
691 HOWTO (R_AARCH64_ADR_PREL_LO21, /* type */
693 2, /* size (0 = byte, 1 = short, 2 = long) */
695 TRUE, /* pc_relative */
697 complain_overflow_signed, /* complain_on_overflow */
698 bfd_elf_generic_reloc, /* special_function */
699 "R_AARCH64_ADR_PREL_LO21", /* name */
700 FALSE, /* partial_inplace */
701 0x1fffff, /* src_mask */
702 0x1fffff, /* dst_mask */
703 TRUE), /* pcrel_offset */
705 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
706 HOWTO (R_AARCH64_ADR_PREL_PG_HI21, /* type */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
710 TRUE, /* pc_relative */
712 complain_overflow_signed, /* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_AARCH64_ADR_PREL_PG_HI21", /* name */
715 FALSE, /* partial_inplace */
716 0x1fffff, /* src_mask */
717 0x1fffff, /* dst_mask */
718 TRUE), /* pcrel_offset */
720 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
721 HOWTO (R_AARCH64_ADR_PREL_PG_HI21_NC, /* type */
723 2, /* size (0 = byte, 1 = short, 2 = long) */
725 TRUE, /* pc_relative */
727 complain_overflow_dont, /* complain_on_overflow */
728 bfd_elf_generic_reloc, /* special_function */
729 "R_AARCH64_ADR_PREL_PG_HI21_NC", /* name */
730 FALSE, /* partial_inplace */
731 0x1fffff, /* src_mask */
732 0x1fffff, /* dst_mask */
733 TRUE), /* pcrel_offset */
735 /* ADD: (S+A) & 0xfff [no overflow check] */
736 HOWTO (R_AARCH64_ADD_ABS_LO12_NC, /* type */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
740 FALSE, /* pc_relative */
742 complain_overflow_dont, /* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_AARCH64_ADD_ABS_LO12_NC", /* name */
745 FALSE, /* partial_inplace */
746 0x3ffc00, /* src_mask */
747 0x3ffc00, /* dst_mask */
748 FALSE), /* pcrel_offset */
750 /* LD/ST8: (S+A) & 0xfff */
751 HOWTO (R_AARCH64_LDST8_ABS_LO12_NC, /* type */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
755 FALSE, /* pc_relative */
757 complain_overflow_dont, /* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_AARCH64_LDST8_ABS_LO12_NC", /* name */
760 FALSE, /* partial_inplace */
761 0xfff, /* src_mask */
762 0xfff, /* dst_mask */
763 FALSE), /* pcrel_offset */
765 /* Relocations for control-flow instructions. */
767 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
768 HOWTO (R_AARCH64_TSTBR14, /* type */
770 2, /* size (0 = byte, 1 = short, 2 = long) */
772 TRUE, /* pc_relative */
774 complain_overflow_signed, /* complain_on_overflow */
775 bfd_elf_generic_reloc, /* special_function */
776 "R_AARCH64_TSTBR14", /* name */
777 FALSE, /* partial_inplace */
778 0x3fff, /* src_mask */
779 0x3fff, /* dst_mask */
780 TRUE), /* pcrel_offset */
782 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
783 HOWTO (R_AARCH64_CONDBR19, /* type */
785 2, /* size (0 = byte, 1 = short, 2 = long) */
787 TRUE, /* pc_relative */
789 complain_overflow_signed, /* complain_on_overflow */
790 bfd_elf_generic_reloc, /* special_function */
791 "R_AARCH64_CONDBR19", /* name */
792 FALSE, /* partial_inplace */
793 0x7ffff, /* src_mask */
794 0x7ffff, /* dst_mask */
795 TRUE), /* pcrel_offset */
799 /* B: ((S+A-P) >> 2) & 0x3ffffff */
800 HOWTO (R_AARCH64_JUMP26, /* type */
802 2, /* size (0 = byte, 1 = short, 2 = long) */
804 TRUE, /* pc_relative */
806 complain_overflow_signed, /* complain_on_overflow */
807 bfd_elf_generic_reloc, /* special_function */
808 "R_AARCH64_JUMP26", /* name */
809 FALSE, /* partial_inplace */
810 0x3ffffff, /* src_mask */
811 0x3ffffff, /* dst_mask */
812 TRUE), /* pcrel_offset */
814 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
815 HOWTO (R_AARCH64_CALL26, /* type */
817 2, /* size (0 = byte, 1 = short, 2 = long) */
819 TRUE, /* pc_relative */
821 complain_overflow_signed, /* complain_on_overflow */
822 bfd_elf_generic_reloc, /* special_function */
823 "R_AARCH64_CALL26", /* name */
824 FALSE, /* partial_inplace */
825 0x3ffffff, /* src_mask */
826 0x3ffffff, /* dst_mask */
827 TRUE), /* pcrel_offset */
829 /* LD/ST16: (S+A) & 0xffe */
830 HOWTO (R_AARCH64_LDST16_ABS_LO12_NC, /* type */
832 2, /* size (0 = byte, 1 = short, 2 = long) */
834 FALSE, /* pc_relative */
836 complain_overflow_dont, /* complain_on_overflow */
837 bfd_elf_generic_reloc, /* special_function */
838 "R_AARCH64_LDST16_ABS_LO12_NC", /* name */
839 FALSE, /* partial_inplace */
840 0xffe, /* src_mask */
841 0xffe, /* dst_mask */
842 FALSE), /* pcrel_offset */
844 /* LD/ST32: (S+A) & 0xffc */
845 HOWTO (R_AARCH64_LDST32_ABS_LO12_NC, /* type */
847 2, /* size (0 = byte, 1 = short, 2 = long) */
849 FALSE, /* pc_relative */
851 complain_overflow_dont, /* complain_on_overflow */
852 bfd_elf_generic_reloc, /* special_function */
853 "R_AARCH64_LDST32_ABS_LO12_NC", /* name */
854 FALSE, /* partial_inplace */
855 0xffc, /* src_mask */
856 0xffc, /* dst_mask */
857 FALSE), /* pcrel_offset */
859 /* LD/ST64: (S+A) & 0xff8 */
860 HOWTO (R_AARCH64_LDST64_ABS_LO12_NC, /* type */
862 2, /* size (0 = byte, 1 = short, 2 = long) */
864 FALSE, /* pc_relative */
866 complain_overflow_dont, /* complain_on_overflow */
867 bfd_elf_generic_reloc, /* special_function */
868 "R_AARCH64_LDST64_ABS_LO12_NC", /* name */
869 FALSE, /* partial_inplace */
870 0xff8, /* src_mask */
871 0xff8, /* dst_mask */
872 FALSE), /* pcrel_offset */
887 /* LD/ST128: (S+A) & 0xff0 */
888 HOWTO (R_AARCH64_LDST128_ABS_LO12_NC, /* type */
890 2, /* size (0 = byte, 1 = short, 2 = long) */
892 FALSE, /* pc_relative */
894 complain_overflow_dont, /* complain_on_overflow */
895 bfd_elf_generic_reloc, /* special_function */
896 "R_AARCH64_LDST128_ABS_LO12_NC", /* name */
897 FALSE, /* partial_inplace */
898 0xff0, /* src_mask */
899 0xff0, /* dst_mask */
900 FALSE), /* pcrel_offset */
912 /* Set a load-literal immediate field to bits
913 0x1FFFFC of G(S)-P */
914 HOWTO (R_AARCH64_GOT_LD_PREL19, /* type */
916 2, /* size (0 = byte,1 = short,2 = long) */
918 TRUE, /* pc_relative */
920 complain_overflow_signed, /* complain_on_overflow */
921 bfd_elf_generic_reloc, /* special_function */
922 "R_AARCH64_GOT_LD_PREL19", /* name */
923 FALSE, /* partial_inplace */
924 0xffffe0, /* src_mask */
925 0xffffe0, /* dst_mask */
926 TRUE), /* pcrel_offset */
930 /* Get to the page for the GOT entry for the symbol
931 (G(S) - P) using an ADRP instruction. */
932 HOWTO (R_AARCH64_ADR_GOT_PAGE, /* type */
934 2, /* size (0 = byte, 1 = short, 2 = long) */
936 TRUE, /* pc_relative */
938 complain_overflow_dont, /* complain_on_overflow */
939 bfd_elf_generic_reloc, /* special_function */
940 "R_AARCH64_ADR_GOT_PAGE", /* name */
941 FALSE, /* partial_inplace */
942 0x1fffff, /* src_mask */
943 0x1fffff, /* dst_mask */
944 TRUE), /* pcrel_offset */
946 /* LD64: GOT offset G(S) & 0xff8 */
947 HOWTO (R_AARCH64_LD64_GOT_LO12_NC, /* type */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
951 FALSE, /* pc_relative */
953 complain_overflow_dont, /* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_AARCH64_LD64_GOT_LO12_NC", /* name */
956 FALSE, /* partial_inplace */
957 0xff8, /* src_mask */
958 0xff8, /* dst_mask */
959 FALSE) /* pcrel_offset */
962 static reloc_howto_type elf64_aarch64_tls_howto_table[] =
966 /* Get to the page for the GOT entry for the symbol
967 (G(S) - P) using an ADRP instruction. */
968 HOWTO (R_AARCH64_TLSGD_ADR_PAGE21, /* type */
970 2, /* size (0 = byte, 1 = short, 2 = long) */
972 TRUE, /* pc_relative */
974 complain_overflow_dont, /* complain_on_overflow */
975 bfd_elf_generic_reloc, /* special_function */
976 "R_AARCH64_TLSGD_ADR_PAGE21", /* name */
977 FALSE, /* partial_inplace */
978 0x1fffff, /* src_mask */
979 0x1fffff, /* dst_mask */
980 TRUE), /* pcrel_offset */
982 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
983 HOWTO (R_AARCH64_TLSGD_ADD_LO12_NC, /* type */
985 2, /* size (0 = byte, 1 = short, 2 = long) */
987 FALSE, /* pc_relative */
989 complain_overflow_dont, /* complain_on_overflow */
990 bfd_elf_generic_reloc, /* special_function */
991 "R_AARCH64_TLSGD_ADD_LO12_NC", /* name */
992 FALSE, /* partial_inplace */
993 0xfff, /* src_mask */
994 0xfff, /* dst_mask */
995 FALSE), /* pcrel_offset */
1022 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G1, /* type */
1023 16, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1026 FALSE, /* pc_relative */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1036 HOWTO (R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC, /* type */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1040 FALSE, /* pc_relative */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", /* name */
1045 FALSE, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1050 HOWTO (R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1054 FALSE, /* pc_relative */
1056 complain_overflow_dont, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", /* name */
1059 FALSE, /* partial_inplace */
1060 0x1fffff, /* src_mask */
1061 0x1fffff, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1064 HOWTO (R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, /* type */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1068 FALSE, /* pc_relative */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", /* name */
1073 FALSE, /* partial_inplace */
1074 0xff8, /* src_mask */
1075 0xff8, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1078 HOWTO (R_AARCH64_TLSIE_LD_GOTTPREL_PREL19, /* type */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1082 FALSE, /* pc_relative */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", /* name */
1087 FALSE, /* partial_inplace */
1088 0x1ffffc, /* src_mask */
1089 0x1ffffc, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1092 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G2, /* type */
1093 32, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1096 FALSE, /* pc_relative */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_AARCH64_TLSLE_MOVW_TPREL_G2", /* name */
1101 FALSE, /* partial_inplace */
1102 0xffff, /* src_mask */
1103 0xffff, /* dst_mask */
1104 FALSE), /* pcrel_offset */
1106 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1, /* type */
1107 16, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1110 FALSE, /* pc_relative */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_AARCH64_TLSLE_MOVW_TPREL_G1", /* name */
1115 FALSE, /* partial_inplace */
1116 0xffff, /* src_mask */
1117 0xffff, /* dst_mask */
1118 FALSE), /* pcrel_offset */
1120 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G1_NC, /* type */
1121 16, /* rightshift */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1124 FALSE, /* pc_relative */
1126 complain_overflow_dont, /* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", /* name */
1129 FALSE, /* partial_inplace */
1130 0xffff, /* src_mask */
1131 0xffff, /* dst_mask */
1132 FALSE), /* pcrel_offset */
1134 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0, /* type */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1138 FALSE, /* pc_relative */
1140 complain_overflow_dont, /* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_AARCH64_TLSLE_MOVW_TPREL_G0", /* name */
1143 FALSE, /* partial_inplace */
1144 0xffff, /* src_mask */
1145 0xffff, /* dst_mask */
1146 FALSE), /* pcrel_offset */
1148 HOWTO (R_AARCH64_TLSLE_MOVW_TPREL_G0_NC, /* type */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1152 FALSE, /* pc_relative */
1154 complain_overflow_dont, /* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", /* name */
1157 FALSE, /* partial_inplace */
1158 0xffff, /* src_mask */
1159 0xffff, /* dst_mask */
1160 FALSE), /* pcrel_offset */
1162 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_HI12, /* type */
1163 12, /* rightshift */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1166 FALSE, /* pc_relative */
1168 complain_overflow_dont, /* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_AARCH64_TLSLE_ADD_TPREL_HI12", /* name */
1171 FALSE, /* partial_inplace */
1172 0xfff, /* src_mask */
1173 0xfff, /* dst_mask */
1174 FALSE), /* pcrel_offset */
1176 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12, /* type */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1180 FALSE, /* pc_relative */
1182 complain_overflow_dont, /* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_AARCH64_TLSLE_ADD_TPREL_LO12", /* name */
1185 FALSE, /* partial_inplace */
1186 0xfff, /* src_mask */
1187 0xfff, /* dst_mask */
1188 FALSE), /* pcrel_offset */
1190 HOWTO (R_AARCH64_TLSLE_ADD_TPREL_LO12_NC, /* type */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1194 FALSE, /* pc_relative */
1196 complain_overflow_dont, /* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", /* name */
1199 FALSE, /* partial_inplace */
1200 0xfff, /* src_mask */
1201 0xfff, /* dst_mask */
1202 FALSE), /* pcrel_offset */
/* Relocations for TLS descriptors (R_AARCH64_TLSDESC_*).  Indexed by
   r_type - R_AARCH64_tlsdesc_min in elf64_aarch64_howto_from_type.
   NOTE(review): several HOWTO argument lines (rightshift/bitsize/bitpos)
   and the table braces are missing from this excerpt; the visible
   fields are preserved unchanged.  */
static reloc_howto_type elf64_aarch64_tlsdesc_howto_table[] =
  /* 19-bit PC-relative load of the TLS descriptor GOT entry.  */
  HOWTO (R_AARCH64_TLSDESC_LD64_PREL19,	/* type */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 TRUE,			/* pc_relative */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_LD64_PREL19",	/* name */
	 FALSE,			/* partial_inplace */
	 0x1ffffc,		/* src_mask */
	 0x1ffffc,		/* dst_mask */
	 TRUE),			/* pcrel_offset */
  /* 21-bit PC-relative ADR to the TLS descriptor GOT entry.  */
  HOWTO (R_AARCH64_TLSDESC_ADR_PREL21,	/* type */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 TRUE,			/* pc_relative */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_ADR_PREL21",	/* name */
	 FALSE,			/* partial_inplace */
	 0x1fffff,		/* src_mask */
	 0x1fffff,		/* dst_mask */
	 TRUE),			/* pcrel_offset */
  /* Get to the page for the GOT entry for the symbol
     (G(S) - P) using an ADRP instruction.  */
  HOWTO (R_AARCH64_TLSDESC_ADR_PAGE,	/* type */
	 12,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 TRUE,			/* pc_relative */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_ADR_PAGE",	/* name */
	 FALSE,			/* partial_inplace */
	 0x1fffff,		/* src_mask */
	 0x1fffff,		/* dst_mask */
	 TRUE),			/* pcrel_offset */
  /* LD64: GOT offset G(S) & 0xfff.  */
  HOWTO (R_AARCH64_TLSDESC_LD64_LO12_NC,	/* type */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_LD64_LO12_NC",	/* name */
	 FALSE,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 FALSE),		/* pcrel_offset */
  /* ADD: GOT offset G(S) & 0xfff.  */
  HOWTO (R_AARCH64_TLSDESC_ADD_LO12_NC,	/* type */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_ADD_LO12_NC",	/* name */
	 FALSE,			/* partial_inplace */
	 0xfff,			/* src_mask */
	 0xfff,			/* dst_mask */
	 FALSE),		/* pcrel_offset */
  /* Bits [31:16] of the GOT offset, for a MOVZ/MOVK pair.  */
  HOWTO (R_AARCH64_TLSDESC_OFF_G1,	/* type */
	 16,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_OFF_G1",	/* name */
	 FALSE,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  /* Bits [15:0] of the GOT offset, no overflow check.  */
  HOWTO (R_AARCH64_TLSDESC_OFF_G0_NC,	/* type */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_OFF_G0_NC",	/* name */
	 FALSE,			/* partial_inplace */
	 0xffff,		/* src_mask */
	 0xffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  /* Annotation relocations on the LDR/ADD/BLR of the descriptor call
     sequence (no bits are patched; see the TLS notes at the top of
     this file).  NOTE(review): mask lines missing from this excerpt.  */
  HOWTO (R_AARCH64_TLSDESC_LDR,	/* type */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_LDR",	/* name */
	 FALSE,			/* partial_inplace */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_AARCH64_TLSDESC_ADD,	/* type */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_ADD",	/* name */
	 FALSE,			/* partial_inplace */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_AARCH64_TLSDESC_CALL,	/* type */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 FALSE,			/* pc_relative */
	 complain_overflow_dont,	/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_AARCH64_TLSDESC_CALL",	/* name */
	 FALSE,			/* partial_inplace */
	 FALSE),		/* pcrel_offset */
/* Map an ELF relocation number R_TYPE to its howto description,
   selecting among the static, TLS, TLS-descriptor and dynamic howto
   tables.  Returns NULL (after setting bfd_error_bad_value) for an
   unrecognised type.  */
static reloc_howto_type *
elf64_aarch64_howto_from_type (unsigned int r_type)
  if (r_type >= R_AARCH64_static_min && r_type < R_AARCH64_static_max)
    return &elf64_aarch64_howto_table[r_type - R_AARCH64_static_min];
  if (r_type >= R_AARCH64_tls_min && r_type < R_AARCH64_tls_max)
    return &elf64_aarch64_tls_howto_table[r_type - R_AARCH64_tls_min];
  if (r_type >= R_AARCH64_tlsdesc_min && r_type < R_AARCH64_tlsdesc_max)
    return &elf64_aarch64_tlsdesc_howto_table[r_type - R_AARCH64_tlsdesc_min];
  if (r_type >= R_AARCH64_dyn_min && r_type < R_AARCH64_dyn_max)
    return &elf64_aarch64_howto_dynrelocs[r_type - R_AARCH64_dyn_min];
  /* NOTE(review): the switch statement framing these labels is not
     visible in this excerpt.  */
  case R_AARCH64_NONE:
    return &elf64_aarch64_howto_none;
  bfd_set_error (bfd_error_bad_value);
/* elf_info_to_howto hook: fill in BFD_RELOC's howto pointer from the
   type field of the internal ELF relocation ELF_RELOC.  */
elf64_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
			     Elf_Internal_Rela *elf_reloc)
  unsigned int r_type;

  r_type = ELF64_R_TYPE (elf_reloc->r_info);
  bfd_reloc->howto = elf64_aarch64_howto_from_type (r_type);
/* One entry in the BFD-reloc-code -> ELF-reloc-number mapping table.  */
struct elf64_aarch64_reloc_map
  bfd_reloc_code_real_type bfd_reloc_val;	/* BFD's generic reloc code.  */
  unsigned int elf_reloc_val;			/* Matching R_AARCH64_* number.  */
/* All entries in this list must also be present in
   elf64_aarch64_howto_table.  Searched linearly by
   elf64_aarch64_reloc_type_lookup.  */
static const struct elf64_aarch64_reloc_map elf64_aarch64_reloc_map[] =
  {BFD_RELOC_NONE, R_AARCH64_NONE},

  /* Basic data relocations.  */
  {BFD_RELOC_CTOR, R_AARCH64_ABS64},
  {BFD_RELOC_64, R_AARCH64_ABS64},
  {BFD_RELOC_32, R_AARCH64_ABS32},
  {BFD_RELOC_16, R_AARCH64_ABS16},
  {BFD_RELOC_64_PCREL, R_AARCH64_PREL64},
  {BFD_RELOC_32_PCREL, R_AARCH64_PREL32},
  {BFD_RELOC_16_PCREL, R_AARCH64_PREL16},

  /* Group relocations to low order bits of a 16, 32, 48 or 64 bit
     value inline.  */
  {BFD_RELOC_AARCH64_MOVW_G0_NC, R_AARCH64_MOVW_UABS_G0_NC},
  {BFD_RELOC_AARCH64_MOVW_G1_NC, R_AARCH64_MOVW_UABS_G1_NC},
  {BFD_RELOC_AARCH64_MOVW_G2_NC, R_AARCH64_MOVW_UABS_G2_NC},

  /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
     signed value inline.  */
  {BFD_RELOC_AARCH64_MOVW_G0_S, R_AARCH64_MOVW_SABS_G0},
  {BFD_RELOC_AARCH64_MOVW_G1_S, R_AARCH64_MOVW_SABS_G1},
  {BFD_RELOC_AARCH64_MOVW_G2_S, R_AARCH64_MOVW_SABS_G2},

  /* Group relocations to create high bits of a 16, 32, 48 or 64 bit
     unsigned value inline.  */
  {BFD_RELOC_AARCH64_MOVW_G0, R_AARCH64_MOVW_UABS_G0},
  {BFD_RELOC_AARCH64_MOVW_G1, R_AARCH64_MOVW_UABS_G1},
  {BFD_RELOC_AARCH64_MOVW_G2, R_AARCH64_MOVW_UABS_G2},
  {BFD_RELOC_AARCH64_MOVW_G3, R_AARCH64_MOVW_UABS_G3},

  /* Relocations to generate 19, 21 and 33 bit PC-relative load/store.  */
  {BFD_RELOC_AARCH64_LD_LO19_PCREL, R_AARCH64_LD_PREL_LO19},
  {BFD_RELOC_AARCH64_ADR_LO21_PCREL, R_AARCH64_ADR_PREL_LO21},
  {BFD_RELOC_AARCH64_ADR_HI21_PCREL, R_AARCH64_ADR_PREL_PG_HI21},
  {BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL, R_AARCH64_ADR_PREL_PG_HI21_NC},
  {BFD_RELOC_AARCH64_ADD_LO12, R_AARCH64_ADD_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST8_LO12, R_AARCH64_LDST8_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST16_LO12, R_AARCH64_LDST16_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST32_LO12, R_AARCH64_LDST32_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST64_LO12, R_AARCH64_LDST64_ABS_LO12_NC},
  {BFD_RELOC_AARCH64_LDST128_LO12, R_AARCH64_LDST128_ABS_LO12_NC},

  /* Relocations for control-flow instructions.  */
  {BFD_RELOC_AARCH64_TSTBR14, R_AARCH64_TSTBR14},
  {BFD_RELOC_AARCH64_BRANCH19, R_AARCH64_CONDBR19},
  {BFD_RELOC_AARCH64_JUMP26, R_AARCH64_JUMP26},
  {BFD_RELOC_AARCH64_CALL26, R_AARCH64_CALL26},

  /* Relocations for PIC.  */
  {BFD_RELOC_AARCH64_GOT_LD_PREL19, R_AARCH64_GOT_LD_PREL19},
  {BFD_RELOC_AARCH64_ADR_GOT_PAGE, R_AARCH64_ADR_GOT_PAGE},
  {BFD_RELOC_AARCH64_LD64_GOT_LO12_NC, R_AARCH64_LD64_GOT_LO12_NC},

  /* Relocations for TLS.  */
  {BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21, R_AARCH64_TLSGD_ADR_PAGE21},
  {BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC, R_AARCH64_TLSGD_ADD_LO12_NC},
  {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
   R_AARCH64_TLSIE_MOVW_GOTTPREL_G1},
  {BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
   R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC},
  {BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
   R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21},
  {BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC,
   R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC},
  {BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19,
   R_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2, R_AARCH64_TLSLE_MOVW_TPREL_G2},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, R_AARCH64_TLSLE_MOVW_TPREL_G1},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
   R_AARCH64_TLSLE_MOVW_TPREL_G1_NC},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0, R_AARCH64_TLSLE_MOVW_TPREL_G0},
  {BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
   R_AARCH64_TLSLE_MOVW_TPREL_G0_NC},
  {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12, R_AARCH64_TLSLE_ADD_TPREL_LO12},
  {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12, R_AARCH64_TLSLE_ADD_TPREL_HI12},
  {BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
   R_AARCH64_TLSLE_ADD_TPREL_LO12_NC},
  {BFD_RELOC_AARCH64_TLSDESC_LD64_PREL19, R_AARCH64_TLSDESC_LD64_PREL19},
  {BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, R_AARCH64_TLSDESC_ADR_PREL21},
  {BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE, R_AARCH64_TLSDESC_ADR_PAGE},
  {BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC, R_AARCH64_TLSDESC_ADD_LO12_NC},
  {BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC, R_AARCH64_TLSDESC_LD64_LO12_NC},
  {BFD_RELOC_AARCH64_TLSDESC_OFF_G1, R_AARCH64_TLSDESC_OFF_G1},
  {BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC, R_AARCH64_TLSDESC_OFF_G0_NC},
  {BFD_RELOC_AARCH64_TLSDESC_LDR, R_AARCH64_TLSDESC_LDR},
  {BFD_RELOC_AARCH64_TLSDESC_ADD, R_AARCH64_TLSDESC_ADD},
  {BFD_RELOC_AARCH64_TLSDESC_CALL, R_AARCH64_TLSDESC_CALL},
  {BFD_RELOC_AARCH64_TLS_DTPMOD64, R_AARCH64_TLS_DTPMOD64},
  {BFD_RELOC_AARCH64_TLS_DTPREL64, R_AARCH64_TLS_DTPREL64},
  {BFD_RELOC_AARCH64_TLS_TPREL64, R_AARCH64_TLS_TPREL64},
  {BFD_RELOC_AARCH64_TLSDESC, R_AARCH64_TLSDESC},
/* bfd_reloc_type_lookup hook: translate the generic BFD reloc CODE into
   a howto entry via elf64_aarch64_reloc_map.  Returns NULL (after
   setting bfd_error_bad_value) if CODE is not supported.  */
static reloc_howto_type *
elf64_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
				 bfd_reloc_code_real_type code)
  for (i = 0; i < ARRAY_SIZE (elf64_aarch64_reloc_map); i++)
    if (elf64_aarch64_reloc_map[i].bfd_reloc_val == code)
      return elf64_aarch64_howto_from_type
	(elf64_aarch64_reloc_map[i].elf_reloc_val);

  bfd_set_error (bfd_error_bad_value);
/* bfd_reloc_name_lookup hook: find a howto entry whose name matches
   R_NAME, compared case-insensitively against the static howto table.  */
static reloc_howto_type *
elf64_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
  for (i = 0; i < ARRAY_SIZE (elf64_aarch64_howto_table); i++)
    if (elf64_aarch64_howto_table[i].name != NULL
	&& strcasecmp (elf64_aarch64_howto_table[i].name, r_name) == 0)
      return &elf64_aarch64_howto_table[i];
/* Support for core dump NOTE sections.  */

/* Parse an NT_PRSTATUS note: record the terminating signal and LWP id
   in the BFD's core tdata and expose the register dump as a
   ".reg/NNN" pseudo-section.  */
elf64_aarch64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
  switch (note->descsz)
      case 408:		/* sizeof(struct elf_prstatus) on Linux/arm64.  */
	/* pr_cursig is at offset 12 in struct elf_prstatus.  */
	elf_tdata (abfd)->core->signal
	  = bfd_get_16 (abfd, note->descdata + 12);
	/* pr_pid is at offset 32.  */
	elf_tdata (abfd)->core->lwpid
	  = bfd_get_32 (abfd, note->descdata + 32);
  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
/* BFD target vectors for little- and big-endian AArch64 ELF.  */
#define TARGET_LITTLE_SYM bfd_elf64_littleaarch64_vec
#define TARGET_LITTLE_NAME "elf64-littleaarch64"
#define TARGET_BIG_SYM bfd_elf64_bigaarch64_vec
#define TARGET_BIG_NAME "elf64-bigaarch64"

#define elf_backend_grok_prstatus elf64_aarch64_grok_prstatus

/* A 32-bit AArch64 instruction word.  */
typedef unsigned long int insn32;

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found. The
   name can be changed. The only requirement is the %s be present.  */
#define STUB_ENTRY_NAME "__%s_veneer"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"

/* Displacement limits of the B/BL branch instructions (26-bit signed
   word offset, i.e. +/-128MiB).
   NOTE(review): the AARCH64_MAX_BWD_BRANCH_OFFSET expansion line is
   missing from this excerpt.  */
#define AARCH64_MAX_FWD_BRANCH_OFFSET \
  (((1 << 25) - 1) << 2)
#define AARCH64_MAX_BWD_BRANCH_OFFSET \
#define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
#define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1581 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1583 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1584 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1588 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1590 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1591 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1592 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
/* Veneer reaching +/-4GiB: materialise the target address with an
   ADRP/ADD pair, then branch indirectly through ip0 (x16).  */
static const uint32_t aarch64_adrp_branch_stub [] =
  0x90000010,			/* adrp	ip0, X */
				/* R_AARCH64_ADR_HI21_PCREL(X) */
  0x91000210,			/* add	ip0, ip0, :lo12:X */
				/* R_AARCH64_ADD_ABS_LO12_NC(X) */
  0xd61f0200,			/* br	ip0 */
/* Full-range veneer: load a PC-relative 64-bit offset from the inline
   literal, add the current PC (taken with ADR), branch through ip0.  */
static const uint32_t aarch64_long_branch_stub[] =
  0x58000090,			/* ldr	ip0, 1f */
  0x10000011,			/* adr	ip1, #0 */
  0x8b110210,			/* add	ip0, ip0, ip1 */
  0xd61f0200,			/* br	ip0 */
  0x00000000,			/* 1:	.xword
				   R_AARCH64_PREL64(X) + 12
				 */
/* Section name for stubs is the associated section name plus this
   suffix.  */
#define STUB_SUFFIX ".stub"

/* The kinds of branch veneer this backend can build.  */
enum elf64_aarch64_stub_type
  aarch64_stub_adrp_branch,	/* ADRP/ADD/BR sequence.  */
  aarch64_stub_long_branch,	/* Literal-load sequence.  */
/* One entry in the stub hash table: everything needed to emit a branch
   veneer and its local symbol.
   NOTE(review): a few field declaration lines are missing from this
   excerpt; visible lines are preserved unchanged.  */
struct elf64_aarch64_stub_hash_entry
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  enum elf64_aarch64_stub_type stub_type;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf64_aarch64_link_hash_entry *h;

  /* Destination symbol type.  */
  unsigned char st_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
/* Used to build a map of a section.  This is required for mixed-endian
   support.  */
typedef struct elf64_elf_section_map
elf64_aarch64_section_map;

/* Per-section backend data: the generic ELF section data plus the
   section map built above.  */
typedef struct _aarch64_elf_section_data
  struct bfd_elf_section_data elf;	/* Base class; must come first.  */
  unsigned int mapcount;		/* Entries used in MAP.  */
  unsigned int mapsize;			/* Entries allocated for MAP.  */
  elf64_aarch64_section_map *map;	/* The section map itself.  */
_aarch64_elf_section_data;

/* Retrieve the AArch64-flavoured section data for SEC.  */
#define elf64_aarch64_section_data(sec) \
  ((_aarch64_elf_section_data *) elf_section_data (sec))
/* The size of the thread control block.  */

/* Per-local-symbol bookkeeping for GOT/TLS handling.  */
struct elf_aarch64_local_symbol
  unsigned int got_type;	/* GOT_* bitmask required by this symbol.  */
  bfd_signed_vma got_refcount;	/* Number of GOT references counted.  */

  /* Offset of the GOTPLT entry reserved for the TLS descriptor.  The
     offset is from the end of the jump table and reserved entries
     within the PLTGOT.
     The magic value (bfd_vma) -1 indicates that an offset has not been
     allocated.  */
  bfd_vma tlsdesc_got_jump_table_offset;
/* AArch64-specific per-BFD object data, extending the generic ELF
   tdata.  */
struct elf_aarch64_obj_tdata
  struct elf_obj_tdata root;	/* Generic ELF tdata; must be first.  */

  /* local symbol descriptors */
  struct elf_aarch64_local_symbol *locals;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;

/* Retrieve the AArch64 tdata attached to BFD.  */
#define elf_aarch64_tdata(bfd) \
  ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
/* Accessor for the local-symbol descriptor array of BFD.  */
#define elf64_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)

/* True if BFD is an ELF object carrying AArch64 backend data.  */
#define is_aarch64_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == AARCH64_ELF_DATA)

/* bfd_mkobject hook: allocate the AArch64-specific tdata for ABFD.  */
elf64_aarch64_mkobject (bfd *abfd)
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),

/* Downcast a generic hash entry to the AArch64 flavour.  */
#define elf64_aarch64_hash_entry(ent) \
  ((struct elf64_aarch64_link_hash_entry *)(ent))

/* Bitmask values describing which flavours of GOT entry a symbol
   requires.  */
#define GOT_UNKNOWN 0
#define GOT_NORMAL 1
#define GOT_TLS_GD 2
#define GOT_TLS_IE 4
#define GOT_TLSDESC_GD 8

/* True when TYPE requests either flavour of general-dynamic TLS GOT
   entry.  */
#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
/* AArch64 ELF linker hash entry.  */
struct elf64_aarch64_link_hash_entry
  struct elf_link_hash_entry root;	/* Generic entry; must be first.  */

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* Since PLT entries have variable size, we need to record the
     index into .got.plt instead of recomputing it from the PLT
     offset.  */
  bfd_signed_vma plt_got_offset;

  /* Bit mask representing the type of GOT entry(s) if any required by
     this symbol (GOT_* values above).  */
  unsigned int got_type;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf64_aarch64_stub_hash_entry *stub_cache;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor.  The offset
     is from the end of the jump table and reserved entries within the PLTGOT.
     The magic value (bfd_vma) -1 indicates that an offset has not been
     allocated.  */
  bfd_vma tlsdesc_got_jump_table_offset;
/* Return the GOT_* entry type required for global symbol H, or, when H
   is NULL, for the local symbol with index R_SYMNDX.
   NOTE(review): the branch selecting between the two cases is missing
   from this excerpt.  */
elf64_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
			       unsigned long r_symndx)
    return elf64_aarch64_hash_entry (h)->got_type;
  /* No locals recorded: nothing is known about this symbol.  */
  if (! elf64_aarch64_locals (abfd))
  return elf64_aarch64_locals (abfd)[r_symndx].got_type;
/* Traverse an AArch64 ELF linker hash table.  */
#define elf64_aarch64_link_hash_traverse(table, func, info) \
  (elf_link_hash_traverse \
   (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \

/* Get the AArch64 elf linker hash table from a link_info structure.  */
#define elf64_aarch64_hash_table(info) \
  ((struct elf64_aarch64_link_hash_table *) ((info)->hash))

/* Typed lookup in the stub hash table.  */
#define aarch64_stub_hash_lookup(table, string, create, copy) \
  ((struct elf64_aarch64_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))
/* AArch64 ELF linker hash table.
   NOTE(review): a few field declaration lines (including the stub
   group array) are missing from this excerpt.  */
struct elf64_aarch64_link_hash_table
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* Nonzero to force PIC branch veneers.  */

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* Short-cuts to get to dynamic linker sections.  */

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */

  /* Linker call-backs.  */
  asection *(*add_stub_section) (const char *, asection *);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */

    /* This is the section to which stubs in the group will be
       attached.  */

    /* The stub section.  */

  /* Assorted information used by elf64_aarch64_size_stubs.  */
  unsigned int bfd_count;
  asection **input_list;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma tlsdesc_plt;

  /* The GOT offset for the lazy trampoline.  Communicated to the
     loader via DT_TLSDESC_GOT.  The magic value (bfd_vma) -1
     indicates an offset is not allocated.  */
  bfd_vma dt_tlsdesc_got;
/* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by an unsigned number with the indicated number of
1874 static bfd_reloc_status_type
1875 aarch64_unsigned_overflow (bfd_vma value, unsigned int bits)
1878 if (bits >= sizeof (bfd_vma) * 8)
1879 return bfd_reloc_ok;
1880 lim = (bfd_vma) 1 << bits;
1882 return bfd_reloc_overflow;
1883 return bfd_reloc_ok;
/* Return non-zero if the indicated VALUE has overflowed the maximum
   range expressible by a signed number with the indicated number of
1891 static bfd_reloc_status_type
1892 aarch64_signed_overflow (bfd_vma value, unsigned int bits)
1894 bfd_signed_vma svalue = (bfd_signed_vma) value;
1897 if (bits >= sizeof (bfd_vma) * 8)
1898 return bfd_reloc_ok;
1899 lim = (bfd_signed_vma) 1 << (bits - 1);
1900 if (svalue < -lim || svalue >= lim)
1901 return bfd_reloc_overflow;
1902 return bfd_reloc_ok;
/* Create an entry in an AArch64 ELF linker hash table.  */

static struct bfd_hash_entry *
elf64_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
				 struct bfd_hash_table *table,
  struct elf64_aarch64_link_hash_entry *ret =
    (struct elf64_aarch64_link_hash_entry *) entry;

  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
      ret = bfd_hash_allocate (table,
			       sizeof (struct elf64_aarch64_link_hash_entry));
	return (struct bfd_hash_entry *) ret;

  /* Call the allocation method of the superclass.  */
  ret = ((struct elf64_aarch64_link_hash_entry *)
	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
      /* Initialise the backend-specific fields to their "unset"
	 values.  */
      ret->dyn_relocs = NULL;
      ret->got_type = GOT_UNKNOWN;
      ret->plt_got_offset = (bfd_vma) - 1;
      ret->stub_cache = NULL;
      ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;

  return (struct bfd_hash_entry *) ret;
/* Initialize an entry in the stub hash table.  */

static struct bfd_hash_entry *
stub_hash_newfunc (struct bfd_hash_entry *entry,
		   struct bfd_hash_table *table, const char *string)
  /* Allocate the structure if it has not already been allocated by a
     subclass.  */
      entry = bfd_hash_allocate (table,
				 elf64_aarch64_stub_hash_entry));

  /* Call the allocation method of the superclass.  */
  entry = bfd_hash_newfunc (entry, table, string);
      struct elf64_aarch64_stub_hash_entry *eh;

      /* Initialize the local fields.  */
      eh = (struct elf64_aarch64_stub_hash_entry *) entry;
      eh->stub_sec = NULL;
      eh->stub_offset = 0;
      eh->target_value = 0;
      eh->target_section = NULL;
      eh->stub_type = aarch64_stub_none;
/* Copy the extra info we tack onto an elf_link_hash_entry.  */

elf64_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
				    struct elf_link_hash_entry *dir,
				    struct elf_link_hash_entry *ind)
  struct elf64_aarch64_link_hash_entry *edir, *eind;

  edir = (struct elf64_aarch64_link_hash_entry *) dir;
  eind = (struct elf64_aarch64_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
      if (edir->dyn_relocs != NULL)
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		    q->pc_count += p->pc_count;
		    q->count += p->count;
	  /* Splice any remaining (unmerged) entries onto the direct
	     symbol's list.  */
	  *pp = edir->dyn_relocs;

      /* Hand the whole list over to the direct symbol.  */
      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;

  if (ind->root.type == bfd_link_hash_indirect)
      /* Copy over PLT info.  */
      if (dir->got.refcount <= 0)
	  edir->got_type = eind->got_type;
	  eind->got_type = GOT_UNKNOWN;

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
/* Create an AArch64 elf linker hash table.  */

static struct bfd_link_hash_table *
elf64_aarch64_link_hash_table_create (bfd *abfd)
  struct elf64_aarch64_link_hash_table *ret;
  bfd_size_type amt = sizeof (struct elf64_aarch64_link_hash_table);

  /* Zeroed so that all pointer members start out NULL.  */
  ret = bfd_zmalloc (amt);
  if (!_bfd_elf_link_hash_table_init
      (&ret->root, abfd, elf64_aarch64_link_hash_newfunc,
       sizeof (struct elf64_aarch64_link_hash_entry), AARCH64_ELF_DATA))
  ret->plt_header_size = PLT_ENTRY_SIZE;
  ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
  /* -1 marks the lazy-trampoline GOT offset as unallocated.  */
  ret->dt_tlsdesc_got = (bfd_vma) - 1;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf64_aarch64_stub_hash_entry)))
  return &ret->root.root;
/* Free the derived linker hash table.  */

elf64_aarch64_hash_table_free (struct bfd_link_hash_table *hash)
  struct elf64_aarch64_link_hash_table *ret
    = (struct elf64_aarch64_link_hash_table *) hash;

  /* Release the stub table before the generic table that owns it.  */
  bfd_hash_table_free (&ret->stub_hash_table);
  _bfd_elf_link_hash_table_free (hash);
/* Compute the value to be stored for relocation type R_TYPE applied at
   address PLACE, given the symbol VALUE and ADDEND.  WEAK_UNDEF_P
   presumably flags undefined weak symbols -- TODO confirm against the
   full source.
   NOTE(review): the switch framing, the break statements and a few
   case bodies are missing from this excerpt; visible lines are
   preserved unchanged.  */
aarch64_resolve_relocation (unsigned int r_type, bfd_vma place, bfd_vma value,
			    bfd_vma addend, bfd_boolean weak_undef_p)
    /* No bits to patch for these annotation relocations.  */
    case R_AARCH64_TLSDESC_CALL:
    case R_AARCH64_NONE:
    case R_AARCH64_NULL:

    /* PC-relative forms: S + A - P.  */
    case R_AARCH64_ADR_PREL_LO21:
    case R_AARCH64_CONDBR19:
    case R_AARCH64_LD_PREL_LO19:
    case R_AARCH64_PREL16:
    case R_AARCH64_PREL32:
    case R_AARCH64_PREL64:
    case R_AARCH64_TSTBR14:
      value = value + addend - place;

    case R_AARCH64_CALL26:
    case R_AARCH64_JUMP26:
      value = value + addend - place;

    /* Absolute forms: S + A.  */
    case R_AARCH64_ABS16:
    case R_AARCH64_ABS32:
    case R_AARCH64_MOVW_SABS_G0:
    case R_AARCH64_MOVW_SABS_G1:
    case R_AARCH64_MOVW_SABS_G2:
    case R_AARCH64_MOVW_UABS_G0:
    case R_AARCH64_MOVW_UABS_G0_NC:
    case R_AARCH64_MOVW_UABS_G1:
    case R_AARCH64_MOVW_UABS_G1_NC:
    case R_AARCH64_MOVW_UABS_G2:
    case R_AARCH64_MOVW_UABS_G2_NC:
    case R_AARCH64_MOVW_UABS_G3:
      value = value + addend;

    /* Page-relative ADRP forms: Page(S + A) - Page(P).  */
    case R_AARCH64_ADR_PREL_PG_HI21:
    case R_AARCH64_ADR_PREL_PG_HI21_NC:
      value = PG (value + addend) - PG (place);

    case R_AARCH64_GOT_LD_PREL19:
      value = value + addend - place;

    case R_AARCH64_ADR_GOT_PAGE:
    case R_AARCH64_TLSDESC_ADR_PAGE:
    case R_AARCH64_TLSGD_ADR_PAGE21:
    case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
      value = PG (value + addend) - PG (place);

    /* Low-12-bit page-offset forms.  */
    case R_AARCH64_ADD_ABS_LO12_NC:
    case R_AARCH64_LD64_GOT_LO12_NC:
    case R_AARCH64_LDST8_ABS_LO12_NC:
    case R_AARCH64_LDST16_ABS_LO12_NC:
    case R_AARCH64_LDST32_ABS_LO12_NC:
    case R_AARCH64_LDST64_ABS_LO12_NC:
    case R_AARCH64_LDST128_ABS_LO12_NC:
    case R_AARCH64_TLSDESC_ADD_LO12_NC:
    case R_AARCH64_TLSDESC_ADD:
    case R_AARCH64_TLSDESC_LD64_LO12_NC:
    case R_AARCH64_TLSDESC_LDR:
    case R_AARCH64_TLSGD_ADD_LO12_NC:
    case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
    case R_AARCH64_TLSLE_ADD_TPREL_LO12:
    case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
      value = PG_OFFSET (value + addend);

    /* Local-exec TPREL group fields: select 16-bit (or 12-bit)
       slices of the thread-pointer offset.  */
    case R_AARCH64_TLSLE_MOVW_TPREL_G1:
    case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
      value = (value + addend) & (bfd_vma) 0xffff0000;
    case R_AARCH64_TLSLE_ADD_TPREL_HI12:
      value = (value + addend) & (bfd_vma) 0xfff000;

    case R_AARCH64_TLSLE_MOVW_TPREL_G0:
    case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
      value = (value + addend) & (bfd_vma) 0xffff;

    case R_AARCH64_TLSLE_MOVW_TPREL_G2:
      value = (value + addend) & ~(bfd_vma) 0xffffffff;
      value -= place & ~(bfd_vma) 0xffffffff;
/* Resolve relocation R_TYPE at OFFSET in INPUT_SECTION against VALUE
   (with a zero addend) and patch the section contents in place.  */
aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
		  bfd_vma offset, bfd_vma value)
  reloc_howto_type *howto;

  howto = elf64_aarch64_howto_from_type (r_type);
  /* The address being relocated, in the output image.  */
  place = (input_section->output_section->vma + input_section->output_offset
  value = aarch64_resolve_relocation (r_type, place, value, 0, FALSE);
  return bfd_elf_aarch64_put_addend (input_bfd,
				     input_section->contents + offset,
2196 static enum elf64_aarch64_stub_type
2197 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2199 if (aarch64_valid_for_adrp_p (value, place))
2200 return aarch64_stub_adrp_branch;
2201 return aarch64_stub_long_branch;
/* Determine the type of stub needed, if any, for a call.
   NOTE(review): the tail of this function (including its return
   statement) is missing from this excerpt.  */

static enum elf64_aarch64_stub_type
aarch64_type_of_stub (struct bfd_link_info *info,
		      asection *input_sec,
		      const Elf_Internal_Rela *rel,
		      unsigned char st_type,
		      struct elf64_aarch64_link_hash_entry *hash,
		      bfd_vma destination)
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf64_aarch64_link_hash_table *globals;
  enum elf64_aarch64_stub_type stub_type = aarch64_stub_none;
  bfd_boolean via_plt_p;

  /* Only function symbols ever need a veneer.  */
  if (st_type != STT_FUNC)

  globals = elf64_aarch64_hash_table (info);
  /* Whether the call will be redirected through a PLT entry.  */
  via_plt_p = (globals->root.splt != NULL && hash != NULL
	       && hash->root.plt.offset != (bfd_vma) - 1);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma + rel->r_offset);

  branch_offset = (bfd_signed_vma) (destination - location);

  r_type = ELF64_R_TYPE (rel->r_info);

  /* We don't want to redirect any old unconditional jump in this way,
     only one which is being used for a sibcall, where it is
     acceptable for the IP0 and IP1 registers to be clobbered.  */
  if ((r_type == R_AARCH64_CALL26 || r_type == R_AARCH64_JUMP26)
      && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
	  || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
      stub_type = aarch64_stub_long_branch;
2252 /* Build a name for an entry in the stub hash table. */
/* Global symbols yield "<section id>_<name>+<addend>"; local symbols
   yield "<section id>_<sym sec id>:<sym index>+<addend>".  Returns a
   bfd_malloc'd string, or NULL on allocation failure.  */
2255 elf64_aarch64_stub_name (const asection *input_section,
2256 const asection *sym_sec,
2257 const struct elf64_aarch64_link_hash_entry *hash,
2258 const Elf_Internal_Rela *rel)
/* 8 hex digits of section id + '_' + name + '+' + up to 16 hex digits
   of addend + NUL.  */
2265 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2266 stub_name = bfd_malloc (len);
2267 if (stub_name != NULL)
2268 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2269 (unsigned int) input_section->id,
2270 hash->root.root.root.string,
/* Local symbol: identified by symbol section id and symbol index.  */
2275 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2276 stub_name = bfd_malloc (len);
2277 if (stub_name != NULL)
2278 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2279 (unsigned int) input_section->id,
2280 (unsigned int) sym_sec->id,
2281 (unsigned int) ELF64_R_SYM (rel->r_info),
2288 /* Look up an entry in the stub hash. Stub entries are cached because
2289 creating the stub name takes a bit of time. */
2291 static struct elf64_aarch64_stub_hash_entry *
2292 elf64_aarch64_get_stub_entry (const asection *input_section,
2293 const asection *sym_sec,
2294 struct elf_link_hash_entry *hash,
2295 const Elf_Internal_Rela *rel,
2296 struct elf64_aarch64_link_hash_table *htab)
2298 struct elf64_aarch64_stub_hash_entry *stub_entry;
2299 struct elf64_aarch64_link_hash_entry *h =
2300 (struct elf64_aarch64_link_hash_entry *) hash;
2301 const asection *id_sec;
/* Stubs are only meaningful for code sections.  */
2303 if ((input_section->flags & SEC_CODE) == 0)
2306 /* If this input section is part of a group of sections sharing one
2307 stub section, then use the id of the first section in the group.
2308 Stub names need to include a section id, as there may well be
2309 more than one stub used to reach say, printf, and we need to
2310 distinguish between them. */
2311 id_sec = htab->stub_group[input_section->id].link_sec;
/* Fast path: reuse the cached entry when it matches this symbol and
   this stub group.  */
2313 if (h != NULL && h->stub_cache != NULL
2314 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2316 stub_entry = h->stub_cache;
/* Slow path: build the stub name and look it up in the hash table.  */
2322 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, h, rel);
2323 if (stub_name == NULL)
2326 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2327 stub_name, FALSE, FALSE);
/* Remember the result for subsequent lookups of the same symbol.  */
2329 h->stub_cache = stub_entry;
2337 /* Add a new stub entry to the stub hash. Not all fields of the new
2338 stub entry are initialised. */
2340 static struct elf64_aarch64_stub_hash_entry *
2341 elf64_aarch64_add_stub (const char *stub_name,
2343 struct elf64_aarch64_link_hash_table *htab)
2347 struct elf64_aarch64_stub_hash_entry *stub_entry;
2349 link_sec = htab->stub_group[section->id].link_sec;
2350 stub_sec = htab->stub_group[section->id].stub_sec;
/* Create the group's stub section lazily on first use.  */
2351 if (stub_sec == NULL)
2353 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2354 if (stub_sec == NULL)
/* The new section is named "<link_sec name><STUB_SUFFIX>".  */
2360 namelen = strlen (link_sec->name);
2361 len = namelen + sizeof (STUB_SUFFIX);
2362 s_name = bfd_alloc (htab->stub_bfd, len);
2366 memcpy (s_name, link_sec->name, namelen);
2367 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2368 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
2369 if (stub_sec == NULL)
2371 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2373 htab->stub_group[section->id].stub_sec = stub_sec;
2376 /* Enter this entry into the linker stub hash table. */
2377 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2379 if (stub_entry == NULL)
2381 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2382 section->owner, stub_name);
/* Offset is filled in later, when the stub is built.  */
2386 stub_entry->stub_sec = stub_sec;
2387 stub_entry->stub_offset = 0;
2388 stub_entry->id_sec = link_sec;
/* Called via bfd_hash_traverse: emit one stub's instruction template
   into its stub section and apply the relocations that point the stub
   at its final target.  */
2394 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2395 void *in_arg ATTRIBUTE_UNUSED)
2397 struct elf64_aarch64_stub_hash_entry *stub_entry;
2402 unsigned int template_size;
2403 const uint32_t *template;
2406 /* Massage our args to the form they really have. */
2407 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
2409 stub_sec = stub_entry->stub_sec;
2411 /* Make a note of the offset within the stubs for this entry. */
2412 stub_entry->stub_offset = stub_sec->size;
2413 loc = stub_sec->contents + stub_entry->stub_offset;
2415 stub_bfd = stub_sec->owner;
2417 /* This is the address of the stub destination. */
2418 sym_value = (stub_entry->target_value
2419 + stub_entry->target_section->output_offset
2420 + stub_entry->target_section->output_section->vma);
2422 if (stub_entry->stub_type == aarch64_stub_long_branch)
2424 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2425 + stub_sec->output_offset);
2427 /* See if we can relax the stub. */
/* Downgrade to the shorter ADRP-based stub when in range.  */
2428 if (aarch64_valid_for_adrp_p (sym_value, place))
2429 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
/* Select the instruction template for this stub type.  */
2432 switch (stub_entry->stub_type)
2434 case aarch64_stub_adrp_branch:
2435 template = aarch64_adrp_branch_stub;
2436 template_size = sizeof (aarch64_adrp_branch_stub);
2438 case aarch64_stub_long_branch:
2439 template = aarch64_long_branch_stub;
2440 template_size = sizeof (aarch64_long_branch_stub);
/* Copy the template out one little-endian 32-bit word at a time.  */
2447 for (i = 0; i < (template_size / sizeof template[0]); i++)
2449 bfd_putl32 (template[i], loc);
/* Keep the stub section size 8-byte aligned; must match the rounding
   in aarch64_size_one_stub.  */
2453 template_size = (template_size + 7) & ~7;
2454 stub_sec->size += template_size;
/* Apply the relocations that target the stub at SYM_VALUE.  */
2456 switch (stub_entry->stub_type)
2458 case aarch64_stub_adrp_branch:
2459 if (aarch64_relocate (R_AARCH64_ADR_PREL_PG_HI21, stub_bfd, stub_sec,
2460 stub_entry->stub_offset, sym_value))
2461 /* The stub would not have been relaxed if the offset was out
2465 _bfd_final_link_relocate
2466 (elf64_aarch64_howto_from_type (R_AARCH64_ADD_ABS_LO12_NC),
2470 stub_entry->stub_offset + 4,
2475 case aarch64_stub_long_branch:
2476 /* We want the value relative to the address 12 bytes back from the
2478 _bfd_final_link_relocate (elf64_aarch64_howto_from_type
2479 (R_AARCH64_PREL64), stub_bfd, stub_sec,
2481 stub_entry->stub_offset + 16,
2491 /* As above, but don't actually build the stub. Just bump offset so
2492 we know stub section sizes. */
2495 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2496 void *in_arg ATTRIBUTE_UNUSED)
2498 struct elf64_aarch64_stub_hash_entry *stub_entry;
2501 /* Massage our args to the form they really have. */
2502 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
/* SIZE is the byte size of the instruction template for this type.  */
2504 switch (stub_entry->stub_type)
2506 case aarch64_stub_adrp_branch:
2507 size = sizeof (aarch64_adrp_branch_stub);
2509 case aarch64_stub_long_branch:
2510 size = sizeof (aarch64_long_branch_stub);
/* Round up to 8 bytes, matching aarch64_build_one_stub.  */
2518 size = (size + 7) & ~7;
2519 stub_entry->stub_sec->size += size;
2523 /* External entry points for sizing and building linker stubs. */
2525 /* Set up various things so that we can make a list of input sections
2526 for each output section included in the link. Returns -1 on error,
2527 0 when no stubs will be needed, and 1 on success. */
2530 elf64_aarch64_setup_section_lists (bfd *output_bfd,
2531 struct bfd_link_info *info)
2534 unsigned int bfd_count;
2535 int top_id, top_index;
2537 asection **input_list, **list;
2539 struct elf64_aarch64_link_hash_table *htab =
2540 elf64_aarch64_hash_table (info);
2542 if (!is_elf_hash_table (htab))
2545 /* Count the number of input BFDs and find the top input section id. */
2546 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2547 input_bfd != NULL; input_bfd = input_bfd->link_next)
2550 for (section = input_bfd->sections;
2551 section != NULL; section = section->next)
2553 if (top_id < section->id)
2554 top_id = section->id;
2557 htab->bfd_count = bfd_count;
/* One (zeroed) stub-group slot per input section id.  */
2559 amt = sizeof (struct map_stub) * (top_id + 1);
2560 htab->stub_group = bfd_zmalloc (amt);
2561 if (htab->stub_group == NULL)
2564 /* We can't use output_bfd->section_count here to find the top output
2565 section index as some sections may have been removed, and
2566 _bfd_strip_section_from_output doesn't renumber the indices. */
2567 for (section = output_bfd->sections, top_index = 0;
2568 section != NULL; section = section->next)
2570 if (top_index < section->index)
2571 top_index = section->index;
2574 htab->top_index = top_index;
2575 amt = sizeof (asection *) * (top_index + 1);
2576 input_list = bfd_malloc (amt);
2577 htab->input_list = input_list;
2578 if (input_list == NULL)
2581 /* For sections we aren't interested in, mark their entries with a
2582 value we can check later. */
2583 list = input_list + top_index;
2585 *list = bfd_abs_section_ptr;
2586 while (list-- != input_list);
/* Code sections get a NULL list head; their input lists are filled in
   later by elf64_aarch64_next_input_section.  */
2588 for (section = output_bfd->sections;
2589 section != NULL; section = section->next)
2591 if ((section->flags & SEC_CODE) != 0)
2592 input_list[section->index] = NULL;
2598 /* Used by elf64_aarch64_next_input_section and group_sections. */
/* While the input-section lists are being built, the link_sec field
   of stub_group is borrowed as a "previous input section" link.  */
2599 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2601 /* The linker repeatedly calls this function for each input section,
2602 in the order that input sections are linked into output sections.
2603 Build lists of input sections to determine groupings between which
2604 we may insert linker stubs. */
2607 elf64_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2609 struct elf64_aarch64_link_hash_table *htab =
2610 elf64_aarch64_hash_table (info);
2612 if (isec->output_section->index <= htab->top_index)
2614 asection **list = htab->input_list + isec->output_section->index;
/* bfd_abs_section_ptr marks output sections we skipped in
   elf64_aarch64_setup_section_lists.  */
2616 if (*list != bfd_abs_section_ptr)
2618 /* Steal the link_sec pointer for our list. */
2619 /* This happens to make the list in reverse order,
2620 which is what we want. */
2621 PREV_SEC (isec) = *list;
2627 /* See whether we can group stub sections together. Grouping stub
2628 sections may result in fewer stubs. More importantly, we need to
2629 put all .init* and .fini* stubs at the beginning of the .init or
2630 .fini output sections respectively, because glibc splits the
2631 _init and _fini functions into multiple parts. Putting a stub in
2632 the middle of a function is not a good idea. */
2635 group_sections (struct elf64_aarch64_link_hash_table *htab,
2636 bfd_size_type stub_group_size,
2637 bfd_boolean stubs_always_before_branch)
/* Walk every output section's (reversed) input-section list.  */
2639 asection **list = htab->input_list + htab->top_index;
2643 asection *tail = *list;
2645 if (tail == bfd_abs_section_ptr)
2648 while (tail != NULL)
2652 bfd_size_type total;
/* Gather sections backwards from TAIL while the accumulated span
   stays within STUB_GROUP_SIZE.  */
2656 while ((prev = PREV_SEC (curr)) != NULL
2657 && ((total += curr->output_offset - prev->output_offset)
2661 /* OK, the size from the start of CURR to the end is less
2662 than stub_group_size and thus can be handled by one stub
2663 section. (Or the tail section is itself larger than
2664 stub_group_size, in which case we may be toast.)
2665 We should really be keeping track of the total size of
2666 stubs added here, as stubs contribute to the final output
/* Point every member of the group at CURR, the group leader.  */
2670 prev = PREV_SEC (tail);
2671 /* Set up this stub group. */
2672 htab->stub_group[tail->id].link_sec = curr;
2674 while (tail != curr && (tail = prev) != NULL);
2676 /* But wait, there's more! Input sections up to stub_group_size
2677 bytes before the stub section can be handled by it too. */
2678 if (!stubs_always_before_branch)
2682 && ((total += tail->output_offset - prev->output_offset)
2686 prev = PREV_SEC (tail);
2687 htab->stub_group[tail->id].link_sec = curr;
/* The raw input lists are no longer needed once grouping is done.  */
2693 while (list-- != htab->input_list);
2695 free (htab->input_list);
2700 /* Determine and set the size of the stub section for a final link.
2702 The basic idea here is to examine all the relocations looking for
2703 PC-relative calls to a target that is unreachable with a "bl"
2707 elf64_aarch64_size_stubs (bfd *output_bfd,
2709 struct bfd_link_info *info,
2710 bfd_signed_vma group_size,
2711 asection * (*add_stub_section) (const char *,
2713 void (*layout_sections_again) (void))
2715 bfd_size_type stub_group_size;
2716 bfd_boolean stubs_always_before_branch;
2717 bfd_boolean stub_changed = 0;
2718 struct elf64_aarch64_link_hash_table *htab = elf64_aarch64_hash_table (info);
2720 /* Propagate mach to stub bfd, because it may not have been
2721 finalized when we created stub_bfd. */
2722 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
2723 bfd_get_mach (output_bfd));
2725 /* Stash our params away. */
2726 htab->stub_bfd = stub_bfd;
2727 htab->add_stub_section = add_stub_section;
2728 htab->layout_sections_again = layout_sections_again;
/* A negative GROUP_SIZE means stubs go before the branches they
   serve; the magnitude is the group size either way.  */
2729 stubs_always_before_branch = group_size < 0;
2731 stub_group_size = -group_size;
2733 stub_group_size = group_size;
2735 if (stub_group_size == 1)
2737 /* Default values. */
2738 /* Aarch64 branch range is +-128MB. The value used is 1MB less. */
2739 stub_group_size = 127 * 1024 * 1024;
2742 group_sections (htab, stub_group_size, stubs_always_before_branch);
/* Scan every input BFD for branches that need stubs.  */
2747 unsigned int bfd_indx;
2750 for (input_bfd = info->input_bfds, bfd_indx = 0;
2751 input_bfd != NULL; input_bfd = input_bfd->link_next, bfd_indx++)
2753 Elf_Internal_Shdr *symtab_hdr;
2755 Elf_Internal_Sym *local_syms = NULL;
2757 /* We'll need the symbol table in a second. */
2758 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
2759 if (symtab_hdr->sh_info == 0)
2762 /* Walk over each section attached to the input bfd. */
2763 for (section = input_bfd->sections;
2764 section != NULL; section = section->next)
2766 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2768 /* If there aren't any relocs, then there's nothing more
2770 if ((section->flags & SEC_RELOC) == 0
2771 || section->reloc_count == 0
2772 || (section->flags & SEC_CODE) == 0)
2775 /* If this section is a link-once section that will be
2776 discarded, then don't create any stubs. */
2777 if (section->output_section == NULL
2778 || section->output_section->owner != output_bfd)
2781 /* Get the relocs. */
2783 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
2784 NULL, info->keep_memory)
2785 if (internal_relocs == NULL)
2786 goto error_ret_free_local;
2788 /* Now examine each relocation. */
2789 irela = internal_relocs;
2790 irelaend = irela + section->reloc_count;
2791 for (; irela < irelaend; irela++)
2793 unsigned int r_type, r_indx;
2794 enum elf64_aarch64_stub_type stub_type;
2795 struct elf64_aarch64_stub_hash_entry *stub_entry;
2798 bfd_vma destination;
2799 struct elf64_aarch64_link_hash_entry *hash;
2800 const char *sym_name;
2802 const asection *id_sec;
2803 unsigned char st_type;
2806 r_type = ELF64_R_TYPE (irela->r_info);
2807 r_indx = ELF64_R_SYM (irela->r_info);
/* Sanity-check the relocation type before using it.  */
2809 if (r_type >= (unsigned int) R_AARCH64_end)
2811 bfd_set_error (bfd_error_bad_value);
2812 error_ret_free_internal:
2813 if (elf_section_data (section)->relocs == NULL)
2814 free (internal_relocs);
2815 goto error_ret_free_local;
2818 /* Only look for stubs on unconditional branch and
2819 branch and link instructions. */
2820 if (r_type != (unsigned int) R_AARCH64_CALL26
2821 && r_type != (unsigned int) R_AARCH64_JUMP26)
2824 /* Now determine the call target, its name, value,
2831 if (r_indx < symtab_hdr->sh_info)
2833 /* It's a local symbol. */
2834 Elf_Internal_Sym *sym;
2835 Elf_Internal_Shdr *hdr;
/* Read local symbols lazily, once per input BFD.  */
2837 if (local_syms == NULL)
2840 = (Elf_Internal_Sym *) symtab_hdr->contents;
2841 if (local_syms == NULL)
2843 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
2844 symtab_hdr->sh_info, 0,
2846 if (local_syms == NULL)
2847 goto error_ret_free_internal;
2850 sym = local_syms + r_indx;
2851 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
2852 sym_sec = hdr->bfd_section;
2854 /* This is an undefined symbol. It can never
2858 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
2859 sym_value = sym->st_value;
2860 destination = (sym_value + irela->r_addend
2861 + sym_sec->output_offset
2862 + sym_sec->output_section->vma);
2863 st_type = ELF_ST_TYPE (sym->st_info);
2865 = bfd_elf_string_from_elf_section (input_bfd,
2866 symtab_hdr->sh_link,
/* Global symbol: chase indirect/warning links first.  */
2873 e_indx = r_indx - symtab_hdr->sh_info;
2874 hash = ((struct elf64_aarch64_link_hash_entry *)
2875 elf_sym_hashes (input_bfd)[e_indx]);
2877 while (hash->root.root.type == bfd_link_hash_indirect
2878 || hash->root.root.type == bfd_link_hash_warning)
2879 hash = ((struct elf64_aarch64_link_hash_entry *)
2880 hash->root.root.u.i.link);
2882 if (hash->root.root.type == bfd_link_hash_defined
2883 || hash->root.root.type == bfd_link_hash_defweak)
2885 struct elf64_aarch64_link_hash_table *globals =
2886 elf64_aarch64_hash_table (info);
2887 sym_sec = hash->root.root.u.def.section;
2888 sym_value = hash->root.root.u.def.value;
2889 /* For a destination in a shared library,
2890 use the PLT stub as target address to
2891 decide whether a branch stub is
2893 if (globals->root.splt != NULL && hash != NULL
2894 && hash->root.plt.offset != (bfd_vma) - 1)
2896 sym_sec = globals->root.splt;
2897 sym_value = hash->root.plt.offset;
2898 if (sym_sec->output_section != NULL)
2899 destination = (sym_value
2900 + sym_sec->output_offset
2902 sym_sec->output_section->vma);
2904 else if (sym_sec->output_section != NULL)
2905 destination = (sym_value + irela->r_addend
2906 + sym_sec->output_offset
2907 + sym_sec->output_section->vma);
2909 else if (hash->root.root.type == bfd_link_hash_undefined
2910 || (hash->root.root.type
2911 == bfd_link_hash_undefweak))
2913 /* For a shared library, use the PLT stub as
2914 target address to decide whether a long
2915 branch stub is needed.
2916 For absolute code, they cannot be handled. */
2917 struct elf64_aarch64_link_hash_table *globals =
2918 elf64_aarch64_hash_table (info);
2920 if (globals->root.splt != NULL && hash != NULL
2921 && hash->root.plt.offset != (bfd_vma) - 1)
2923 sym_sec = globals->root.splt;
2924 sym_value = hash->root.plt.offset;
2925 if (sym_sec->output_section != NULL)
2926 destination = (sym_value
2927 + sym_sec->output_offset
2929 sym_sec->output_section->vma);
2936 bfd_set_error (bfd_error_bad_value);
2937 goto error_ret_free_internal;
2939 st_type = ELF_ST_TYPE (hash->root.type);
2940 sym_name = hash->root.root.root.string;
2943 /* Determine what (if any) linker stub is needed. */
2944 stub_type = aarch64_type_of_stub
2945 (info, section, irela, st_type, hash, destination);
2946 if (stub_type == aarch64_stub_none)
2949 /* Support for grouping stub sections. */
2950 id_sec = htab->stub_group[section->id].link_sec;
2952 /* Get the name of this stub. */
2953 stub_name = elf64_aarch64_stub_name (id_sec, sym_sec, hash,
2956 goto error_ret_free_internal;
2959 aarch64_stub_hash_lookup (&htab->stub_hash_table,
2960 stub_name, FALSE, FALSE);
2961 if (stub_entry != NULL)
2963 /* The proper stub has already been created. */
2968 stub_entry = elf64_aarch64_add_stub (stub_name, section,
2970 if (stub_entry == NULL)
2973 goto error_ret_free_internal;
2976 stub_entry->target_value = sym_value;
2977 stub_entry->target_section = sym_sec;
2978 stub_entry->stub_type = stub_type;
2979 stub_entry->h = hash;
2980 stub_entry->st_type = st_type;
2982 if (sym_name == NULL)
2983 sym_name = "unnamed";
2984 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
2985 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
2986 if (stub_entry->output_name == NULL)
2989 goto error_ret_free_internal;
2992 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
2995 stub_changed = TRUE;
2998 /* We're done with the internal relocs, free them. */
2999 if (elf_section_data (section)->relocs == NULL)
3000 free (internal_relocs);
3007 /* OK, we've added some stubs. Find out the new size of the
3009 for (stub_sec = htab->stub_bfd->sections;
3010 stub_sec != NULL; stub_sec = stub_sec->next)
3013 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3015 /* Ask the linker to do its stuff. */
3016 (*htab->layout_sections_again) ();
3017 stub_changed = FALSE;
3022 error_ret_free_local:
3026 /* Build all the stubs associated with the current output file. The
3027 stubs are kept in a hash table attached to the main linker hash
3028 table. We also set up the .plt entries for statically linked PIC
3029 functions here. This function is called via aarch64_elf_finish in the
3033 elf64_aarch64_build_stubs (struct bfd_link_info *info)
3036 struct bfd_hash_table *table;
3037 struct elf64_aarch64_link_hash_table *htab;
3039 htab = elf64_aarch64_hash_table (info);
/* First pass: allocate (zeroed) contents for every stub section.  */
3041 for (stub_sec = htab->stub_bfd->sections;
3042 stub_sec != NULL; stub_sec = stub_sec->next)
3046 /* Ignore non-stub sections. */
3047 if (!strstr (stub_sec->name, STUB_SUFFIX))
3050 /* Allocate memory to hold the linker stubs. */
3051 size = stub_sec->size;
3052 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3053 if (stub_sec->contents == NULL && size != 0)
3058 /* Build the stubs as directed by the stub hash table. */
3059 table = &htab->stub_hash_table;
3060 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3066 /* Add an entry to the code/data map for section SEC. */
/* Records that a mapping symbol of kind TYPE starts at VMA within
   SEC.  The map array grows by doubling.  */
3069 elf64_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3071 struct _aarch64_elf_section_data *sec_data =
3072 elf64_aarch64_section_data (sec);
3073 unsigned int newidx;
/* Lazily allocate an initial one-entry map.  */
3075 if (sec_data->map == NULL)
3077 sec_data->map = bfd_malloc (sizeof (elf64_aarch64_section_map));
3078 sec_data->mapcount = 0;
3079 sec_data->mapsize = 1;
3082 newidx = sec_data->mapcount++;
/* Double the capacity when the count outgrows it.  */
3084 if (sec_data->mapcount > sec_data->mapsize)
3086 sec_data->mapsize *= 2;
3087 sec_data->map = bfd_realloc_or_free
3088 (sec_data->map, sec_data->mapsize * sizeof (elf64_aarch64_section_map));
/* NOTE(review): if the (re)allocation failed, these stores would
   dereference NULL -- confirm the elided lines guard against that.  */
3093 sec_data->map[newidx].vma = vma;
3094 sec_data->map[newidx].type = type;
3099 /* Initialise maps of insn/data for input BFDs. */
3101 bfd_elf64_aarch64_init_maps (bfd *abfd)
3103 Elf_Internal_Sym *isymbuf;
3104 Elf_Internal_Shdr *hdr;
3105 unsigned int i, localsyms;
3107 /* Make sure that we are dealing with an AArch64 elf binary. */
3108 if (!is_aarch64_elf (abfd))
3111 if ((abfd->flags & DYNAMIC) != 0)
3114 hdr = &elf_symtab_hdr (abfd);
3115 localsyms = hdr->sh_info;
3117 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3118 should contain the number of local symbols, which should come before any
3119 global symbols. Mapping symbols are always local. */
3120 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3122 /* No internal symbols read? Skip this BFD. */
3123 if (isymbuf == NULL)
/* Scan the local symbols for mapping symbols ($x, $d, ...) and record
   each one in the owning section's code/data map.  */
3126 for (i = 0; i < localsyms; i++)
3128 Elf_Internal_Sym *isym = &isymbuf[i];
3129 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3132 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3134 name = bfd_elf_string_from_elf_section (abfd,
/* name[1] is the mapping-symbol kind character after the '$'.  */
3138 if (bfd_is_aarch64_special_symbol_name
3139 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP)
3140 elf64_aarch64_section_map_add (sec, name[1], isym->st_value);
3145 /* Set option values needed during linking. */
/* Copies the linker command-line options into the hash table and the
   output BFD's target data for later use.  */
3147 bfd_elf64_aarch64_set_options (struct bfd *output_bfd,
3148 struct bfd_link_info *link_info,
3150 int no_wchar_warn, int pic_veneer)
3152 struct elf64_aarch64_link_hash_table *globals;
3154 globals = elf64_aarch64_hash_table (link_info);
3155 globals->pic_veneer = pic_veneer;
3157 BFD_ASSERT (is_aarch64_elf (output_bfd));
3158 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3159 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
/* Bit mask covering the low N bits; N must be less than 32.  */
3162 #define MASK(n) ((1u << (n)) - 1)
/* Extract the 26-bit immediate field of an unconditional branch
   (B/BL) instruction.  */
static inline uint32_t
decode_branch_ofs_26 (uint32_t insn)
{
  return insn & 0x3ffffffu;
}
/* Extract the 19-bit immediate field (bits 5..23) of a conditional
   branch or compare-and-branch instruction.  */
static inline uint32_t
decode_cond_branch_ofs_19 (uint32_t insn)
{
  return (insn >> 5) & 0x7ffffu;
}
/* Extract the 19-bit immediate field (bits 5..23) of a load-literal
   instruction.  */
static inline uint32_t
decode_ld_lit_ofs_19 (uint32_t insn)
{
  return (insn >> 5) & 0x7ffffu;
}
/* Extract the 14-bit immediate field (bits 5..18) of a test-and-branch
   (TBZ/TBNZ) instruction.  */
static inline uint32_t
decode_tst_branch_ofs_14 (uint32_t insn)
{
  return (insn >> 5) & 0x3fffu;
}
/* Extract the 16-bit immediate field (bits 5..20) of a move-wide
   (MOVZ/MOVN/MOVK) instruction.  */
static inline uint32_t
decode_movw_imm (uint32_t insn)
{
  return (insn >> 5) & 0xffffu;
}
3199 /* Decode the 21-bit imm of adr. */
3200 static inline uint32_t
3201 decode_adr_imm (uint32_t insn)
3203 return ((insn >> 29) & MASK (2)) | ((insn >> 3) & (MASK (19) << 2));
/* Extract the 12-bit immediate field (bits 10..21) of an add-immediate
   instruction.  */
static inline uint32_t
decode_add_imm (uint32_t insn)
{
  return (insn >> 10) & 0xfffu;
}
/* Replace the 26-bit immediate field of an unconditional branch
   instruction INSN with the low 26 bits of OFS.  */
static inline uint32_t
reencode_branch_ofs_26 (uint32_t insn, uint32_t ofs)
{
  uint32_t field = ofs & 0x3ffffffu;
  return (insn & ~0x3ffffffu) | field;
}
/* Replace the 19-bit immediate field (bits 5..23) of a conditional
   branch or compare-and-branch instruction INSN with OFS.  */
static inline uint32_t
reencode_cond_branch_ofs_19 (uint32_t insn, uint32_t ofs)
{
  uint32_t field = (ofs & 0x7ffffu) << 5;
  return (insn & ~(0x7ffffu << 5)) | field;
}
/* Encode the 19-bit offset of load literal: replace bits 5..23 of
   INSN with OFS.  (The original comment said "Decode" -- this is the
   encoder.)  */
static inline uint32_t
reencode_ld_lit_ofs_19 (uint32_t insn, uint32_t ofs)
{
  uint32_t field = (ofs & 0x7ffffu) << 5;
  return (insn & ~(0x7ffffu << 5)) | field;
}
/* Replace the 14-bit immediate field (bits 5..18) of a test-and-branch
   instruction INSN with OFS.  */
static inline uint32_t
reencode_tst_branch_ofs_14 (uint32_t insn, uint32_t ofs)
{
  uint32_t field = (ofs & 0x3fffu) << 5;
  return (insn & ~(0x3fffu << 5)) | field;
}
/* Replace the 16-bit immediate field (bits 5..20) of a move-wide
   instruction INSN with IMM.  */
static inline uint32_t
reencode_movw_imm (uint32_t insn, uint32_t imm)
{
  uint32_t field = (imm & 0xffffu) << 5;
  return (insn & ~(0xffffu << 5)) | field;
}
3249 /* Reencode the imm field of adr. */
3250 static inline uint32_t
3251 reencode_adr_imm (uint32_t insn, uint32_t imm)
3253 return (insn & ~((MASK (2) << 29) | (MASK (19) << 5)))
3254 | ((imm & MASK (2)) << 29) | ((imm & (MASK (19) << 2)) << 3);
/* Replace the 12-bit immediate field (bits 10..21) of a load/store
   positive-immediate instruction INSN with IMM.  */
static inline uint32_t
reencode_ldst_pos_imm (uint32_t insn, uint32_t imm)
{
  uint32_t field = (imm & 0xfffu) << 10;
  return (insn & ~(0xfffu << 10)) | field;
}
/* Replace the 12-bit immediate field (bits 10..21) of an add-immediate
   instruction INSN with IMM.  */
static inline uint32_t
reencode_add_imm (uint32_t insn, uint32_t imm)
{
  uint32_t field = (imm & 0xfffu) << 10;
  return (insn & ~(0xfffu << 10)) | field;
}
/* Turn a MOVZ/MOVN opcode into MOVZ by setting opcode bit 30.  */
static inline uint32_t
reencode_movzn_to_movz (uint32_t opcode)
{
  return opcode | 0x40000000u;
}
/* Turn a MOVZ/MOVN opcode into MOVN by clearing opcode bit 30.  */
static inline uint32_t
reencode_movzn_to_movn (uint32_t opcode)
{
  return opcode & ~0x40000000u;
}
3285 /* Insert the addend/value into the instruction or data object being
/* Reads the existing contents at ADDRESS, checks ADDEND for overflow
   per HOWTO, re-encodes the relevant immediate field by relocation
   type, and writes the patched value back.  Returns a
   bfd_reloc_status_type.  */
3287 static bfd_reloc_status_type
3288 bfd_elf_aarch64_put_addend (bfd *abfd,
3290 reloc_howto_type *howto, bfd_signed_vma addend)
3292 bfd_reloc_status_type status = bfd_reloc_ok;
3293 bfd_signed_vma old_addend = addend;
/* Fetch the existing contents, sized per the howto.  */
3297 size = bfd_get_reloc_size (howto);
3301 contents = bfd_get_16 (abfd, address);
3304 if (howto->src_mask != 0xffffffff)
3305 /* Must be 32-bit instruction, always little-endian. */
3306 contents = bfd_getl32 (address);
3308 /* Must be 32-bit data (endianness dependent). */
3309 contents = bfd_get_32 (abfd, address);
3312 contents = bfd_get_64 (abfd, address);
/* Overflow-check the addend before shifting it into field position.  */
3318 switch (howto->complain_on_overflow)
3320 case complain_overflow_dont:
3322 case complain_overflow_signed:
3323 status = aarch64_signed_overflow (addend,
3324 howto->bitsize + howto->rightshift);
3326 case complain_overflow_unsigned:
3327 status = aarch64_unsigned_overflow (addend,
3328 howto->bitsize + howto->rightshift);
3330 case complain_overflow_bitfield:
3335 addend >>= howto->rightshift;
/* Re-encode the immediate field appropriate to the reloc type.  */
3337 switch (howto->type)
3339 case R_AARCH64_JUMP26:
3340 case R_AARCH64_CALL26:
3341 contents = reencode_branch_ofs_26 (contents, addend);
3344 case R_AARCH64_CONDBR19:
3345 contents = reencode_cond_branch_ofs_19 (contents, addend);
3348 case R_AARCH64_TSTBR14:
3349 contents = reencode_tst_branch_ofs_14 (contents, addend);
/* Load-literal offsets must be aligned to the access size.  */
3352 case R_AARCH64_LD_PREL_LO19:
3353 case R_AARCH64_GOT_LD_PREL19:
3354 if (old_addend & ((1 << howto->rightshift) - 1))
3355 return bfd_reloc_overflow;
3356 contents = reencode_ld_lit_ofs_19 (contents, addend);
3359 case R_AARCH64_TLSDESC_CALL:
3362 case R_AARCH64_TLSGD_ADR_PAGE21:
3363 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3364 case R_AARCH64_TLSDESC_ADR_PAGE:
3365 case R_AARCH64_ADR_GOT_PAGE:
3366 case R_AARCH64_ADR_PREL_LO21:
3367 case R_AARCH64_ADR_PREL_PG_HI21:
3368 case R_AARCH64_ADR_PREL_PG_HI21_NC:
3369 contents = reencode_adr_imm (contents, addend);
3372 case R_AARCH64_TLSGD_ADD_LO12_NC:
3373 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3374 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3375 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3376 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3377 case R_AARCH64_ADD_ABS_LO12_NC:
3378 /* Corresponds to: add rd, rn, #uimm12 to provide the low order
3379 12 bits of the page offset following
3380 R_AARCH64_ADR_PREL_PG_HI21 which computes the
3381 (pc-relative) page base. */
3382 contents = reencode_add_imm (contents, addend);
3385 case R_AARCH64_LDST8_ABS_LO12_NC:
3386 case R_AARCH64_LDST16_ABS_LO12_NC:
3387 case R_AARCH64_LDST32_ABS_LO12_NC:
3388 case R_AARCH64_LDST64_ABS_LO12_NC:
3389 case R_AARCH64_LDST128_ABS_LO12_NC:
3390 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3391 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3392 case R_AARCH64_LD64_GOT_LO12_NC:
3393 if (old_addend & ((1 << howto->rightshift) - 1))
3394 return bfd_reloc_overflow;
3395 /* Used for ldr*|str* rt, [rn, #uimm12] to provide the low order
3396 12 bits of the page offset following R_AARCH64_ADR_PREL_PG_HI21
3397 which computes the (pc-relative) page base. */
3398 contents = reencode_ldst_pos_imm (contents, addend);
3401 /* Group relocations to create high bits of a 16, 32, 48 or 64
3402 bit signed data or abs address inline. Will change
3403 instruction to MOVN or MOVZ depending on sign of calculated
3406 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
3407 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3408 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3409 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3410 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3411 case R_AARCH64_MOVW_SABS_G0:
3412 case R_AARCH64_MOVW_SABS_G1:
3413 case R_AARCH64_MOVW_SABS_G2:
3414 /* NOTE: We can only come here with movz or movn. */
3417 /* Force use of MOVN. */
3419 contents = reencode_movzn_to_movn (contents);
3423 /* Force use of MOVZ. */
3424 contents = reencode_movzn_to_movz (contents);
3428 /* Group relocations to create a 16, 32, 48 or 64 bit unsigned
3429 data or abs address inline. */
3431 case R_AARCH64_MOVW_UABS_G0:
3432 case R_AARCH64_MOVW_UABS_G0_NC:
3433 case R_AARCH64_MOVW_UABS_G1:
3434 case R_AARCH64_MOVW_UABS_G1_NC:
3435 case R_AARCH64_MOVW_UABS_G2:
3436 case R_AARCH64_MOVW_UABS_G2_NC:
3437 case R_AARCH64_MOVW_UABS_G3:
3438 contents = reencode_movw_imm (contents, addend);
3442 /* Repack simple data */
/* Only contiguous low-bit dst_masks are supported here.  */
3443 if (howto->dst_mask & (howto->dst_mask + 1))
3444 return bfd_reloc_notsupported;
3446 contents = ((contents & ~howto->dst_mask) | (addend & howto->dst_mask));
/* Write the patched contents back, sized as on the read above.  */
3453 bfd_put_16 (abfd, contents, address);
3456 if (howto->dst_mask != 0xffffffff)
3457 /* must be 32-bit instruction, always little-endian */
3458 bfd_putl32 (contents, address);
3460 /* must be 32-bit data (endianness dependent) */
3461 bfd_put_32 (abfd, contents, address);
3464 bfd_put_64 (abfd, contents, address);
/* Compute the VMA of the GOT entry for symbol H, writing VALUE into
   the entry when the symbol resolves locally (static or -Bsymbolic
   link); in that case the relocation is considered resolved and
   *UNRESOLVED_RELOC_P is cleared.  */
3474 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3475 struct elf64_aarch64_link_hash_table
3476 *globals, struct bfd_link_info *info,
3477 bfd_vma value, bfd *output_bfd,
3478 bfd_boolean *unresolved_reloc_p)
3480 bfd_vma off = (bfd_vma) - 1;
3481 asection *basegot = globals->root.sgot;
3482 bfd_boolean dyn = globals->root.dynamic_sections_created;
3486 off = h->got.offset;
3487 BFD_ASSERT (off != (bfd_vma) - 1);
3488 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3490 && SYMBOL_REFERENCES_LOCAL (info, h))
3491 || (ELF_ST_VISIBILITY (h->other)
3492 && h->root.type == bfd_link_hash_undefweak))
3494 /* This is actually a static link, or it is a -Bsymbolic link
3495 and the symbol is defined locally. We must initialize this
3496 entry in the global offset table. Since the offset must
3497 always be a multiple of 8, we use the least significant bit
3498 to record whether we have initialized it already.
3499 When doing a dynamic link, we create a .rel(a).got relocation
3500 entry to initialize the value. This is done in the
3501 finish_dynamic_symbol routine. */
3506 bfd_put_64 (output_bfd, value, basegot->contents + off);
3511 *unresolved_reloc_p = FALSE;
/* Return the entry's full VMA rather than the section offset.  */
3513 off = off + basegot->output_section->vma + basegot->output_offset;
3519 /* Change R_TYPE to a more efficient access model where possible,
3520 return the new reloc type. */
3523 aarch64_tls_transition_without_check (unsigned int r_type,
3524 struct elf_link_hash_entry *h)
/* A NULL hash entry means the symbol is local to this link, so
   GD/TLSDESC/IE accesses may relax all the way to local-exec (LE).  */
3526 bfd_boolean is_local = h == NULL;
3529 case R_AARCH64_TLSGD_ADR_PAGE21:
3530 case R_AARCH64_TLSDESC_ADR_PAGE:
/* GD->LE for local symbols, otherwise GD->IE.  */
3532 ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21;
3534 case R_AARCH64_TLSGD_ADD_LO12_NC:
3535 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3537 ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3538 : R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC;
/* IE->LE is only possible when the symbol is local.  */
3540 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3541 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3543 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3544 return is_local ? R_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3546 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3547 case R_AARCH64_TLSDESC_CALL:
3548 /* Instructions with these relocations will become NOPs. */
3549 return R_AARCH64_NONE;
/* Classify R_TYPE by the kind of GOT entry it requires.
   NOTE(review): most of the return statements fall on lines not
   visible in this excerpt; only the GOT_TLSDESC_GD return is shown.  */
3556 aarch64_reloc_got_type (unsigned int r_type)
/* Plain (non-TLS) GOT-referencing relocations.  */
3560 case R_AARCH64_LD64_GOT_LO12_NC:
3561 case R_AARCH64_ADR_GOT_PAGE:
3562 case R_AARCH64_GOT_LD_PREL19:
/* General-dynamic TLS.  */
3565 case R_AARCH64_TLSGD_ADR_PAGE21:
3566 case R_AARCH64_TLSGD_ADD_LO12_NC:
/* TLS descriptor (general-dynamic) accesses.  */
3569 case R_AARCH64_TLSDESC_ADD_LO12_NC:
3570 case R_AARCH64_TLSDESC_ADR_PAGE:
3571 case R_AARCH64_TLSDESC_CALL:
3572 case R_AARCH64_TLSDESC_LD64_LO12_NC:
3573 return GOT_TLSDESC_GD;
/* Initial-exec TLS.  */
3575 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3576 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
/* Local-exec TLS: these need no GOT entry at all.  */
3579 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
3580 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
3581 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3582 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
3583 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3584 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
3585 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3586 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
/* Decide whether the TLS access for R_TYPE against the symbol given by
   H / R_SYMNDX may be relaxed to a cheaper access model.  Non-TLS
   relocation types are never candidates.
   NOTE(review): the actual return statements are on lines not visible
   in this excerpt.  */
3593 aarch64_can_relax_tls (bfd *input_bfd,
3594 struct bfd_link_info *info,
3595 unsigned int r_type,
3596 struct elf_link_hash_entry *h,
3597 unsigned long r_symndx)
3599 unsigned int symbol_got_type;
3600 unsigned int reloc_got_type;
3602 if (! IS_AARCH64_TLS_RELOC (r_type))
3605 symbol_got_type = elf64_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3606 reloc_got_type = aarch64_reloc_got_type (r_type);
/* A GD-style reloc against a symbol whose GOT entry was downgraded to
   IE is a relaxation candidate.  */
3608 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
/* Weak undefined symbols get special treatment here; the branch body
   is on lines not visible in this excerpt — verify against the full
   source.  */
3614 if (h && h->root.type == bfd_link_hash_undefweak)
/* Return the relaxed relocation type for R_TYPE when relaxation is
   permitted by aarch64_can_relax_tls, otherwise R_TYPE unchanged (the
   early-return path is on a line not visible in this excerpt).  */
3621 aarch64_tls_transition (bfd *input_bfd,
3622 struct bfd_link_info *info,
3623 unsigned int r_type,
3624 struct elf_link_hash_entry *h,
3625 unsigned long r_symndx)
3627 if (! aarch64_can_relax_tls (input_bfd, info, r_type, h, r_symndx))
3630 return aarch64_tls_transition_without_check (r_type, h);
3633 /* Return the base VMA address which should be subtracted from real addresses
3634 when resolving R_AARCH64_TLS_DTPREL64 relocation. */
3637 dtpoff_base (struct bfd_link_info *info)
3639 /* If tls_sec is NULL, we should have signalled an error already. */
3640 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
/* DTP-relative offsets are measured from the start of the TLS
   segment, i.e. the VMA of the combined TLS output section.  */
3641 return elf_hash_table (info)->tls_sec->vma;
3645 /* Return the base VMA address which should be subtracted from real addresses
3646 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
3649 tpoff_base (struct bfd_link_info *info)
3651 struct elf_link_hash_table *htab = elf_hash_table (info);
3653 /* If tls_sec is NULL, we should have signalled an error already. */
3654 if (htab->tls_sec == NULL)
/* TP offsets leave room for the thread control block in front of the
   TLS data: round TCB_SIZE up to the TLS section's alignment and bias
   the base backwards by that amount.  */
3657 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3658 htab->tls_sec->alignment_power);
3659 return htab->tls_sec->vma - base;
/* Return a pointer to the stored GOT offset for the symbol: the hash
   entry's slot for a global (H non-NULL), otherwise the local-symbol
   table entry of INPUT_BFD indexed by R_SYMNDX.  */
3663 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3664 unsigned long r_symndx)
3666 /* Calculate the address of the GOT entry for symbol
3667 referred to in h. */
3669 return &h->got.offset;
3673 struct elf_aarch64_local_symbol *l;
3675 l = elf64_aarch64_locals (input_bfd);
3676 return &l[r_symndx].got_offset;
/* Mark the symbol's GOT entry as initialized.  Presumably sets the
   low-order marker bit through the pointer P; the store itself is on a
   line not visible in this excerpt — verify against the full source.  */
3681 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3682 unsigned long r_symndx)
3685 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
/* Return whether the symbol's GOT entry has already been marked as
   initialized.  Presumably tests the low-order marker bit of VALUE;
   the test is on a line not visible in this excerpt.  */
3690 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3691 unsigned long r_symndx)
3694 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
/* Return the symbol's GOT offset.  Presumably strips the low-order
   "initialized" marker bit from VALUE before returning; the masking is
   on a line not visible in this excerpt.  */
3699 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3700 unsigned long r_symndx)
3703 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
/* Return a pointer to the stored TLSDESC jump-table GOT offset for the
   symbol: the backend hash entry's slot for a global (H non-NULL),
   otherwise the local-symbol table entry indexed by R_SYMNDX.  */
3709 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3710 unsigned long r_symndx)
3712 /* Calculate the address of the GOT entry for symbol
3713 referred to in h. */
3716 struct elf64_aarch64_link_hash_entry *eh;
3717 eh = (struct elf64_aarch64_link_hash_entry *) h;
3718 return &eh->tlsdesc_got_jump_table_offset;
3723 struct elf_aarch64_local_symbol *l;
3725 l = elf64_aarch64_locals (input_bfd);
3726 return &l[r_symndx].tlsdesc_got_jump_table_offset;
/* Mark the symbol's TLSDESC GOT entry as initialized.  Presumably sets
   the low-order marker bit through P; the store itself is on a line
   not visible in this excerpt.  */
3731 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3732 unsigned long r_symndx)
3735 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
/* Return whether the symbol's TLSDESC GOT entry has already been
   marked as initialized.  Presumably tests the low-order marker bit of
   VALUE; the test is on a line not visible in this excerpt.  */
3740 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3741 struct elf_link_hash_entry *h,
3742 unsigned long r_symndx)
3745 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
/* Return the symbol's TLSDESC GOT offset.  Presumably strips the
   low-order "initialized" marker bit before returning; the masking is
   on a line not visible in this excerpt.  */
3750 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3751 unsigned long r_symndx)
3754 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3759 /* Perform a relocation as part of a final link. */
/* Resolves one relocation REL (described by HOWTO) at its offset in
   INPUT_SECTION.  H is the global hash entry, or NULL for a local
   symbol.  When SAVE_ADDEND is set the computed value is stashed in
   *SAVED_ADDEND and bfd_reloc_continue is returned so that consecutive
   relocations at the same offset accumulate; otherwise the value is
   applied via bfd_elf_aarch64_put_addend.
   NOTE(review): this excerpt omits a number of lines from the original
   function; comments below describe only the visible code.  */
3760 static bfd_reloc_status_type
3761 elf64_aarch64_final_link_relocate (reloc_howto_type *howto,
3764 asection *input_section,
3766 Elf_Internal_Rela *rel,
3768 struct bfd_link_info *info,
3770 struct elf_link_hash_entry *h,
3771 bfd_boolean *unresolved_reloc_p,
3772 bfd_boolean save_addend,
3773 bfd_vma *saved_addend)
3775 unsigned int r_type = howto->type;
3776 unsigned long r_symndx;
3777 bfd_byte *hit_data = contents + rel->r_offset;
3779 bfd_signed_vma signed_addend;
3780 struct elf64_aarch64_link_hash_table *globals;
3781 bfd_boolean weak_undef_p;
3783 globals = elf64_aarch64_hash_table (info);
3785 BFD_ASSERT (is_aarch64_elf (input_bfd));
3787 r_symndx = ELF64_R_SYM (rel->r_info);
3789 /* It is possible to have linker relaxations on some TLS access
3790 models. Update our information here. */
3791 r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
/* If the TLS transition changed the reloc type, switch to the howto
   for the relaxed type.  */
3793 if (r_type != howto->type)
3794 howto = elf64_aarch64_howto_from_type (r_type);
/* PLACE is the VMA of the location being relocated.  */
3796 place = input_section->output_section->vma
3797 + input_section->output_offset + rel->r_offset;
3799 /* Get addend, accumulating the addend for consecutive relocs
3800 which refer to the same offset. */
3801 signed_addend = saved_addend ? *saved_addend : 0;
3802 signed_addend += rel->r_addend;
3804 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
3805 : bfd_is_und_section (sym_sec));
3808 case R_AARCH64_NONE:
3809 case R_AARCH64_NULL:
3810 case R_AARCH64_TLSDESC_CALL:
/* Nothing to patch for these types.  */
3811 *unresolved_reloc_p = FALSE;
3812 return bfd_reloc_ok;
3814 case R_AARCH64_ABS64:
3816 /* When generating a shared object or relocatable executable, these
3817 relocations are copied into the output file to be resolved at
3819 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
3820 && (input_section->flags & SEC_ALLOC)
3822 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3823 || h->root.type != bfd_link_hash_undefweak))
3825 Elf_Internal_Rela outrel;
3827 bfd_boolean skip, relocate;
3830 *unresolved_reloc_p = FALSE;
3832 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd,
3835 return bfd_reloc_notsupported;
3840 outrel.r_addend = signed_addend;
3842 _bfd_elf_section_offset (output_bfd, info, input_section,
/* -1 / -2 are the "discarded" / "don't relocate" markers from
   _bfd_elf_section_offset.  */
3844 if (outrel.r_offset == (bfd_vma) - 1)
3846 else if (outrel.r_offset == (bfd_vma) - 2)
3852 outrel.r_offset += (input_section->output_section->vma
3853 + input_section->output_offset);
3856 memset (&outrel, 0, sizeof outrel);
3859 && (!info->shared || !info->symbolic || !h->def_regular))
3860 outrel.r_info = ELF64_R_INFO (h->dynindx, r_type);
3865 /* On SVR4-ish systems, the dynamic loader cannot
3866 relocate the text and data segments independently,
3867 so the symbol does not matter. */
3869 outrel.r_info = ELF64_R_INFO (symbol, R_AARCH64_RELATIVE);
3870 outrel.r_addend += value;
3873 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (htab);
3874 bfd_elf64_swap_reloca_out (output_bfd, &outrel, loc);
3876 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
3878 /* Sanity to check that we have previously allocated
3879 sufficient space in the relocation section for the
3880 number of relocations we actually want to emit. */
3884 /* If this reloc is against an external symbol, we do not want to
3885 fiddle with the addend. Otherwise, we need to include the symbol
3886 value so that it becomes an addend for the dynamic reloc. */
3888 return bfd_reloc_ok;
3890 return _bfd_final_link_relocate (howto, input_bfd, input_section,
3891 contents, rel->r_offset, value,
3895 value += signed_addend;
3898 case R_AARCH64_JUMP26:
3899 case R_AARCH64_CALL26:
3901 asection *splt = globals->root.splt;
3902 bfd_boolean via_plt_p =
3903 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
3905 /* A call to an undefined weak symbol is converted to a jump to
3906 the next instruction unless a PLT entry will be created.
3907 The jump to the next instruction is optimized as a NOP.
3908 Do the same for local undefined symbols. */
3909 if (weak_undef_p && ! via_plt_p)
3911 bfd_putl32 (INSN_NOP, hit_data);
3912 return bfd_reloc_ok;
3915 /* If the call goes through a PLT entry, make sure to
3916 check distance to the right destination address. */
3919 value = (splt->output_section->vma
3920 + splt->output_offset + h->plt.offset);
3921 *unresolved_reloc_p = FALSE;
3924 /* If the target symbol is global and marked as a function the
3925 relocation applies a function call or a tail call. In this
3926 situation we can veneer out of range branches. The veneers
3927 use IP0 and IP1 hence cannot be used arbitrary out of range
3928 branches that occur within the body of a function. */
3929 if (h && h->type == STT_FUNC)
3931 /* Check if a stub has to be inserted because the destination
3933 if (! aarch64_valid_branch_p (value, place))
3935 /* The target is out of reach, so redirect the branch to
3936 the local stub for this function. */
3937 struct elf64_aarch64_stub_hash_entry *stub_entry;
3938 stub_entry = elf64_aarch64_get_stub_entry (input_section,
3941 if (stub_entry != NULL)
3942 value = (stub_entry->stub_offset
3943 + stub_entry->stub_sec->output_offset
3944 + stub_entry->stub_sec->output_section->vma);
3948 value = aarch64_resolve_relocation (r_type, place, value,
3949 signed_addend, weak_undef_p);
/* Simple data/instruction relocations: compute the value and fall
   through to the common "apply" path at the end of the function.  */
3952 case R_AARCH64_ABS16:
3953 case R_AARCH64_ABS32:
3954 case R_AARCH64_ADD_ABS_LO12_NC:
3955 case R_AARCH64_ADR_PREL_LO21:
3956 case R_AARCH64_ADR_PREL_PG_HI21:
3957 case R_AARCH64_ADR_PREL_PG_HI21_NC:
3958 case R_AARCH64_CONDBR19:
3959 case R_AARCH64_LD_PREL_LO19:
3960 case R_AARCH64_LDST8_ABS_LO12_NC:
3961 case R_AARCH64_LDST16_ABS_LO12_NC:
3962 case R_AARCH64_LDST32_ABS_LO12_NC:
3963 case R_AARCH64_LDST64_ABS_LO12_NC:
3964 case R_AARCH64_LDST128_ABS_LO12_NC:
3965 case R_AARCH64_MOVW_SABS_G0:
3966 case R_AARCH64_MOVW_SABS_G1:
3967 case R_AARCH64_MOVW_SABS_G2:
3968 case R_AARCH64_MOVW_UABS_G0:
3969 case R_AARCH64_MOVW_UABS_G0_NC:
3970 case R_AARCH64_MOVW_UABS_G1:
3971 case R_AARCH64_MOVW_UABS_G1_NC:
3972 case R_AARCH64_MOVW_UABS_G2:
3973 case R_AARCH64_MOVW_UABS_G2_NC:
3974 case R_AARCH64_MOVW_UABS_G3:
3975 case R_AARCH64_PREL16:
3976 case R_AARCH64_PREL32:
3977 case R_AARCH64_PREL64:
3978 case R_AARCH64_TSTBR14:
3979 value = aarch64_resolve_relocation (r_type, place, value,
3980 signed_addend, weak_undef_p);
/* GOT-relative accesses: resolve via the symbol's GOT entry VMA.  */
3983 case R_AARCH64_LD64_GOT_LO12_NC:
3984 case R_AARCH64_ADR_GOT_PAGE:
3985 case R_AARCH64_GOT_LD_PREL19:
3986 if (globals->root.sgot == NULL)
3987 BFD_ASSERT (h != NULL);
3991 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
3993 unresolved_reloc_p);
3994 value = aarch64_resolve_relocation (r_type, place, value,
/* GD/IE TLS relocs resolve against the symbol's .got slot.  */
3999 case R_AARCH64_TLSGD_ADR_PAGE21:
4000 case R_AARCH64_TLSGD_ADD_LO12_NC:
4001 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4002 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4003 if (globals->root.sgot == NULL)
4004 return bfd_reloc_notsupported;
4006 value = (symbol_got_offset (input_bfd, h, r_symndx)
4007 + globals->root.sgot->output_section->vma
4008 + globals->root.sgot->output_section->output_offset);
4010 value = aarch64_resolve_relocation (r_type, place, value,
4012 *unresolved_reloc_p = FALSE;
/* Local-exec TLS: the addend is biased by -tpoff_base so the result
   is thread-pointer relative.  */
4015 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4016 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4017 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4018 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4019 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4020 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4021 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4022 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4023 value = aarch64_resolve_relocation (r_type, place, value,
4024 signed_addend - tpoff_base (info), weak_undef_p);
4025 *unresolved_reloc_p = FALSE;
/* TLS descriptors live past the jump table in .got.plt.  */
4028 case R_AARCH64_TLSDESC_ADR_PAGE:
4029 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4030 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4031 case R_AARCH64_TLSDESC_ADD:
4032 case R_AARCH64_TLSDESC_LDR:
4033 if (globals->root.sgot == NULL)
4034 return bfd_reloc_notsupported;
4036 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4037 + globals->root.sgotplt->output_section->vma
4038 + globals->root.sgotplt->output_section->output_offset
4039 + globals->sgotplt_jump_table_size);
4041 value = aarch64_resolve_relocation (r_type, place, value,
4043 *unresolved_reloc_p = FALSE;
4047 return bfd_reloc_notsupported;
4051 *saved_addend = value;
4053 /* Only apply the final relocation in a sequence. */
4055 return bfd_reloc_continue;
4057 return bfd_elf_aarch64_put_addend (input_bfd, hit_data, howto, value);
4060 /* Handle TLS relaxations. Relaxing is possible for symbols that use
4061 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
4064 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4065 is to then call final_link_relocate. Return other values in the
/* Rewrites the instruction bytes in CONTENTS at REL->r_offset to the
   relaxed (IE or LE) TLS sequence.  Opcode constants below are the
   little-endian encodings of the replacement instructions shown in
   the per-case comments.
   NOTE(review): this excerpt omits some lines of the original switch;
   comments describe only the visible code.  */
4068 static bfd_reloc_status_type
4069 elf64_aarch64_tls_relax (struct elf64_aarch64_link_hash_table *globals,
4070 bfd *input_bfd, bfd_byte *contents,
4071 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
/* Local symbols (H == NULL) allow relaxation all the way to LE.  */
4073 bfd_boolean is_local = h == NULL;
4074 unsigned int r_type = ELF64_R_TYPE (rel->r_info);
4077 BFD_ASSERT (globals && input_bfd && contents && rel);
4081 case R_AARCH64_TLSGD_ADR_PAGE21:
4082 case R_AARCH64_TLSDESC_ADR_PAGE:
4085 /* GD->LE relaxation:
4086 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4088 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4090 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4091 return bfd_reloc_continue;
4095 /* GD->IE relaxation:
4096 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4098 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4100 insn = bfd_getl32 (contents + rel->r_offset);
4101 return bfd_reloc_continue;
4104 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4107 /* GD->LE relaxation:
4108 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4110 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4111 return bfd_reloc_continue;
4115 /* GD->IE relaxation:
4116 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4118 insn = bfd_getl32 (contents + rel->r_offset);
4120 bfd_putl32 (insn, contents + rel->r_offset);
4121 return bfd_reloc_continue;
4124 case R_AARCH64_TLSGD_ADD_LO12_NC:
4127 /* GD->LE relaxation
4128 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4129 bl __tls_get_addr => mrs x1, tpidr_el0
4130 nop => add x0, x1, x0
4133 /* First kill the tls_get_addr reloc on the bl instruction. */
4134 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4135 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4137 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4138 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4139 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4140 return bfd_reloc_continue;
4144 /* GD->IE relaxation
4145 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4146 BL __tls_get_addr => mrs x1, tpidr_el0
4148 NOP => add x0, x1, x0
4151 BFD_ASSERT (ELF64_R_TYPE (rel[1].r_info) == R_AARCH64_CALL26);
4153 /* Remove the relocation on the BL instruction. */
4154 rel[1].r_info = ELF64_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4156 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4158 /* We choose to fixup the BL and NOP instructions using the
4159 offset from the second relocation to allow flexibility in
4160 scheduling instructions between the ADD and BL. */
4161 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4162 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4163 return bfd_reloc_continue;
4166 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4167 case R_AARCH64_TLSDESC_CALL:
4168 /* GD->IE/LE relaxation:
4169 add x0, x0, #:tlsdesc_lo12:var => nop
/* The instruction is fully replaced by a NOP, so there is nothing
   left for final_link_relocate to do: return bfd_reloc_ok.  */
4172 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4173 return bfd_reloc_ok;
4175 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4176 /* IE->LE relaxation:
4177 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
/* Preserve the destination register (bits 0-4) of the original
   instruction when substituting the MOVZ.  */
4181 insn = bfd_getl32 (contents + rel->r_offset);
4182 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4184 return bfd_reloc_continue;
4186 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4187 /* IE->LE relaxation:
4188 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4192 insn = bfd_getl32 (contents + rel->r_offset);
4193 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4195 return bfd_reloc_continue;
4198 return bfd_reloc_continue;
4201 return bfd_reloc_ok;
4204 /* Relocate an AArch64 ELF section. */
/* Main per-section relocation driver: for each reloc in RELOCS it
   resolves the target symbol, applies TLS relaxation where possible,
   calls elf64_aarch64_final_link_relocate, emits GOT/TLSDESC dynamic
   relocations and GOT contents for TLS accesses, and reports errors
   via the linker callbacks.
   NOTE(review): this excerpt omits a number of lines from the original
   function; comments below describe only the visible code.  */
4207 elf64_aarch64_relocate_section (bfd *output_bfd,
4208 struct bfd_link_info *info,
4210 asection *input_section,
4212 Elf_Internal_Rela *relocs,
4213 Elf_Internal_Sym *local_syms,
4214 asection **local_sections)
4216 Elf_Internal_Shdr *symtab_hdr;
4217 struct elf_link_hash_entry **sym_hashes;
4218 Elf_Internal_Rela *rel;
4219 Elf_Internal_Rela *relend;
4221 struct elf64_aarch64_link_hash_table *globals;
4222 bfd_boolean save_addend = FALSE;
4225 globals = elf64_aarch64_hash_table (info);
4227 symtab_hdr = &elf_symtab_hdr (input_bfd);
4228 sym_hashes = elf_sym_hashes (input_bfd);
4231 relend = relocs + input_section->reloc_count;
4232 for (; rel < relend; rel++)
4234 unsigned int r_type;
4235 unsigned int relaxed_r_type;
4236 reloc_howto_type *howto;
4237 unsigned long r_symndx;
4238 Elf_Internal_Sym *sym;
4240 struct elf_link_hash_entry *h;
4242 bfd_reloc_status_type r;
4245 bfd_boolean unresolved_reloc = FALSE;
4246 char *error_message = NULL;
4248 r_symndx = ELF64_R_SYM (rel->r_info);
4249 r_type = ELF64_R_TYPE (rel->r_info);
4251 bfd_reloc.howto = elf64_aarch64_howto_from_type (r_type);
4252 howto = bfd_reloc.howto;
/* Unknown reloc type: report it (the error path's control flow
   is on lines not visible in this excerpt).  */
4256 (*_bfd_error_handler)
4257 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4258 input_bfd, input_section, r_type);
/* Symbol indices below sh_info refer to local symbols.  */
4266 if (r_symndx < symtab_hdr->sh_info)
4268 sym = local_syms + r_symndx;
4269 sym_type = ELF64_ST_TYPE (sym->st_info);
4270 sec = local_sections[r_symndx];
4272 /* An object file might have a reference to a local
4273 undefined symbol. This is a daft object file, but we
4274 should at least do something about it. */
4275 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4276 && bfd_is_und_section (sec)
4277 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4279 if (!info->callbacks->undefined_symbol
4280 (info, bfd_elf_string_from_elf_section
4281 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4282 input_bfd, input_section, rel->r_offset, TRUE))
4286 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
/* Global symbol: resolve through the hash table.  */
4292 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4293 r_symndx, symtab_hdr, sym_hashes,
4295 unresolved_reloc, warned);
4300 if (sec != NULL && discarded_section (sec))
4301 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4302 rel, 1, relend, howto, 0, contents);
4304 if (info->relocatable)
4306 /* This is a relocatable link. We don't have to change
4307 anything, unless the reloc is against a section symbol,
4308 in which case we have to adjust according to where the
4309 section symbol winds up in the output section. */
4310 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
4311 rel->r_addend += sec->output_offset;
/* Work out a printable symbol name for diagnostics.  */
4316 name = h->root.root.string;
4319 name = (bfd_elf_string_from_elf_section
4320 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4321 if (name == NULL || *name == '\0')
4322 name = bfd_section_name (input_bfd, sec);
/* Diagnose a TLS reloc used with a non-TLS symbol or vice versa.  */
4326 && r_type != R_AARCH64_NONE
4327 && r_type != R_AARCH64_NULL
4329 || h->root.type == bfd_link_hash_defined
4330 || h->root.type == bfd_link_hash_defweak)
4331 && IS_AARCH64_TLS_RELOC (r_type) != (sym_type == STT_TLS))
4333 (*_bfd_error_handler)
4334 ((sym_type == STT_TLS
4335 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4336 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4338 input_section, (long) rel->r_offset, howto->name, name);
4342 /* We relax only if we can see that there can be a valid transition
4343 from a reloc type to another.
4344 We call elf64_aarch64_final_link_relocate unless we're completely
4345 done, i.e., the relaxation produced the final output we want. */
4347 relaxed_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4349 if (relaxed_r_type != r_type)
4351 r_type = relaxed_r_type;
4352 howto = elf64_aarch64_howto_from_type (r_type);
4354 r = elf64_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4355 unresolved_reloc = 0;
4358 r = bfd_reloc_continue;
4360 /* There may be multiple consecutive relocations for the
4361 same offset. In that case we are supposed to treat the
4362 output of each relocation as the addend for the next. */
4363 if (rel + 1 < relend
4364 && rel->r_offset == rel[1].r_offset
4365 && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4366 && ELF64_R_TYPE (rel[1].r_info) != R_AARCH64_NULL
4369 save_addend = FALSE;
4371 if (r == bfd_reloc_continue)
4372 r = elf64_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4373 input_section, contents, rel,
4374 relocation, info, sec,
4375 h, &unresolved_reloc,
4376 save_addend, &addend);
/* Post-resolution GOT bookkeeping, keyed on the (possibly relaxed)
   reloc type.  The *_mark helpers ensure each GOT slot is filled in
   only once even when multiple relocs reference it.  */
4380 case R_AARCH64_TLSGD_ADR_PAGE21:
4381 case R_AARCH64_TLSGD_ADD_LO12_NC:
4382 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4384 bfd_boolean need_relocs = FALSE;
4389 off = symbol_got_offset (input_bfd, h, r_symndx);
4390 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4393 (info->shared || indx != 0) &&
4395 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4396 || h->root.type != bfd_link_hash_undefweak);
4398 BFD_ASSERT (globals->root.srelgot != NULL);
/* Dynamic case: emit DTPMOD64 (and, for globals, DTPREL64)
   relocs on the two GOT words of the GD pair.  */
4402 Elf_Internal_Rela rela;
4403 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_DTPMOD64);
4405 rela.r_offset = globals->root.sgot->output_section->vma +
4406 globals->root.sgot->output_offset + off;
4409 loc = globals->root.srelgot->contents;
4410 loc += globals->root.srelgot->reloc_count++
4411 * RELOC_SIZE (htab);
4412 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4416 bfd_put_64 (output_bfd,
4417 relocation - dtpoff_base (info),
4418 globals->root.sgot->contents + off
4423 /* This TLS symbol is global. We emit a
4424 relocation to fixup the tls offset at load
4427 ELF64_R_INFO (indx, R_AARCH64_TLS_DTPREL64);
4430 (globals->root.sgot->output_section->vma
4431 + globals->root.sgot->output_offset + off
4434 loc = globals->root.srelgot->contents;
4435 loc += globals->root.srelgot->reloc_count++
4436 * RELOC_SIZE (globals);
4437 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4438 bfd_put_64 (output_bfd, (bfd_vma) 0,
4439 globals->root.sgot->contents + off
/* Static case: module index is 1 (the executable) and the
   DTP offset is known at link time.  */
4445 bfd_put_64 (output_bfd, (bfd_vma) 1,
4446 globals->root.sgot->contents + off);
4447 bfd_put_64 (output_bfd,
4448 relocation - dtpoff_base (info),
4449 globals->root.sgot->contents + off
4453 symbol_got_offset_mark (input_bfd, h, r_symndx);
4457 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4458 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4459 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4461 bfd_boolean need_relocs = FALSE;
4466 off = symbol_got_offset (input_bfd, h, r_symndx);
4468 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4471 (info->shared || indx != 0) &&
4473 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4474 || h->root.type != bfd_link_hash_undefweak);
4476 BFD_ASSERT (globals->root.srelgot != NULL);
/* IE: one GOT word, fixed up by a TPREL64 dynamic reloc when
   needed, otherwise resolved statically against tpoff_base.  */
4480 Elf_Internal_Rela rela;
4483 rela.r_addend = relocation - dtpoff_base (info);
4487 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLS_TPREL64);
4488 rela.r_offset = globals->root.sgot->output_section->vma +
4489 globals->root.sgot->output_offset + off;
4491 loc = globals->root.srelgot->contents;
4492 loc += globals->root.srelgot->reloc_count++
4493 * RELOC_SIZE (htab);
4495 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
4497 bfd_put_64 (output_bfd, rela.r_addend,
4498 globals->root.sgot->contents + off);
4501 bfd_put_64 (output_bfd, relocation - tpoff_base (info),
4502 globals->root.sgot->contents + off);
4504 symbol_got_offset_mark (input_bfd, h, r_symndx);
/* Local-exec relocs need no GOT bookkeeping at all.  */
4508 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4509 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4510 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4511 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4512 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4513 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4514 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4515 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4518 case R_AARCH64_TLSDESC_ADR_PAGE:
4519 case R_AARCH64_TLSDESC_LD64_LO12_NC:
4520 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4521 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
4523 bfd_boolean need_relocs = FALSE;
4524 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
4525 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
4527 need_relocs = (h == NULL
4528 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4529 || h->root.type != bfd_link_hash_undefweak);
4531 BFD_ASSERT (globals->root.srelgot != NULL);
4532 BFD_ASSERT (globals->root.sgot != NULL);
4537 Elf_Internal_Rela rela;
4538 rela.r_info = ELF64_R_INFO (indx, R_AARCH64_TLSDESC);
4540 rela.r_offset = (globals->root.sgotplt->output_section->vma
4541 + globals->root.sgotplt->output_offset
4542 + off + globals->sgotplt_jump_table_size);
4545 rela.r_addend = relocation - dtpoff_base (info);
4547 /* Allocate the next available slot in the PLT reloc
4548 section to hold our R_AARCH64_TLSDESC, the next
4549 available slot is determined from reloc_count,
4550 which we step. But note, reloc_count was
4551 artifically moved down while allocating slots for
4552 real PLT relocs such that all of the PLT relocs
4553 will fit above the initial reloc_count and the
4554 extra stuff will fit below. */
4555 loc = globals->root.srelplt->contents;
4556 loc += globals->root.srelplt->reloc_count++
4557 * RELOC_SIZE (globals);
4559 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
/* Zero both words of the descriptor; ld.so fills them in.  */
4561 bfd_put_64 (output_bfd, (bfd_vma) 0,
4562 globals->root.sgotplt->contents + off +
4563 globals->sgotplt_jump_table_size);
4564 bfd_put_64 (output_bfd, (bfd_vma) 0,
4565 globals->root.sgotplt->contents + off +
4566 globals->sgotplt_jump_table_size +
4570 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
4579 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4580 because such sections are not SEC_ALLOC and thus ld.so will
4581 not process them. */
4582 if (unresolved_reloc
4583 && !((input_section->flags & SEC_DEBUGGING) != 0
4585 && _bfd_elf_section_offset (output_bfd, info, input_section,
4586 +rel->r_offset) != (bfd_vma) - 1)
4588 (*_bfd_error_handler)
4590 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
4591 input_bfd, input_section, (long) rel->r_offset, howto->name,
4592 h->root.root.string);
/* Map any non-OK status onto the appropriate linker callback.  */
4596 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
4600 case bfd_reloc_overflow:
4601 /* If the overflowing reloc was to an undefined symbol,
4602 we have already printed one error message and there
4603 is no point complaining again. */
4605 h->root.type != bfd_link_hash_undefined)
4606 && (!((*info->callbacks->reloc_overflow)
4607 (info, (h ? &h->root : NULL), name, howto->name,
4608 (bfd_vma) 0, input_bfd, input_section,
4613 case bfd_reloc_undefined:
4614 if (!((*info->callbacks->undefined_symbol)
4615 (info, name, input_bfd, input_section,
4616 rel->r_offset, TRUE)))
4620 case bfd_reloc_outofrange:
4621 error_message = _("out of range");
4624 case bfd_reloc_notsupported:
4625 error_message = _("unsupported relocation");
4628 case bfd_reloc_dangerous:
4629 /* error_message should already be set. */
4633 error_message = _("unknown error");
4637 BFD_ASSERT (error_message != NULL);
4638 if (!((*info->callbacks->reloc_dangerous)
4639 (info, error_message, input_bfd, input_section,
4650 /* Set the right machine number. */
/* Object-recognition hook: records bfd_arch_aarch64/bfd_mach_aarch64
   on ABFD.  The return statement is on a line not visible in this
   excerpt.  */
4653 elf64_aarch64_object_p (bfd *abfd)
4655 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
4659 /* Function to keep AArch64 specific flags in the ELF header. */
4662 elf64_aarch64_set_private_flags (bfd *abfd, flagword flags)
/* A conflicting value while flags are already initialized is the
   error case; its handling is on lines not visible in this excerpt.  */
4664 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
4669 elf_elfheader (abfd)->e_flags = flags;
4670 elf_flags_init (abfd) = TRUE;
4676 /* Copy backend specific data from one object module to another. */
4679 elf64_aarch64_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
/* Only meaningful between two AArch64 ELF BFDs.  */
4683 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4686 in_flags = elf_elfheader (ibfd)->e_flags;
4688 elf_elfheader (obfd)->e_flags = in_flags;
4689 elf_flags_init (obfd) = TRUE;
4691 /* Also copy the EI_OSABI field. */
4692 elf_elfheader (obfd)->e_ident[EI_OSABI] =
4693 elf_elfheader (ibfd)->e_ident[EI_OSABI];
4695 /* Copy object attributes. */
4696 _bfd_elf_copy_obj_attributes (ibfd, obfd);
4701 /* Merge backend specific data from an object file to the output
4702 object file when linking. */
4705 elf64_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
4709 bfd_boolean flags_compatible = TRUE;
4712 /* Check if we have the same endianess. */
4713 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
4716 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
4719 /* The input BFD must have had its flags initialised. */
4720 /* The following seems bogus to me -- The flags are initialized in
4721 the assembler but I don't think an elf_flags_init field is
4722 written into the object. */
4723 /* BFD_ASSERT (elf_flags_init (ibfd)); */
4725 in_flags = elf_elfheader (ibfd)->e_flags;
4726 out_flags = elf_elfheader (obfd)->e_flags;
4728 if (!elf_flags_init (obfd))
4730 /* If the input is the default architecture and had the default
4731 flags then do not bother setting the flags for the output
4732 architecture, instead allow future merges to do this. If no
4733 future merges ever set these flags then they will retain their
4734 uninitialised values, which surprise surprise, correspond
4735 to the default values. */
4736 if (bfd_get_arch_info (ibfd)->the_default
4737 && elf_elfheader (ibfd)->e_flags == 0)
4740 elf_flags_init (obfd) = TRUE;
4741 elf_elfheader (obfd)->e_flags = in_flags;
4743 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
4744 && bfd_get_arch_info (obfd)->the_default)
4745 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
4746 bfd_get_mach (ibfd));
4751 /* Identical flags must be compatible. */
4752 if (in_flags == out_flags)
4755 /* Check to see if the input BFD actually contains any sections. If
4756 not, its flags may not have been initialised either, but it
4757 cannot actually cause any incompatiblity. Do not short-circuit
4758 dynamic objects; their section list may be emptied by
4759 elf_link_add_object_symbols.
4761 Also check to see if there are no code sections in the input.
4762 In this case there is no need to check for code specific flags.
4763 XXX - do we need to worry about floating-point format compatability
4764 in data sections ? */
4765 if (!(ibfd->flags & DYNAMIC))
4767 bfd_boolean null_input_bfd = TRUE;
4768 bfd_boolean only_data_sections = TRUE;
/* Scan the input's sections: a loadable section with contents and
   code flags means we cannot skip the code-flag checks.  */
4770 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4772 if ((bfd_get_section_flags (ibfd, sec)
4773 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
4774 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
4775 only_data_sections = FALSE;
4777 null_input_bfd = FALSE;
4781 if (null_input_bfd || only_data_sections)
4785 return flags_compatible;
4788 /* Display the flags field. */
/* Prints ABFD's private e_flags to the FILE passed via PTR, after the
   generic ELF private data.  Always returns (TRUE elided) having
   printed something, even for unrecognised bits.  */
4791 elf64_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
4793 FILE *file = (FILE *) ptr;
4794 unsigned long flags;
4796 BFD_ASSERT (abfd != NULL && ptr != NULL);
4798 /* Print normal ELF private data. */
4799 _bfd_elf_print_private_bfd_data (abfd, ptr);
4801 flags = elf_elfheader (abfd)->e_flags;
4802 /* Ignore init flag - it may not be set, despite the flags field
4803 containing valid data. */
4805 /* xgettext:c-format */
4806 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags)
;
4809 fprintf (file, _("<Unrecognised flag bits set>"));
4816 /* Update the got entry reference counts for the section being removed. */
/* Garbage-collection sweep hook: SEC is being discarded, so walk its
   RELOCS and undo the reference counting performed by check_relocs,
   decrementing GOT refcounts for GOT/TLS relocs and PLT refcounts for
   call/address relocs.  A no-op for relocatable links.  */
4819 elf64_aarch64_gc_sweep_hook (bfd *abfd,
4820 struct bfd_link_info *info,
4822 const Elf_Internal_Rela * relocs)
4824 struct elf64_aarch64_link_hash_table *htab;
4825 Elf_Internal_Shdr *symtab_hdr;
4826 struct elf_link_hash_entry **sym_hashes;
4827 struct elf_aarch64_local_symbol *locals;
4828 const Elf_Internal_Rela *rel, *relend;
4830 if (info->relocatable)
4833 htab = elf64_aarch64_hash_table (info);
4838 elf_section_data (sec)->local_dynrel = NULL;
4840 symtab_hdr = &elf_symtab_hdr (abfd);
4841 sym_hashes = elf_sym_hashes (abfd);
4843 locals = elf64_aarch64_locals (abfd);
4845 relend = relocs + sec->reloc_count;
4846 for (rel = relocs; rel < relend; rel++)
4848 unsigned long r_symndx;
4849 unsigned int r_type;
4850 struct elf_link_hash_entry *h = NULL;
4852 r_symndx = ELF64_R_SYM (rel->r_info);
4854 if (r_symndx >= symtab_hdr->sh_info)
4856 struct elf64_aarch64_link_hash_entry *eh;
4857 struct elf_dyn_relocs **pp;
4858 struct elf_dyn_relocs *p;
/* Global symbol: resolve indirection/warning links to the real
   hash entry, then prune its dyn_relocs records for SEC.  */
4860 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4861 while (h->root.type == bfd_link_hash_indirect
4862 || h->root.type == bfd_link_hash_warning)
4863 h = (struct elf_link_hash_entry *) h->root.u.i.link;
4864 eh = (struct elf64_aarch64_link_hash_entry *) h;
4866 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
4870 /* Everything must go for SEC. */
4878 Elf_Internal_Sym *isym;
4880 /* A local symbol. */
4881 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
/* Map the reloc through any applicable TLS transition before
   classifying it, mirroring check_relocs.  */
4887 r_type = ELF64_R_TYPE (rel->r_info);
4888 r_type = aarch64_tls_transition (abfd,info, r_type, h ,r_symndx);
4891 case R_AARCH64_LD64_GOT_LO12_NC:
4892 case R_AARCH64_GOT_LD_PREL19:
4893 case R_AARCH64_ADR_GOT_PAGE:
4894 case R_AARCH64_TLSGD_ADR_PAGE21:
4895 case R_AARCH64_TLSGD_ADD_LO12_NC:
4896 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4897 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4898 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
4899 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
4900 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4901 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
4902 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
4903 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4904 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
4905 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4906 case R_AARCH64_TLSDESC_ADR_PAGE:
4907 case R_AARCH64_TLSDESC_ADD_LO12_NC:
4908 case R_AARCH64_TLSDESC_LD64_LO12_NC:
/* GOT-using relocations: give back one GOT reference, either on
   the global hash entry or on the per-file local-symbol record.  */
4911 if (h->got.refcount > 0)
4912 h->got.refcount -= 1;
4914 else if (locals != NULL)
4916 if (locals[r_symndx].got_refcount > 0)
4917 locals[r_symndx].got_refcount -= 1;
4921 case R_AARCH64_ADR_PREL_PG_HI21_NC:
4922 case R_AARCH64_ADR_PREL_PG_HI21:
4923 case R_AARCH64_ADR_PREL_LO21:
/* Address-taking relocs only count PLT references in executables.  */
4924 if (h != NULL && info->executable)
4926 if (h->plt.refcount > 0)
4927 h->plt.refcount -= 1;
4931 case R_AARCH64_CALL26:
4932 case R_AARCH64_JUMP26:
4933 /* If this is a local symbol then we resolve it
4934 directly without creating a PLT entry. */
4938 if (h->plt.refcount > 0)
4939 h->plt.refcount -= 1;
4942 case R_AARCH64_ABS64:
4943 if (h != NULL && info->executable)
4945 if (h->plt.refcount > 0)
4946 h->plt.refcount -= 1;
4958 /* Adjust a symbol defined by a dynamic object and referenced by a
4959 regular object. The current definition is in some section of the
4960 dynamic object, but we're not including those sections. We have to
4961 change the definition to something the rest of the link can
/* ... deal with (comment truncated in this listing).  Decides between a
   PLT entry, reusing a weakdef's value, or allocating a copy reloc in
   .dynbss for H.  */
4965 elf64_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
4966 struct elf_link_hash_entry *h)
4968 struct elf64_aarch64_link_hash_table *htab;
4971 /* If this is a function, put it in the procedure linkage table. We
4972 will fill in the contents of the procedure linkage table later,
4973 when we know the address of the .got section. */
4974 if (h->type == STT_FUNC || h->needs_plt)
4976 if (h->plt.refcount <= 0
4977 || SYMBOL_CALLS_LOCAL (info, h)
4978 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
4979 && h->root.type == bfd_link_hash_undefweak))
4981 /* This case can occur if we saw a CALL26 reloc in
4982 an input file, but the symbol wasn't referred to
4983 by a dynamic object or all references were
4984 garbage collected. In which case we can end up
/* ... resolving it directly, so no PLT slot is needed: -1 marks
   "no PLT entry".  */
4986 h->plt.offset = (bfd_vma) - 1;
4993 /* It's possible that we incorrectly decided a .plt reloc was
4994 needed for an R_X86_64_PC32 reloc to a non-function sym in
4995 check_relocs. We can't decide accurately between function and
4996 non-function syms in check-relocs; Objects loaded later in
4997 the link may change h->type. So fix it now. */
4998 h->plt.offset = (bfd_vma) - 1;
5001 /* If this is a weak symbol, and there is a real definition, the
5002 processor independent code will have arranged for us to see the
5003 real definition first, and we can just use the same value. */
5004 if (h->u.weakdef != NULL)
5006 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
5007 || h->u.weakdef->root.type == bfd_link_hash_defweak);
5008 h->root.u.def.section = h->u.weakdef->root.u.def.section;
5009 h->root.u.def.value = h->u.weakdef->root.u.def.value;
5010 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
5011 h->non_got_ref = h->u.weakdef->non_got_ref;
5015 /* If we are creating a shared library, we must presume that the
5016 only references to the symbol are via the global offset table.
5017 For such cases we need not do anything here; the relocations will
5018 be handled correctly by relocate_section. */
5022 /* If there are no references to this symbol that do not use the
5023 GOT, we don't need to generate a copy reloc. */
5024 if (!h->non_got_ref)
5027 /* If -z nocopyreloc was given, we won't generate them either. */
5028 if (info->nocopyreloc)
5034 /* We must allocate the symbol in our .dynbss section, which will
5035 become part of the .bss section of the executable. There will be
5036 an entry for this symbol in the .dynsym section. The dynamic
5037 object will contain position independent code, so all references
5038 from the dynamic object to this symbol will go through the global
5039 offset table. The dynamic linker will use the .dynsym entry to
5040 determine the address it must put in the global offset table, so
5041 both the dynamic object and the regular object will refer to the
5042 same memory location for the variable. */
5044 htab = elf64_aarch64_hash_table (info);
5046 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
5047 to copy the initial value out of the dynamic object and into the
5048 runtime process image. */
5049 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
5051 htab->srelbss->size += RELOC_SIZE (htab);
5057 return _bfd_elf_adjust_dynamic_copy (h, s);
/* Lazily allocate the per-BFD array of NUMBER local-symbol records
   (zero-initialised) used to track local GOT refcounts/types.  A no-op
   if the array already exists; failure paths are elided in this
   listing.  */
5062 elf64_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5064 struct elf_aarch64_local_symbol *locals;
5065 locals = elf64_aarch64_locals (abfd);
5068 locals = (struct elf_aarch64_local_symbol *)
5069 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5072 elf64_aarch64_locals (abfd) = locals;
5077 /* Look through the relocs for a section during the first phase. */
/* For each relocation in SEC, record what the final link will need:
   dynamic reloc space for ABS64 in shared output, GOT refcounts and
   GOT types for GOT/TLS relocs, and PLT refcounts for calls and
   address-taking relocs.  The inverse of gc_sweep_hook.  */
5080 elf64_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5081 asection *sec, const Elf_Internal_Rela *relocs)
5083 Elf_Internal_Shdr *symtab_hdr;
5084 struct elf_link_hash_entry **sym_hashes;
5085 const Elf_Internal_Rela *rel;
5086 const Elf_Internal_Rela *rel_end;
5089 struct elf64_aarch64_link_hash_table *htab;
5091 if (info->relocatable)
5094 BFD_ASSERT (is_aarch64_elf (abfd));
5096 htab = elf64_aarch64_hash_table (info);
5099 symtab_hdr = &elf_symtab_hdr (abfd);
5100 sym_hashes = elf_sym_hashes (abfd);
5102 rel_end = relocs + sec->reloc_count;
5103 for (rel = relocs; rel < rel_end; rel++)
5105 struct elf_link_hash_entry *h;
5106 unsigned long r_symndx;
5107 unsigned int r_type;
5109 r_symndx = ELF64_R_SYM (rel->r_info);
5110 r_type = ELF64_R_TYPE (rel->r_info);
/* Reject relocations whose symbol index lies outside the symbol
   table.  */
5112 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5114 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5119 if (r_symndx < symtab_hdr->sh_info)
5123 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5124 while (h->root.type == bfd_link_hash_indirect
5125 || h->root.type == bfd_link_hash_warning)
5126 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5128 /* PR15323, ref flags aren't set for references in the same
5130 h->root.non_ir_ref = 1;
5133 /* Could be done earlier, if h were already available. */
5134 r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5138 case R_AARCH64_ABS64:
5140 /* We don't need to handle relocs into sections not going into
5141 the "real" output. */
5142 if ((sec->flags & SEC_ALLOC) == 0)
5150 h->plt.refcount += 1;
5151 h->pointer_equality_needed = 1;
5154 /* No need to do anything if we're not creating a shared
5160 struct elf_dyn_relocs *p;
5161 struct elf_dyn_relocs **head;
5163 /* We must copy these reloc types into the output file.
5164 Create a reloc section in dynobj and make room for
5168 if (htab->root.dynobj == NULL)
5169 htab->root.dynobj = abfd;
5171 sreloc = _bfd_elf_make_dynamic_reloc_section
5172 (sec, htab->root.dynobj, 3, abfd, /*rela? */ TRUE);
5178 /* If this is a global symbol, we count the number of
5179 relocations we need for this symbol. */
5182 struct elf64_aarch64_link_hash_entry *eh;
5183 eh = (struct elf64_aarch64_link_hash_entry *) h;
5184 head = &eh->dyn_relocs;
5188 /* Track dynamic relocs needed for local syms too.
5189 We really need local syms available to do this
5194 Elf_Internal_Sym *isym;
5196 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5201 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5205 /* Beware of type punned pointers vs strict aliasing
5207 vpp = &(elf_section_data (s)->local_dynrel);
5208 head = (struct elf_dyn_relocs **) vpp;
/* Append to (or extend) the per-symbol / per-section list of
   pending dynamic relocations.  */
5212 if (p == NULL || p->sec != sec)
5214 bfd_size_type amt = sizeof *p;
5215 p = ((struct elf_dyn_relocs *)
5216 bfd_zalloc (htab->root.dynobj, amt));
5229 /* RR: We probably want to keep a consistency check that
5230 there are no dangling GOT_PAGE relocs. */
5231 case R_AARCH64_LD64_GOT_LO12_NC:
5232 case R_AARCH64_GOT_LD_PREL19:
5233 case R_AARCH64_ADR_GOT_PAGE:
5234 case R_AARCH64_TLSGD_ADR_PAGE21:
5235 case R_AARCH64_TLSGD_ADD_LO12_NC:
5236 case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5237 case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5238 case R_AARCH64_TLSLE_ADD_TPREL_LO12:
5239 case R_AARCH64_TLSLE_ADD_TPREL_HI12:
5240 case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5241 case R_AARCH64_TLSLE_MOVW_TPREL_G2:
5242 case R_AARCH64_TLSLE_MOVW_TPREL_G1:
5243 case R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5244 case R_AARCH64_TLSLE_MOVW_TPREL_G0:
5245 case R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5246 case R_AARCH64_TLSDESC_ADR_PAGE:
5247 case R_AARCH64_TLSDESC_ADD_LO12_NC:
5248 case R_AARCH64_TLSDESC_LD64_LO12_NC:
5251 unsigned old_got_type;
/* GOT-using reloc: bump the GOT refcount and merge the required
   GOT entry type (normal / GD / TLSDESC / IE) with whatever was
   recorded for this symbol so far.  */
5253 got_type = aarch64_reloc_got_type (r_type);
5257 h->got.refcount += 1;
5258 old_got_type = elf64_aarch64_hash_entry (h)->got_type;
5262 struct elf_aarch64_local_symbol *locals;
5264 if (!elf64_aarch64_allocate_local_symbols
5265 (abfd, symtab_hdr->sh_info))
5268 locals = elf64_aarch64_locals (abfd);
5269 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5270 locals[r_symndx].got_refcount += 1;
5271 old_got_type = locals[r_symndx].got_type;
5274 /* If a variable is accessed with both general dynamic TLS
5275 methods, two slots may be created. */
5276 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
5277 got_type |= old_got_type;
5279 /* We will already have issued an error message if there
5280 is a TLS/non-TLS mismatch, based on the symbol type.
5281 So just combine any TLS types needed. */
5282 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
5283 && got_type != GOT_NORMAL)
5284 got_type |= old_got_type;
5286 /* If the symbol is accessed by both IE and GD methods, we
5287 are able to relax. Turn off the GD flag, without
5288 messing up with any other kind of TLS types that may be
5290 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
5291 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
5293 if (old_got_type != got_type)
5296 elf64_aarch64_hash_entry (h)->got_type = got_type;
5299 struct elf_aarch64_local_symbol *locals;
5300 locals = elf64_aarch64_locals (abfd);
5301 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5302 locals[r_symndx].got_type = got_type;
/* Make sure a .got exists as soon as any GOT reloc is seen.  */
5306 if (htab->root.sgot == NULL)
5308 if (htab->root.dynobj == NULL)
5309 htab->root.dynobj = abfd;
5310 if (!_bfd_elf_create_got_section (htab->root.dynobj, info))
5316 case R_AARCH64_ADR_PREL_PG_HI21_NC:
5317 case R_AARCH64_ADR_PREL_PG_HI21:
5318 case R_AARCH64_ADR_PREL_LO21:
5319 if (h != NULL && info->executable)
5321 /* If this reloc is in a read-only section, we might
5322 need a copy reloc. We can't check reliably at this
5323 stage whether the section is read-only, as input
5324 sections have not yet been mapped to output sections.
5325 Tentatively set the flag for now, and correct in
5326 adjust_dynamic_symbol. */
5328 h->plt.refcount += 1;
5329 h->pointer_equality_needed = 1;
5331 /* FIXME:: RR need to handle these in shared libraries
5332 and essentially bomb out as these being non-PIC
5333 relocations in shared libraries. */
5336 case R_AARCH64_CALL26:
5337 case R_AARCH64_JUMP26:
5338 /* If this is a local symbol then we resolve it
5339 directly without creating a PLT entry. */
5344 h->plt.refcount += 1;
5351 /* Treat mapping symbols as special target symbols. */
/* Returns non-zero when SYM's name is an AArch64 mapping symbol
   ($x / $d style), so generic code treats it specially.  */
5354 elf64_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
5357 return bfd_is_aarch64_special_symbol_name (sym->name,
5358 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
5361 /* This is a copy of elf_find_function () from elf.c except that
5362 AArch64 mapping symbols are ignored when looking for function names. */
/* Scans SYMBOLS for the function symbol in SECTION whose value is the
   greatest one not exceeding OFFSET, recording the most recent file
   symbol seen along the way.  Stores results through FILENAME_PTR and
   FUNCTIONNAME_PTR when non-NULL.  */
5365 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
5369 const char **filename_ptr,
5370 const char **functionname_ptr)
5372 const char *filename = NULL;
5373 asymbol *func = NULL;
5374 bfd_vma low_func = 0;
5377 for (p = symbols; *p != NULL; p++)
5381 q = (elf_symbol_type *) * p;
5383 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
5388 filename = bfd_asymbol_name (&q->symbol);
5392 /* Skip mapping symbols. */
5393 if ((q->symbol.flags & BSF_LOCAL)
5394 && (bfd_is_aarch64_special_symbol_name
5395 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
5398 if (bfd_get_section (&q->symbol) == section
5399 && q->symbol.value >= low_func && q->symbol.value <= offset)
/* This symbol is closer to OFFSET than any seen so far.  */
5401 func = (asymbol *) q;
5402 low_func = q->symbol.value;
5412 *filename_ptr = filename;
5413 if (functionname_ptr)
5414 *functionname_ptr = bfd_asymbol_name (func);
5420 /* Find the nearest line to a particular section and offset, for error
5421 reporting. This code is a duplicate of the code in elf.c, except
5422 that it uses aarch64_elf_find_function. */
/* Tries DWARF2 first, then stabs, then a plain symbol scan, filling in
   FILENAME_PTR / FUNCTIONNAME_PTR / LINE_PTR for SECTION+OFFSET.  */
5425 elf64_aarch64_find_nearest_line (bfd *abfd,
5429 const char **filename_ptr,
5430 const char **functionname_ptr,
5431 unsigned int *line_ptr)
5433 bfd_boolean found = FALSE;
5435 /* We skip _bfd_dwarf1_find_nearest_line since no known AArch64
5436 toolchain uses it. */
5438 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
5439 section, symbols, offset,
5440 filename_ptr, functionname_ptr,
5442 &elf_tdata (abfd)->dwarf2_find_line_info))
/* DWARF found the line but possibly not the enclosing function;
   fall back to the symbol table for the function name only.  */
5444 if (!*functionname_ptr)
5445 aarch64_elf_find_function (abfd, section, symbols, offset,
5446 *filename_ptr ? NULL : filename_ptr,
5452 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
5453 &found, filename_ptr,
5454 functionname_ptr, line_ptr,
5455 &elf_tdata (abfd)->line_info))
5458 if (found && (*functionname_ptr || *line_ptr))
5461 if (symbols == NULL)
5464 if (!aarch64_elf_find_function (abfd, section, symbols, offset,
5465 filename_ptr, functionname_ptr))
/* Reports the caller of an inlined function at the current DWARF2
   lookup position; thin wrapper over _bfd_dwarf2_find_inliner_info.  */
5473 elf64_aarch64_find_inliner_info (bfd *abfd,
5474 const char **filename_ptr,
5475 const char **functionname_ptr,
5476 unsigned int *line_ptr)
5479 found = _bfd_dwarf2_find_inliner_info
5480 (abfd, filename_ptr,
5481 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
/* Final fix-up of the output ELF header: force EI_OSABI to 0
   (ELFOSABI_NONE) and set the AArch64 ABI version byte.  */
5487 elf64_aarch64_post_process_headers (bfd *abfd,
5488 struct bfd_link_info *link_info
5491 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
5493 i_ehdrp = elf_elfheader (abfd);
5494 i_ehdrp->e_ident[EI_OSABI] = 0;
5495 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
/* Classify a dynamic relocation for the dynamic linker's benefit:
   RELATIVE / PLT / COPY get their own classes so they can be sorted
   appropriately; everything else is "normal".  */
5498 static enum elf_reloc_type_class
5499 elf64_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
5500 const asection *rel_sec ATTRIBUTE_UNUSED,
5501 const Elf_Internal_Rela *rela)
5503 switch ((int) ELF64_R_TYPE (rela->r_info))
5505 case R_AARCH64_RELATIVE:
5506 return reloc_class_relative;
5507 case R_AARCH64_JUMP_SLOT:
5508 return reloc_class_plt;
5509 case R_AARCH64_COPY:
5510 return reloc_class_copy;
5512 return reloc_class_normal;
5516 /* Set the right machine number for an AArch64 ELF file. */
/* NOTE(review): despite the (copied) comment above, this hook adjusts
   section FLAGS: SHT_NOTE sections are made link-once with
   same-contents duplicate handling.  */
5519 elf64_aarch64_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
5521 if (hdr->sh_type == SHT_NOTE)
5522 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
5527 /* Handle an AArch64 specific section when reading an object file. This is
5528 called when bfd_section_from_shdr finds a section with an unknown
/* ... type.  Recognises SHT_AARCH64_ATTRIBUTES and then defers to the
   generic _bfd_elf_make_section_from_shdr.  */
5532 elf64_aarch64_section_from_shdr (bfd *abfd,
5533 Elf_Internal_Shdr *hdr,
5534 const char *name, int shindex)
5536 /* There ought to be a place to keep ELF backend specific flags, but
5537 at the moment there isn't one. We just keep track of the
5538 sections by their name, instead. Fortunately, the ABI gives
5539 names for all the AArch64 specific sections, so we will probably get
5541 switch (hdr->sh_type)
5543 case SHT_AARCH64_ATTRIBUTES:
5550 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5556 /* A structure used to record a list of sections, independently
5557 of the next and prev fields in the asection structure. */
/* Doubly-linked list node; `sec' member elided in this listing.  */
5558 typedef struct section_list
5561 struct section_list *next;
5562 struct section_list *prev;
5566 /* Unfortunately we need to keep a list of sections for which
5567 an _aarch64_elf_section_data structure has been allocated. This
5568 is because it is possible for functions like elf64_aarch64_write_section
5569 to be called on a section which has had an elf_data_structure
5570 allocated for it (and so the used_by_bfd field is valid) but
5571 for which the AArch64 extended version of this structure - the
5572 _aarch64_elf_section_data structure - has not been allocated. */
5573 static section_list *sections_with_aarch64_elf_section_data = NULL;
/* Pushes SEC onto the head of the global
   sections_with_aarch64_elf_section_data list.  Allocation-failure
   handling is elided in this listing.  */
5576 record_section_with_aarch64_elf_section_data (asection *sec)
5578 struct section_list *entry;
5580 entry = bfd_malloc (sizeof (*entry));
5584 entry->next = sections_with_aarch64_elf_section_data;
5586 if (entry->next != NULL)
5587 entry->next->prev = entry;
5588 sections_with_aarch64_elf_section_data = entry;
/* Looks up SEC in the global section list, returning its node or NULL.
   Caches the previous node of the last hit in a function-static
   pointer to speed up the common backwards-lookup pattern.  */
5591 static struct section_list *
5592 find_aarch64_elf_section_entry (asection *sec)
5594 struct section_list *entry;
5595 static struct section_list *last_entry = NULL;
5597 /* This is a short cut for the typical case where the sections are added
5598 to the sections_with_aarch64_elf_section_data list in forward order and
5599 then looked up here in backwards order. This makes a real difference
5600 to the ld-srec/sec64k.exp linker test. */
5601 entry = sections_with_aarch64_elf_section_data;
5602 if (last_entry != NULL)
5604 if (last_entry->sec == sec)
5606 else if (last_entry->next != NULL && last_entry->next->sec == sec)
5607 entry = last_entry->next;
5610 for (; entry; entry = entry->next)
5611 if (entry->sec == sec)
5615 /* Record the entry prior to this one - it is the entry we are
5616 most likely to want to locate next time. Also this way if we
5617 have been called from
5618 unrecord_section_with_aarch64_elf_section_data () we will not
5619 be caching a pointer that is about to be freed. */
5620 last_entry = entry->prev;
/* Unlinks SEC's node from the global section list (fixing up prev/next
   and the list head); the free of the node is elided in this
   listing.  */
5626 unrecord_section_with_aarch64_elf_section_data (asection *sec)
5628 struct section_list *entry;
5630 entry = find_aarch64_elf_section_entry (sec);
5634 if (entry->prev != NULL)
5635 entry->prev->next = entry->next;
5636 if (entry->next != NULL)
5637 entry->next->prev = entry->prev;
5638 if (entry == sections_with_aarch64_elf_section_data)
5639 sections_with_aarch64_elf_section_data = entry->next;
/* Context handed to the map-symbol output callbacks: link info, the
   section being processed plus its output index, and the generic
   symbol-emission callback `func' (fields partially elided in this
   listing).  */
5648 struct bfd_link_info *info;
5651 int (*func) (void *, const char *, Elf_Internal_Sym *,
5652 asection *, struct elf_link_hash_entry *);
5653 } output_arch_syminfo;
/* Indices into the mapping-symbol name table ("$x" code, "$d" data);
   enumerators elided in this listing.  */
5655 enum map_symbol_type
5662 /* Output a single mapping symbol. */
/* Emits a local STT_NOTYPE mapping symbol ("$x" for code, "$d" for
   data) at OFFSET within OSI->sec, via OSI->func.  Returns TRUE on
   success.  */
5665 elf64_aarch64_output_map_sym (output_arch_syminfo *osi,
5666 enum map_symbol_type type, bfd_vma offset)
5668 static const char *names[2] = { "$x", "$d" };
5669 Elf_Internal_Sym sym;
5671 sym.st_value = (osi->sec->output_section->vma
5672 + osi->sec->output_offset + offset);
5675 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5676 sym.st_shndx = osi->sec_shndx;
5677 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
5682 /* Output mapping symbols for PLT entries associated with H. */
/* Hash-table traversal callback (INF is the output_arch_syminfo):
   skips indirect entries, follows warning links, and emits a "$x"
   mapping symbol at H's PLT entry if it has one.  */
5685 elf64_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
5687 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
5690 if (h->root.type == bfd_link_hash_indirect)
5693 if (h->root.type == bfd_link_hash_warning)
5694 /* When warning symbols are created, they **replace** the "real"
5695 entry in the hash table, thus we never get to see the real
5696 symbol in a hash traversal. So look at it now. */
5697 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5699 if (h->plt.offset == (bfd_vma) - 1)
5702 addr = h->plt.offset;
5705 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5712 /* Output a single local symbol for a generated stub. */
/* Emits a local STT_FUNC symbol NAME of the given SIZE at OFFSET within
   OSI->sec, via OSI->func.  Returns TRUE on success.  */
5715 elf64_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
5716 bfd_vma offset, bfd_vma size)
5718 Elf_Internal_Sym sym;
5720 sym.st_value = (osi->sec->output_section->vma
5721 + osi->sec->output_offset + offset);
5724 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5725 sym.st_shndx = osi->sec_shndx;
5726 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
/* Stub-hash traversal callback: for each linker stub in the section
   currently being processed (OSI->sec), emit its named STT_FUNC symbol
   plus the "$x"/"$d" mapping symbols describing its layout.  */
5730 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
5732 struct elf64_aarch64_stub_hash_entry *stub_entry;
5736 output_arch_syminfo *osi;
5738 /* Massage our args to the form they really have. */
5739 stub_entry = (struct elf64_aarch64_stub_hash_entry *) gen_entry;
5740 osi = (output_arch_syminfo *) in_arg;
5742 stub_sec = stub_entry->stub_sec;
5744 /* Ensure this stub is attached to the current section being
5746 if (stub_sec != osi->sec)
5749 addr = (bfd_vma) stub_entry->stub_offset;
5751 stub_name = stub_entry->output_name;
5753 switch (stub_entry->stub_type)
5755 case aarch64_stub_adrp_branch:
5756 if (!elf64_aarch64_output_stub_sym (osi, stub_name, addr,
5757 sizeof (aarch64_adrp_branch_stub)))
5759 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
5762 case aarch64_stub_long_branch:
5763 if (!elf64_aarch64_output_stub_sym
5764 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
5766 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
/* The long-branch stub has a literal pool 16 bytes in; mark it
   as data.  */
5768 if (!elf64_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
5778 /* Output mapping symbols for linker generated sections. */
/* Walks every stub section in the stub BFD emitting stub mapping
   symbols, then emits the mapping symbols for the PLT.  FUNC is the
   generic linker's symbol-output callback.  */
5781 elf64_aarch64_output_arch_local_syms (bfd *output_bfd,
5782 struct bfd_link_info *info,
5784 int (*func) (void *, const char *,
5787 struct elf_link_hash_entry
5790 output_arch_syminfo osi;
5791 struct elf64_aarch64_link_hash_table *htab;
5793 htab = elf64_aarch64_hash_table (info);
5799 /* Long calls stubs. */
5800 if (htab->stub_bfd && htab->stub_bfd->sections)
5804 for (stub_sec = htab->stub_bfd->sections;
5805 stub_sec != NULL; stub_sec = stub_sec->next)
5807 /* Ignore non-stub sections. */
5808 if (!strstr (stub_sec->name, STUB_SUFFIX))
5813 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5814 (output_bfd, osi.sec->output_section);
5816 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
5821 /* Finally, output mapping symbols for the PLT. */
5822 if (!htab->root.splt || htab->root.splt->size == 0)
5825 /* For now live without mapping symbols for the plt. */
5826 osi.sec_shndx = _bfd_elf_section_from_bfd_section
5827 (output_bfd, htab->root.splt->output_section);
5828 osi.sec = htab->root.splt;
5830 elf_link_hash_traverse (&htab->root, elf64_aarch64_output_plt_map,
5837 /* Allocate target specific section data. */
/* new_section hook: attach a zeroed _aarch64_elf_section_data to SEC
   (if none is attached yet), remember SEC in the global tracking list,
   then chain to the generic ELF hook.  */
5840 elf64_aarch64_new_section_hook (bfd *abfd, asection *sec)
5842 if (!sec->used_by_bfd)
5844 _aarch64_elf_section_data *sdata;
5845 bfd_size_type amt = sizeof (*sdata);
5847 sdata = bfd_zalloc (abfd, amt);
5850 sec->used_by_bfd = sdata;
5853 record_section_with_aarch64_elf_section_data (sec);
5855 return _bfd_elf_new_section_hook (abfd, sec);
/* Adapter for bfd_map_over_sections: drops SEC from the global
   AArch64 section-data tracking list.  */
5860 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
5862 void *ignore ATTRIBUTE_UNUSED)
5864 unrecord_section_with_aarch64_elf_section_data (sec);
/* close_and_cleanup hook: purge ABFD's sections from the tracking list
   before the generic ELF cleanup.  */
5868 elf64_aarch64_close_and_cleanup (bfd *abfd)
5871 bfd_map_over_sections (abfd,
5872 unrecord_section_via_map_over_sections, NULL);
5874 return _bfd_elf_close_and_cleanup (abfd);
/* bfd_free_cached_info hook: purge ABFD's sections from the tracking
   list before the generic cached-info release.  */
5878 elf64_aarch64_bfd_free_cached_info (bfd *abfd)
5881 bfd_map_over_sections (abfd,
5882 unrecord_section_via_map_over_sections, NULL);
5884 return _bfd_free_cached_info (abfd);
/* Only STT_FUNC symbols are treated as functions by this backend.  */
5888 elf64_aarch64_is_function_type (unsigned int type)
5890 return type == STT_FUNC;
5893 /* Create dynamic sections. This is different from the ARM backend in that
5894 the got, plt, gotplt and their relocation sections are all created in the
5895 standard part of the bfd elf backend. */
/* Creates the standard dynamic sections, caches .dynbss/.rela.bss in
   the hash table, and defines _GLOBAL_OFFSET_TABLE_ at the start of
   the .got.  Returns FALSE on failure.  */
5898 elf64_aarch64_create_dynamic_sections (bfd *dynobj,
5899 struct bfd_link_info *info)
5901 struct elf64_aarch64_link_hash_table *htab;
5902 struct elf_link_hash_entry *h;
5904 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
5907 htab = elf64_aarch64_hash_table (info);
5908 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
5910 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
/* .rela.bss is only required for executables (copy relocs).  */
5912 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
5915 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the
5916 dynobj's .got section. We don't do this in the linker script
5917 because we don't want to define the symbol if we are not creating
5918 a global offset table. */
5919 h = _bfd_elf_define_linkage_sym (dynobj, info,
5920 htab->root.sgot, "_GLOBAL_OFFSET_TABLE_");
5921 elf_hash_table (info)->hgot = h;
5929 /* Allocate space in .plt, .got and associated reloc sections for
5933 elf64_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
5935 struct bfd_link_info *info;
5936 struct elf64_aarch64_link_hash_table *htab;
5937 struct elf64_aarch64_link_hash_entry *eh;
5938 struct elf_dyn_relocs *p;
5940 /* An example of a bfd_link_hash_indirect symbol is versioned
5941 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
5942 -> __gxx_personality_v0(bfd_link_hash_defined)
5944 There is no need to process bfd_link_hash_indirect symbols here
5945 because we will also be presented with the concrete instance of
5946 the symbol and elf64_aarch64_copy_indirect_symbol () will have been
5947 called to copy all relevant data from the generic to the concrete
5950 if (h->root.type == bfd_link_hash_indirect)
5953 if (h->root.type == bfd_link_hash_warning)
5954 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5956 info = (struct bfd_link_info *) inf;
5957 htab = elf64_aarch64_hash_table (info);
5959 if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
5961 /* Make sure this symbol is output as a dynamic symbol.
5962 Undefined weak syms won't yet be marked as dynamic. */
5963 if (h->dynindx == -1 && !h->forced_local)
5965 if (!bfd_elf_link_record_dynamic_symbol (info, h))
5969 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
5971 asection *s = htab->root.splt;
5973 /* If this is the first .plt entry, make room for the special
5976 s->size += htab->plt_header_size;
5978 h->plt.offset = s->size;
5980 /* If this symbol is not defined in a regular file, and we are
5981 not generating a shared library, then set the symbol to this
5982 location in the .plt. This is required to make function
5983 pointers compare as equal between the normal executable and
5984 the shared library. */
5985 if (!info->shared && !h->def_regular)
5987 h->root.u.def.section = s;
5988 h->root.u.def.value = h->plt.offset;
5991 /* Make room for this entry. For now we only create the
5992 small model PLT entries. We later need to find a way
5993 of relaxing into these from the large model PLT entries. */
5994 s->size += PLT_SMALL_ENTRY_SIZE;
5996 /* We also need to make an entry in the .got.plt section, which
5997 will be placed in the .got section by the linker script. */
5998 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
6000 /* We also need to make an entry in the .rela.plt section. */
6001 htab->root.srelplt->size += RELOC_SIZE (htab);
6003 /* We need to ensure that all GOT entries that serve the PLT
6004 are consecutive with the special GOT slots [0] [1] and
6005 [2]. Any addtional relocations, such as
6006 R_AARCH64_TLSDESC, must be placed after the PLT related
6007 entries. We abuse the reloc_count such that during
6008 sizing we adjust reloc_count to indicate the number of
6009 PLT related reserved entries. In subsequent phases when
6010 filling in the contents of the reloc entries, PLT related
6011 entries are placed by computing their PLT index (0
6012 .. reloc_count). While other none PLT relocs are placed
6013 at the slot indicated by reloc_count and reloc_count is
6016 htab->root.srelplt->reloc_count++;
6020 h->plt.offset = (bfd_vma) - 1;
6026 h->plt.offset = (bfd_vma) - 1;
6030 eh = (struct elf64_aarch64_link_hash_entry *) h;
6031 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6033 if (h->got.refcount > 0)
6036 unsigned got_type = elf64_aarch64_hash_entry (h)->got_type;
6038 h->got.offset = (bfd_vma) - 1;
6040 dyn = htab->root.dynamic_sections_created;
6042 /* Make sure this symbol is output as a dynamic symbol.
6043 Undefined weak syms won't yet be marked as dynamic. */
6044 if (dyn && h->dynindx == -1 && !h->forced_local)
6046 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6050 if (got_type == GOT_UNKNOWN)
6053 else if (got_type == GOT_NORMAL)
6055 h->got.offset = htab->root.sgot->size;
6056 htab->root.sgot->size += GOT_ENTRY_SIZE;
6057 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6058 || h->root.type != bfd_link_hash_undefweak)
6060 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6062 htab->root.srelgot->size += RELOC_SIZE (htab);
6068 if (got_type & GOT_TLSDESC_GD)
6070 eh->tlsdesc_got_jump_table_offset =
6071 (htab->root.sgotplt->size
6072 - aarch64_compute_jump_table_size (htab));
6073 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6074 h->got.offset = (bfd_vma) - 2;
6077 if (got_type & GOT_TLS_GD)
6079 h->got.offset = htab->root.sgot->size;
6080 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6083 if (got_type & GOT_TLS_IE)
6085 h->got.offset = htab->root.sgot->size;
6086 htab->root.sgot->size += GOT_ENTRY_SIZE;
6089 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6090 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6091 || h->root.type != bfd_link_hash_undefweak)
6094 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6096 if (got_type & GOT_TLSDESC_GD)
6098 htab->root.srelplt->size += RELOC_SIZE (htab);
6099 /* Note reloc_count not incremented here! We have
6100 already adjusted reloc_count for this relocation
6103 /* TLSDESC PLT is now needed, but not yet determined. */
6104 htab->tlsdesc_plt = (bfd_vma) - 1;
6107 if (got_type & GOT_TLS_GD)
6108 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6110 if (got_type & GOT_TLS_IE)
6111 htab->root.srelgot->size += RELOC_SIZE (htab);
6117 h->got.offset = (bfd_vma) - 1;
6120 if (eh->dyn_relocs == NULL)
6123 /* In the shared -Bsymbolic case, discard space allocated for
6124 dynamic pc-relative relocs against symbols which turn out to be
6125 defined in regular objects. For the normal shared case, discard
6126 space for pc-relative relocs that have become local due to symbol
6127 visibility changes. */
6131 /* Relocs that use pc_count are those that appear on a call
6132 insn, or certain REL relocs that can be generated via assembly.
6133 We want calls to protected symbols to resolve directly to the
6134 function rather than going via the plt. If people want
6135 function pointer comparisons to work as expected then they
6136 should avoid writing weird assembly. */
6137 if (SYMBOL_CALLS_LOCAL (info, h))
6139 struct elf_dyn_relocs **pp;
6141 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6143 p->count -= p->pc_count;
6152 /* Also discard relocs on undefined weak syms with non-default
6154 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6156 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6157 eh->dyn_relocs = NULL;
6159 /* Make sure undefined weak symbols are output as a dynamic
6161 else if (h->dynindx == -1
6163 && !bfd_elf_link_record_dynamic_symbol (info, h))
6168 else if (ELIMINATE_COPY_RELOCS)
6170 /* For the non-shared case, discard space for relocs against
6171 symbols which turn out to need copy relocs or are not
6177 || (htab->root.dynamic_sections_created
6178 && (h->root.type == bfd_link_hash_undefweak
6179 || h->root.type == bfd_link_hash_undefined))))
6181 /* Make sure this symbol is output as a dynamic symbol.
6182 Undefined weak syms won't yet be marked as dynamic. */
6183 if (h->dynindx == -1
6185 && !bfd_elf_link_record_dynamic_symbol (info, h))
6188 /* If that succeeded, we know we'll be keeping all the
6190 if (h->dynindx != -1)
6194 eh->dyn_relocs = NULL;
6199 /* Finally, allocate space. */
6200 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6204 sreloc = elf_section_data (p->sec)->sreloc;
6206 BFD_ASSERT (sreloc != NULL);
6208 sreloc->size += p->count * RELOC_SIZE (htab);
6217 /* This is the most important function of all . Innocuosly named
6220 elf64_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
6221 struct bfd_link_info *info)
6223 struct elf64_aarch64_link_hash_table *htab;
6229 htab = elf64_aarch64_hash_table ((info));
6230 dynobj = htab->root.dynobj;
6232 BFD_ASSERT (dynobj != NULL);
6234 if (htab->root.dynamic_sections_created)
6236 if (info->executable)
6238 s = bfd_get_linker_section (dynobj, ".interp");
6241 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
6242 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
6246 /* Set up .got offsets for local syms, and space for local dynamic
6248 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
6250 struct elf_aarch64_local_symbol *locals = NULL;
6251 Elf_Internal_Shdr *symtab_hdr;
6255 if (!is_aarch64_elf (ibfd))
6258 for (s = ibfd->sections; s != NULL; s = s->next)
6260 struct elf_dyn_relocs *p;
6262 for (p = (struct elf_dyn_relocs *)
6263 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
6265 if (!bfd_is_abs_section (p->sec)
6266 && bfd_is_abs_section (p->sec->output_section))
6268 /* Input section has been discarded, either because
6269 it is a copy of a linkonce section or due to
6270 linker script /DISCARD/, so we'll be discarding
6273 else if (p->count != 0)
6275 srel = elf_section_data (p->sec)->sreloc;
6276 srel->size += p->count * RELOC_SIZE (htab);
6277 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
6278 info->flags |= DF_TEXTREL;
6283 locals = elf64_aarch64_locals (ibfd);
6287 symtab_hdr = &elf_symtab_hdr (ibfd);
6288 srel = htab->root.srelgot;
6289 for (i = 0; i < symtab_hdr->sh_info; i++)
6291 locals[i].got_offset = (bfd_vma) - 1;
6292 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6293 if (locals[i].got_refcount > 0)
6295 unsigned got_type = locals[i].got_type;
6296 if (got_type & GOT_TLSDESC_GD)
6298 locals[i].tlsdesc_got_jump_table_offset =
6299 (htab->root.sgotplt->size
6300 - aarch64_compute_jump_table_size (htab));
6301 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6302 locals[i].got_offset = (bfd_vma) - 2;
6305 if (got_type & GOT_TLS_GD)
6307 locals[i].got_offset = htab->root.sgot->size;
6308 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6311 if (got_type & GOT_TLS_IE)
6313 locals[i].got_offset = htab->root.sgot->size;
6314 htab->root.sgot->size += GOT_ENTRY_SIZE;
6317 if (got_type == GOT_UNKNOWN)
6321 if (got_type == GOT_NORMAL)
6327 if (got_type & GOT_TLSDESC_GD)
6329 htab->root.srelplt->size += RELOC_SIZE (htab);
6330 /* Note RELOC_COUNT not incremented here! */
6331 htab->tlsdesc_plt = (bfd_vma) - 1;
6334 if (got_type & GOT_TLS_GD)
6335 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6337 if (got_type & GOT_TLS_IE)
6338 htab->root.srelgot->size += RELOC_SIZE (htab);
6343 locals[i].got_refcount = (bfd_vma) - 1;
6349 /* Allocate global sym .plt and .got entries, and space for global
6350 sym dynamic relocs. */
6351 elf_link_hash_traverse (&htab->root, elf64_aarch64_allocate_dynrelocs,
6355 /* For every jump slot reserved in the sgotplt, reloc_count is
6356 incremented. However, when we reserve space for TLS descriptors,
6357 it's not incremented, so in order to compute the space reserved
6358 for them, it suffices to multiply the reloc count by the jump
6361 if (htab->root.srelplt)
6362 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
6364 if (htab->tlsdesc_plt)
6366 if (htab->root.splt->size == 0)
6367 htab->root.splt->size += PLT_ENTRY_SIZE;
6369 htab->tlsdesc_plt = htab->root.splt->size;
6370 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
6372 /* If we're not using lazy TLS relocations, don't generate the
6373 GOT entry required. */
6374 if (!(info->flags & DF_BIND_NOW))
6376 htab->dt_tlsdesc_got = htab->root.sgot->size;
6377 htab->root.sgot->size += GOT_ENTRY_SIZE;
6381 /* We now have determined the sizes of the various dynamic sections.
6382 Allocate memory for them. */
6384 for (s = dynobj->sections; s != NULL; s = s->next)
6386 if ((s->flags & SEC_LINKER_CREATED) == 0)
6389 if (s == htab->root.splt
6390 || s == htab->root.sgot
6391 || s == htab->root.sgotplt
6392 || s == htab->root.iplt
6393 || s == htab->root.igotplt || s == htab->sdynbss)
6395 /* Strip this section if we don't need it; see the
6398 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
6400 if (s->size != 0 && s != htab->root.srelplt)
6403 /* We use the reloc_count field as a counter if we need
6404 to copy relocs into the output file. */
6405 if (s != htab->root.srelplt)
6410 /* It's not one of our sections, so don't allocate space. */
6416 /* If we don't need this section, strip it from the
6417 output file. This is mostly to handle .rela.bss and
6418 .rela.plt. We must create both sections in
6419 create_dynamic_sections, because they must be created
6420 before the linker maps input sections to output
6421 sections. The linker does that before
6422 adjust_dynamic_symbol is called, and it is that
6423 function which decides whether anything needs to go
6424 into these sections. */
6426 s->flags |= SEC_EXCLUDE;
6430 if ((s->flags & SEC_HAS_CONTENTS) == 0)
6433 /* Allocate memory for the section contents. We use bfd_zalloc
6434 here in case unused entries are not reclaimed before the
6435 section's contents are written out. This should not happen,
6436 but this way if it does, we get a R_AARCH64_NONE reloc instead
6438 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
6439 if (s->contents == NULL)
6443 if (htab->root.dynamic_sections_created)
6445 /* Add some entries to the .dynamic section. We fill in the
6446 values later, in elf64_aarch64_finish_dynamic_sections, but we
6447 must add the entries now so that we get the correct size for
6448 the .dynamic section. The DT_DEBUG entry is filled in by the
6449 dynamic linker and used by the debugger. */
6450 #define add_dynamic_entry(TAG, VAL) \
6451 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
6453 if (info->executable)
6455 if (!add_dynamic_entry (DT_DEBUG, 0))
6459 if (htab->root.splt->size != 0)
6461 if (!add_dynamic_entry (DT_PLTGOT, 0)
6462 || !add_dynamic_entry (DT_PLTRELSZ, 0)
6463 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
6464 || !add_dynamic_entry (DT_JMPREL, 0))
6467 if (htab->tlsdesc_plt
6468 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
6469 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
6475 if (!add_dynamic_entry (DT_RELA, 0)
6476 || !add_dynamic_entry (DT_RELASZ, 0)
6477 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
6480 /* If any dynamic relocs apply to a read-only section,
6481 then we need a DT_TEXTREL entry. */
6482 if ((info->flags & DF_TEXTREL) != 0)
6484 if (!add_dynamic_entry (DT_TEXTREL, 0))
6489 #undef add_dynamic_entry
6497 elf64_aarch64_update_plt_entry (bfd *output_bfd,
6498 unsigned int r_type,
6499 bfd_byte *plt_entry, bfd_vma value)
6501 reloc_howto_type *howto;
6502 howto = elf64_aarch64_howto_from_type (r_type);
6503 bfd_elf_aarch64_put_addend (output_bfd, plt_entry, howto, value);
6507 elf64_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
6508 struct elf64_aarch64_link_hash_table
6509 *htab, bfd *output_bfd)
6511 bfd_byte *plt_entry;
6514 bfd_vma gotplt_entry_address;
6515 bfd_vma plt_entry_address;
6516 Elf_Internal_Rela rela;
6519 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
6521 /* Offset in the GOT is PLT index plus got GOT headers(3)
6523 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
6524 plt_entry = htab->root.splt->contents + h->plt.offset;
6525 plt_entry_address = htab->root.splt->output_section->vma
6526 + htab->root.splt->output_section->output_offset + h->plt.offset;
6527 gotplt_entry_address = htab->root.sgotplt->output_section->vma +
6528 htab->root.sgotplt->output_offset + got_offset;
6530 /* Copy in the boiler-plate for the PLTn entry. */
6531 memcpy (plt_entry, elf64_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
6533 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6534 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6535 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6537 PG (gotplt_entry_address) -
6538 PG (plt_entry_address));
6540 /* Fill in the lo12 bits for the load from the pltgot. */
6541 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6543 PG_OFFSET (gotplt_entry_address));
6545 /* Fill in the the lo12 bits for the add from the pltgot entry. */
6546 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6548 PG_OFFSET (gotplt_entry_address));
6550 /* All the GOTPLT Entries are essentially initialized to PLT0. */
6551 bfd_put_64 (output_bfd,
6552 (htab->root.splt->output_section->vma
6553 + htab->root.splt->output_offset),
6554 htab->root.sgotplt->contents + got_offset);
6556 /* Fill in the entry in the .rela.plt section. */
6557 rela.r_offset = gotplt_entry_address;
6558 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_JUMP_SLOT);
6561 /* Compute the relocation entry to used based on PLT index and do
6562 not adjust reloc_count. The reloc_count has already been adjusted
6563 to account for this entry. */
6564 loc = htab->root.srelplt->contents + plt_index * RELOC_SIZE (htab);
6565 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6568 /* Size sections even though they're not dynamic. We use it to setup
6569 _TLS_MODULE_BASE_, if needed. */
6572 elf64_aarch64_always_size_sections (bfd *output_bfd,
6573 struct bfd_link_info *info)
6577 if (info->relocatable)
6580 tls_sec = elf_hash_table (info)->tls_sec;
6584 struct elf_link_hash_entry *tlsbase;
6586 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
6587 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
6591 struct bfd_link_hash_entry *h = NULL;
6592 const struct elf_backend_data *bed =
6593 get_elf_backend_data (output_bfd);
6595 if (!(_bfd_generic_link_add_one_symbol
6596 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
6597 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
6600 tlsbase->type = STT_TLS;
6601 tlsbase = (struct elf_link_hash_entry *) h;
6602 tlsbase->def_regular = 1;
6603 tlsbase->other = STV_HIDDEN;
6604 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
6611 /* Finish up dynamic symbol handling. We set the contents of various
6612 dynamic sections here. */
6614 elf64_aarch64_finish_dynamic_symbol (bfd *output_bfd,
6615 struct bfd_link_info *info,
6616 struct elf_link_hash_entry *h,
6617 Elf_Internal_Sym *sym)
6619 struct elf64_aarch64_link_hash_table *htab;
6620 htab = elf64_aarch64_hash_table (info);
6622 if (h->plt.offset != (bfd_vma) - 1)
6624 /* This symbol has an entry in the procedure linkage table. Set
6627 if (h->dynindx == -1
6628 || htab->root.splt == NULL
6629 || htab->root.sgotplt == NULL || htab->root.srelplt == NULL)
6632 elf64_aarch64_create_small_pltn_entry (h, htab, output_bfd);
6633 if (!h->def_regular)
6635 /* Mark the symbol as undefined, rather than as defined in
6636 the .plt section. Leave the value alone. This is a clue
6637 for the dynamic linker, to make function pointer
6638 comparisons work between an application and shared
6640 sym->st_shndx = SHN_UNDEF;
6644 if (h->got.offset != (bfd_vma) - 1
6645 && elf64_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
6647 Elf_Internal_Rela rela;
6650 /* This symbol has an entry in the global offset table. Set it
6652 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
6655 rela.r_offset = (htab->root.sgot->output_section->vma
6656 + htab->root.sgot->output_offset
6657 + (h->got.offset & ~(bfd_vma) 1));
6659 if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
6661 if (!h->def_regular)
6664 BFD_ASSERT ((h->got.offset & 1) != 0);
6665 rela.r_info = ELF64_R_INFO (0, R_AARCH64_RELATIVE);
6666 rela.r_addend = (h->root.u.def.value
6667 + h->root.u.def.section->output_section->vma
6668 + h->root.u.def.section->output_offset);
6672 BFD_ASSERT ((h->got.offset & 1) == 0);
6673 bfd_put_64 (output_bfd, (bfd_vma) 0,
6674 htab->root.sgot->contents + h->got.offset);
6675 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_GLOB_DAT);
6679 loc = htab->root.srelgot->contents;
6680 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
6681 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6686 Elf_Internal_Rela rela;
6689 /* This symbol needs a copy reloc. Set it up. */
6691 if (h->dynindx == -1
6692 || (h->root.type != bfd_link_hash_defined
6693 && h->root.type != bfd_link_hash_defweak)
6694 || htab->srelbss == NULL)
6697 rela.r_offset = (h->root.u.def.value
6698 + h->root.u.def.section->output_section->vma
6699 + h->root.u.def.section->output_offset);
6700 rela.r_info = ELF64_R_INFO (h->dynindx, R_AARCH64_COPY);
6702 loc = htab->srelbss->contents;
6703 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
6704 bfd_elf64_swap_reloca_out (output_bfd, &rela, loc);
6707 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
6708 be NULL for local symbols. */
6710 && (h == elf_hash_table (info)->hdynamic
6711 || h == elf_hash_table (info)->hgot))
6712 sym->st_shndx = SHN_ABS;
6718 elf64_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
6719 struct elf64_aarch64_link_hash_table
6722 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
6723 small and large plts and at the minute just generates
6726 /* PLT0 of the small PLT looks like this -
6727 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
6728 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
6729 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
6731 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
6732 // GOTPLT entry for this.
6735 bfd_vma plt_got_base;
6739 memcpy (htab->root.splt->contents, elf64_aarch64_small_plt0_entry,
6741 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
6744 plt_got_base = (htab->root.sgotplt->output_section->vma
6745 + htab->root.sgotplt->output_offset);
6747 plt_base = htab->root.splt->output_section->vma +
6748 htab->root.splt->output_section->output_offset;
6750 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
6751 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
6752 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADR_PREL_PG_HI21,
6753 htab->root.splt->contents + 4,
6754 PG (plt_got_base + 16) - PG (plt_base + 4));
6756 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_LDST64_ABS_LO12_NC,
6757 htab->root.splt->contents + 8,
6758 PG_OFFSET (plt_got_base + 16));
6760 elf64_aarch64_update_plt_entry (output_bfd, R_AARCH64_ADD_ABS_LO12_NC,
6761 htab->root.splt->contents + 12,
6762 PG_OFFSET (plt_got_base + 16));
6766 elf64_aarch64_finish_dynamic_sections (bfd *output_bfd,
6767 struct bfd_link_info *info)
6769 struct elf64_aarch64_link_hash_table *htab;
6773 htab = elf64_aarch64_hash_table (info);
6774 dynobj = htab->root.dynobj;
6775 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
6777 if (htab->root.dynamic_sections_created)
6779 Elf64_External_Dyn *dyncon, *dynconend;
6781 if (sdyn == NULL || htab->root.sgot == NULL)
6784 dyncon = (Elf64_External_Dyn *) sdyn->contents;
6785 dynconend = (Elf64_External_Dyn *) (sdyn->contents + sdyn->size);
6786 for (; dyncon < dynconend; dyncon++)
6788 Elf_Internal_Dyn dyn;
6791 bfd_elf64_swap_dyn_in (dynobj, dyncon, &dyn);
6799 s = htab->root.sgotplt;
6800 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
6804 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
6808 s = htab->root.srelplt->output_section;
6809 dyn.d_un.d_val = s->size;
6813 /* The procedure linkage table relocs (DT_JMPREL) should
6814 not be included in the overall relocs (DT_RELA).
6815 Therefore, we override the DT_RELASZ entry here to
6816 make it not include the JMPREL relocs. Since the
6817 linker script arranges for .rela.plt to follow all
6818 other relocation sections, we don't have to worry
6819 about changing the DT_RELA entry. */
6820 if (htab->root.srelplt != NULL)
6822 s = htab->root.srelplt->output_section;
6823 dyn.d_un.d_val -= s->size;
6827 case DT_TLSDESC_PLT:
6828 s = htab->root.splt;
6829 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6830 + htab->tlsdesc_plt;
6833 case DT_TLSDESC_GOT:
6834 s = htab->root.sgot;
6835 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
6836 + htab->dt_tlsdesc_got;
6840 bfd_elf64_swap_dyn_out (output_bfd, &dyn, dyncon);
6845 /* Fill in the special first entry in the procedure linkage table. */
6846 if (htab->root.splt && htab->root.splt->size > 0)
6848 elf64_aarch64_init_small_plt0_entry (output_bfd, htab);
6850 elf_section_data (htab->root.splt->output_section)->
6851 this_hdr.sh_entsize = htab->plt_entry_size;
6854 if (htab->tlsdesc_plt)
6856 bfd_put_64 (output_bfd, (bfd_vma) 0,
6857 htab->root.sgot->contents + htab->dt_tlsdesc_got);
6859 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
6860 elf64_aarch64_tlsdesc_small_plt_entry,
6861 sizeof (elf64_aarch64_tlsdesc_small_plt_entry));
6864 bfd_vma adrp1_addr =
6865 htab->root.splt->output_section->vma
6866 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
6868 bfd_vma adrp2_addr =
6869 htab->root.splt->output_section->vma
6870 + htab->root.splt->output_offset + htab->tlsdesc_plt + 8;
6873 htab->root.sgot->output_section->vma
6874 + htab->root.sgot->output_offset;
6876 bfd_vma pltgot_addr =
6877 htab->root.sgotplt->output_section->vma
6878 + htab->root.sgotplt->output_offset;
6880 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
6883 /* adrp x2, DT_TLSDESC_GOT */
6884 opcode = bfd_get_32 (output_bfd,
6885 htab->root.splt->contents
6886 + htab->tlsdesc_plt + 4);
6887 opcode = reencode_adr_imm
6888 (opcode, (PG (dt_tlsdesc_got) - PG (adrp1_addr)) >> 12);
6889 bfd_put_32 (output_bfd, opcode,
6890 htab->root.splt->contents + htab->tlsdesc_plt + 4);
6893 opcode = bfd_get_32 (output_bfd,
6894 htab->root.splt->contents
6895 + htab->tlsdesc_plt + 8);
6896 opcode = reencode_adr_imm
6897 (opcode, (PG (pltgot_addr) - PG (adrp2_addr)) >> 12);
6898 bfd_put_32 (output_bfd, opcode,
6899 htab->root.splt->contents + htab->tlsdesc_plt + 8);
6901 /* ldr x2, [x2, #0] */
6902 opcode = bfd_get_32 (output_bfd,
6903 htab->root.splt->contents
6904 + htab->tlsdesc_plt + 12);
6905 opcode = reencode_ldst_pos_imm (opcode,
6906 PG_OFFSET (dt_tlsdesc_got) >> 3);
6907 bfd_put_32 (output_bfd, opcode,
6908 htab->root.splt->contents + htab->tlsdesc_plt + 12);
6911 opcode = bfd_get_32 (output_bfd,
6912 htab->root.splt->contents
6913 + htab->tlsdesc_plt + 16);
6914 opcode = reencode_add_imm (opcode, PG_OFFSET (pltgot_addr));
6915 bfd_put_32 (output_bfd, opcode,
6916 htab->root.splt->contents + htab->tlsdesc_plt + 16);
6921 if (htab->root.sgotplt)
6923 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
6925 (*_bfd_error_handler)
6926 (_("discarded output section: `%A'"), htab->root.sgotplt);
6930 /* Fill in the first three entries in the global offset table. */
6931 if (htab->root.sgotplt->size > 0)
6933 /* Set the first entry in the global offset table to the address of
6934 the dynamic section. */
6936 bfd_put_64 (output_bfd, (bfd_vma) 0,
6937 htab->root.sgotplt->contents);
6939 bfd_put_64 (output_bfd,
6940 sdyn->output_section->vma + sdyn->output_offset,
6941 htab->root.sgotplt->contents);
6942 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
6943 bfd_put_64 (output_bfd,
6945 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
6946 bfd_put_64 (output_bfd,
6948 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
6951 elf_section_data (htab->root.sgotplt->output_section)->
6952 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
6955 if (htab->root.sgot && htab->root.sgot->size > 0)
6956 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
6962 /* Return address for Ith PLT stub in section PLT, for relocation REL
6963 or (bfd_vma) -1 if it should not be included. */
6966 elf64_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
6967 const arelent *rel ATTRIBUTE_UNUSED)
6969 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
6973 /* We use this so we can override certain functions
6974 (though currently we don't). */
6976 const struct elf_size_info elf64_aarch64_size_info =
6978 sizeof (Elf64_External_Ehdr),
6979 sizeof (Elf64_External_Phdr),
6980 sizeof (Elf64_External_Shdr),
6981 sizeof (Elf64_External_Rel),
6982 sizeof (Elf64_External_Rela),
6983 sizeof (Elf64_External_Sym),
6984 sizeof (Elf64_External_Dyn),
6985 sizeof (Elf_External_Note),
6986 4, /* Hash table entry size. */
6987 1, /* Internal relocs per external relocs. */
6988 64, /* Arch size. */
6989 3, /* Log_file_align. */
6990 ELFCLASS64, EV_CURRENT,
6991 bfd_elf64_write_out_phdrs,
6992 bfd_elf64_write_shdrs_and_ehdr,
6993 bfd_elf64_checksum_contents,
6994 bfd_elf64_write_relocs,
6995 bfd_elf64_swap_symbol_in,
6996 bfd_elf64_swap_symbol_out,
6997 bfd_elf64_slurp_reloc_table,
6998 bfd_elf64_slurp_symbol_table,
6999 bfd_elf64_swap_dyn_in,
7000 bfd_elf64_swap_dyn_out,
7001 bfd_elf64_swap_reloc_in,
7002 bfd_elf64_swap_reloc_out,
7003 bfd_elf64_swap_reloca_in,
7004 bfd_elf64_swap_reloca_out
7007 #define ELF_ARCH bfd_arch_aarch64
7008 #define ELF_MACHINE_CODE EM_AARCH64
7009 #define ELF_MAXPAGESIZE 0x10000
7010 #define ELF_MINPAGESIZE 0x1000
7011 #define ELF_COMMONPAGESIZE 0x1000
7013 #define bfd_elf64_close_and_cleanup \
7014 elf64_aarch64_close_and_cleanup
7016 #define bfd_elf64_bfd_copy_private_bfd_data \
7017 elf64_aarch64_copy_private_bfd_data
7019 #define bfd_elf64_bfd_free_cached_info \
7020 elf64_aarch64_bfd_free_cached_info
7022 #define bfd_elf64_bfd_is_target_special_symbol \
7023 elf64_aarch64_is_target_special_symbol
7025 #define bfd_elf64_bfd_link_hash_table_create \
7026 elf64_aarch64_link_hash_table_create
7028 #define bfd_elf64_bfd_link_hash_table_free \
7029 elf64_aarch64_hash_table_free
7031 #define bfd_elf64_bfd_merge_private_bfd_data \
7032 elf64_aarch64_merge_private_bfd_data
7034 #define bfd_elf64_bfd_print_private_bfd_data \
7035 elf64_aarch64_print_private_bfd_data
7037 #define bfd_elf64_bfd_reloc_type_lookup \
7038 elf64_aarch64_reloc_type_lookup
7040 #define bfd_elf64_bfd_reloc_name_lookup \
7041 elf64_aarch64_reloc_name_lookup
7043 #define bfd_elf64_bfd_set_private_flags \
7044 elf64_aarch64_set_private_flags
7046 #define bfd_elf64_find_inliner_info \
7047 elf64_aarch64_find_inliner_info
7049 #define bfd_elf64_find_nearest_line \
7050 elf64_aarch64_find_nearest_line
7052 #define bfd_elf64_mkobject \
7053 elf64_aarch64_mkobject
7055 #define bfd_elf64_new_section_hook \
7056 elf64_aarch64_new_section_hook
7058 #define elf_backend_adjust_dynamic_symbol \
7059 elf64_aarch64_adjust_dynamic_symbol
7061 #define elf_backend_always_size_sections \
7062 elf64_aarch64_always_size_sections
7064 #define elf_backend_check_relocs \
7065 elf64_aarch64_check_relocs
7067 #define elf_backend_copy_indirect_symbol \
7068 elf64_aarch64_copy_indirect_symbol
7070 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
7071 to them in our hash. */
7072 #define elf_backend_create_dynamic_sections \
7073 elf64_aarch64_create_dynamic_sections
7075 #define elf_backend_init_index_section \
7076 _bfd_elf_init_2_index_sections
7078 #define elf_backend_is_function_type \
7079 elf64_aarch64_is_function_type
7081 #define elf_backend_finish_dynamic_sections \
7082 elf64_aarch64_finish_dynamic_sections
7084 #define elf_backend_finish_dynamic_symbol \
7085 elf64_aarch64_finish_dynamic_symbol
7087 #define elf_backend_gc_sweep_hook \
7088 elf64_aarch64_gc_sweep_hook
7090 #define elf_backend_object_p \
7091 elf64_aarch64_object_p
7093 #define elf_backend_output_arch_local_syms \
7094 elf64_aarch64_output_arch_local_syms
7096 #define elf_backend_plt_sym_val \
7097 elf64_aarch64_plt_sym_val
7099 #define elf_backend_post_process_headers \
7100 elf64_aarch64_post_process_headers
7102 #define elf_backend_relocate_section \
7103 elf64_aarch64_relocate_section
7105 #define elf_backend_reloc_type_class \
7106 elf64_aarch64_reloc_type_class
7108 #define elf_backend_section_flags \
7109 elf64_aarch64_section_flags
7111 #define elf_backend_section_from_shdr \
7112 elf64_aarch64_section_from_shdr
7114 #define elf_backend_size_dynamic_sections \
7115 elf64_aarch64_size_dynamic_sections
7117 #define elf_backend_size_info \
7118 elf64_aarch64_size_info
7120 #define elf_backend_can_refcount 1
7121 #define elf_backend_can_gc_sections 1
7122 #define elf_backend_plt_readonly 1
7123 #define elf_backend_want_got_plt 1
7124 #define elf_backend_want_plt_sym 0
7125 #define elf_backend_may_use_rel_p 0
7126 #define elf_backend_may_use_rela_p 1
7127 #define elf_backend_default_use_rela_p 1
7128 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
7129 #define elf_backend_default_execstack 0
7131 #undef elf_backend_obj_attrs_section
7132 #define elf_backend_obj_attrs_section ".ARM.attributes"
7134 #include "elf64-target.h"