1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 /* Notes on implementation:
23 Thread Local Store (TLS)
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanims.
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different to none
63 TLS local objects which do not need a GOT entry.
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up offset.
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
95 elfNN_aarch64_check_relocs()
97 This function is invoked for each relocation.
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. One time creation of local symbol data structures are
102 created when the first local symbol is seen.
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
107 elfNN_aarch64_allocate_dynrelocs ()
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT, for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
115 elfNN_aarch64_size_dynamic_sections ()
117 Iterate all input BFDS, look for in the local symbol data structure
118 constructed earlier for local TLS symbols and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
122 elfNN_aarch64_relocate_section ()
124 Calls elfNN_aarch64_final_link_relocate ()
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
134 elfNN_aarch64_final_link_relocate ()
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
140 #include "libiberty.h"
142 #include "bfd_stdint.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
189 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
191 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
192 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
205 #define ELIMINATE_COPY_RELOCS 0
207 /* Return size of a relocation entry. HTAB is the bfd's
208 elf_aarch64_link_hash_entry. */
209 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
211 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
212 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
213 #define PLT_ENTRY_SIZE (32)
214 #define PLT_SMALL_ENTRY_SIZE (16)
215 #define PLT_TLSDESC_ENTRY_SIZE (32)
217 /* Encoding of the nop instruction */
218 #define INSN_NOP 0xd503201f
220 #define aarch64_compute_jump_table_size(htab) \
221 (((htab)->root.srelplt == NULL) ? 0 \
222 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
224 /* The first entry in a procedure linkage table looks like this
225 if the distance between the PLTGOT and the PLT is < 4GB use
226 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
227 in x16 and needs to work out PLTGOT[1] by using an address of
228 [x16,#-GOT_ENTRY_SIZE]. */
229 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
231 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
232 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
234 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
235 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
237 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
238 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
240 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
241 0x1f, 0x20, 0x03, 0xd5, /* nop */
242 0x1f, 0x20, 0x03, 0xd5, /* nop */
243 0x1f, 0x20, 0x03, 0xd5, /* nop */
246 /* Per function entry in a procedure linkage table looks like this
247 if the distance between the PLTGOT and the PLT is < 4GB use
248 these PLT entries. */
249 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
251 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
253 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
254 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
256 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
257 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
259 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
262 static const bfd_byte
263 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
265 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
266 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
267 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
269 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
270 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
272 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
273 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
275 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
276 0x1f, 0x20, 0x03, 0xd5, /* nop */
277 0x1f, 0x20, 0x03, 0xd5, /* nop */
280 #define elf_info_to_howto elfNN_aarch64_info_to_howto
281 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
283 #define AARCH64_ELF_ABI_VERSION 0
285 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
286 #define ALL_ONES (~ (bfd_vma) 0)
288 /* Indexed by the bfd interal reloc enumerators.
289 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
292 static reloc_howto_type elfNN_aarch64_howto_table[] =
296 /* Basic data relocations. */
299 HOWTO (R_AARCH64_NULL, /* type */
301 3, /* size (0 = byte, 1 = short, 2 = long) */
303 FALSE, /* pc_relative */
305 complain_overflow_dont, /* complain_on_overflow */
306 bfd_elf_generic_reloc, /* special_function */
307 "R_AARCH64_NULL", /* name */
308 FALSE, /* partial_inplace */
311 FALSE), /* pcrel_offset */
313 HOWTO (R_AARCH64_NONE, /* type */
315 3, /* size (0 = byte, 1 = short, 2 = long) */
317 FALSE, /* pc_relative */
319 complain_overflow_dont, /* complain_on_overflow */
320 bfd_elf_generic_reloc, /* special_function */
321 "R_AARCH64_NONE", /* name */
322 FALSE, /* partial_inplace */
325 FALSE), /* pcrel_offset */
329 HOWTO64 (AARCH64_R (ABS64), /* type */
331 4, /* size (4 = long long) */
333 FALSE, /* pc_relative */
335 complain_overflow_unsigned, /* complain_on_overflow */
336 bfd_elf_generic_reloc, /* special_function */
337 AARCH64_R_STR (ABS64), /* name */
338 FALSE, /* partial_inplace */
339 ALL_ONES, /* src_mask */
340 ALL_ONES, /* dst_mask */
341 FALSE), /* pcrel_offset */
344 HOWTO (AARCH64_R (ABS32), /* type */
346 2, /* size (0 = byte, 1 = short, 2 = long) */
348 FALSE, /* pc_relative */
350 complain_overflow_unsigned, /* complain_on_overflow */
351 bfd_elf_generic_reloc, /* special_function */
352 AARCH64_R_STR (ABS32), /* name */
353 FALSE, /* partial_inplace */
354 0xffffffff, /* src_mask */
355 0xffffffff, /* dst_mask */
356 FALSE), /* pcrel_offset */
359 HOWTO (AARCH64_R (ABS16), /* type */
361 1, /* size (0 = byte, 1 = short, 2 = long) */
363 FALSE, /* pc_relative */
365 complain_overflow_unsigned, /* complain_on_overflow */
366 bfd_elf_generic_reloc, /* special_function */
367 AARCH64_R_STR (ABS16), /* name */
368 FALSE, /* partial_inplace */
369 0xffff, /* src_mask */
370 0xffff, /* dst_mask */
371 FALSE), /* pcrel_offset */
373 /* .xword: (S+A-P) */
374 HOWTO64 (AARCH64_R (PREL64), /* type */
376 4, /* size (4 = long long) */
378 TRUE, /* pc_relative */
380 complain_overflow_signed, /* complain_on_overflow */
381 bfd_elf_generic_reloc, /* special_function */
382 AARCH64_R_STR (PREL64), /* name */
383 FALSE, /* partial_inplace */
384 ALL_ONES, /* src_mask */
385 ALL_ONES, /* dst_mask */
386 TRUE), /* pcrel_offset */
389 HOWTO (AARCH64_R (PREL32), /* type */
391 2, /* size (0 = byte, 1 = short, 2 = long) */
393 TRUE, /* pc_relative */
395 complain_overflow_signed, /* complain_on_overflow */
396 bfd_elf_generic_reloc, /* special_function */
397 AARCH64_R_STR (PREL32), /* name */
398 FALSE, /* partial_inplace */
399 0xffffffff, /* src_mask */
400 0xffffffff, /* dst_mask */
401 TRUE), /* pcrel_offset */
404 HOWTO (AARCH64_R (PREL16), /* type */
406 1, /* size (0 = byte, 1 = short, 2 = long) */
408 TRUE, /* pc_relative */
410 complain_overflow_signed, /* complain_on_overflow */
411 bfd_elf_generic_reloc, /* special_function */
412 AARCH64_R_STR (PREL16), /* name */
413 FALSE, /* partial_inplace */
414 0xffff, /* src_mask */
415 0xffff, /* dst_mask */
416 TRUE), /* pcrel_offset */
418 /* Group relocations to create a 16, 32, 48 or 64 bit
419 unsigned data or abs address inline. */
421 /* MOVZ: ((S+A) >> 0) & 0xffff */
422 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
424 2, /* size (0 = byte, 1 = short, 2 = long) */
426 FALSE, /* pc_relative */
428 complain_overflow_unsigned, /* complain_on_overflow */
429 bfd_elf_generic_reloc, /* special_function */
430 AARCH64_R_STR (MOVW_UABS_G0), /* name */
431 FALSE, /* partial_inplace */
432 0xffff, /* src_mask */
433 0xffff, /* dst_mask */
434 FALSE), /* pcrel_offset */
436 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
437 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
439 2, /* size (0 = byte, 1 = short, 2 = long) */
441 FALSE, /* pc_relative */
443 complain_overflow_dont, /* complain_on_overflow */
444 bfd_elf_generic_reloc, /* special_function */
445 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
446 FALSE, /* partial_inplace */
447 0xffff, /* src_mask */
448 0xffff, /* dst_mask */
449 FALSE), /* pcrel_offset */
451 /* MOVZ: ((S+A) >> 16) & 0xffff */
452 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
456 FALSE, /* pc_relative */
458 complain_overflow_unsigned, /* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 AARCH64_R_STR (MOVW_UABS_G1), /* name */
461 FALSE, /* partial_inplace */
462 0xffff, /* src_mask */
463 0xffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
466 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
467 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
469 2, /* size (0 = byte, 1 = short, 2 = long) */
471 FALSE, /* pc_relative */
473 complain_overflow_dont, /* complain_on_overflow */
474 bfd_elf_generic_reloc, /* special_function */
475 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
476 FALSE, /* partial_inplace */
477 0xffff, /* src_mask */
478 0xffff, /* dst_mask */
479 FALSE), /* pcrel_offset */
481 /* MOVZ: ((S+A) >> 32) & 0xffff */
482 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
486 FALSE, /* pc_relative */
488 complain_overflow_unsigned, /* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 AARCH64_R_STR (MOVW_UABS_G2), /* name */
491 FALSE, /* partial_inplace */
492 0xffff, /* src_mask */
493 0xffff, /* dst_mask */
494 FALSE), /* pcrel_offset */
496 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
497 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
499 2, /* size (0 = byte, 1 = short, 2 = long) */
501 FALSE, /* pc_relative */
503 complain_overflow_dont, /* complain_on_overflow */
504 bfd_elf_generic_reloc, /* special_function */
505 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
506 FALSE, /* partial_inplace */
507 0xffff, /* src_mask */
508 0xffff, /* dst_mask */
509 FALSE), /* pcrel_offset */
511 /* MOVZ: ((S+A) >> 48) & 0xffff */
512 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
516 FALSE, /* pc_relative */
518 complain_overflow_unsigned, /* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 AARCH64_R_STR (MOVW_UABS_G3), /* name */
521 FALSE, /* partial_inplace */
522 0xffff, /* src_mask */
523 0xffff, /* dst_mask */
524 FALSE), /* pcrel_offset */
526 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
527 signed data or abs address inline. Will change instruction
528 to MOVN or MOVZ depending on sign of calculated value. */
530 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
531 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
533 2, /* size (0 = byte, 1 = short, 2 = long) */
535 FALSE, /* pc_relative */
537 complain_overflow_signed, /* complain_on_overflow */
538 bfd_elf_generic_reloc, /* special_function */
539 AARCH64_R_STR (MOVW_SABS_G0), /* name */
540 FALSE, /* partial_inplace */
541 0xffff, /* src_mask */
542 0xffff, /* dst_mask */
543 FALSE), /* pcrel_offset */
545 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
546 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
548 2, /* size (0 = byte, 1 = short, 2 = long) */
550 FALSE, /* pc_relative */
552 complain_overflow_signed, /* complain_on_overflow */
553 bfd_elf_generic_reloc, /* special_function */
554 AARCH64_R_STR (MOVW_SABS_G1), /* name */
555 FALSE, /* partial_inplace */
556 0xffff, /* src_mask */
557 0xffff, /* dst_mask */
558 FALSE), /* pcrel_offset */
560 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
561 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
563 2, /* size (0 = byte, 1 = short, 2 = long) */
565 FALSE, /* pc_relative */
567 complain_overflow_signed, /* complain_on_overflow */
568 bfd_elf_generic_reloc, /* special_function */
569 AARCH64_R_STR (MOVW_SABS_G2), /* name */
570 FALSE, /* partial_inplace */
571 0xffff, /* src_mask */
572 0xffff, /* dst_mask */
573 FALSE), /* pcrel_offset */
575 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
576 addresses: PG(x) is (x & ~0xfff). */
578 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
579 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
581 2, /* size (0 = byte, 1 = short, 2 = long) */
583 TRUE, /* pc_relative */
585 complain_overflow_signed, /* complain_on_overflow */
586 bfd_elf_generic_reloc, /* special_function */
587 AARCH64_R_STR (LD_PREL_LO19), /* name */
588 FALSE, /* partial_inplace */
589 0x7ffff, /* src_mask */
590 0x7ffff, /* dst_mask */
591 TRUE), /* pcrel_offset */
593 /* ADR: (S+A-P) & 0x1fffff */
594 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
598 TRUE, /* pc_relative */
600 complain_overflow_signed, /* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 AARCH64_R_STR (ADR_PREL_LO21), /* name */
603 FALSE, /* partial_inplace */
604 0x1fffff, /* src_mask */
605 0x1fffff, /* dst_mask */
606 TRUE), /* pcrel_offset */
608 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
609 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
611 2, /* size (0 = byte, 1 = short, 2 = long) */
613 TRUE, /* pc_relative */
615 complain_overflow_signed, /* complain_on_overflow */
616 bfd_elf_generic_reloc, /* special_function */
617 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
618 FALSE, /* partial_inplace */
619 0x1fffff, /* src_mask */
620 0x1fffff, /* dst_mask */
621 TRUE), /* pcrel_offset */
623 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
624 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
628 TRUE, /* pc_relative */
630 complain_overflow_dont, /* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
633 FALSE, /* partial_inplace */
634 0x1fffff, /* src_mask */
635 0x1fffff, /* dst_mask */
636 TRUE), /* pcrel_offset */
638 /* ADD: (S+A) & 0xfff [no overflow check] */
639 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
643 FALSE, /* pc_relative */
645 complain_overflow_dont, /* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
648 FALSE, /* partial_inplace */
649 0x3ffc00, /* src_mask */
650 0x3ffc00, /* dst_mask */
651 FALSE), /* pcrel_offset */
653 /* LD/ST8: (S+A) & 0xfff */
654 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
656 2, /* size (0 = byte, 1 = short, 2 = long) */
658 FALSE, /* pc_relative */
660 complain_overflow_dont, /* complain_on_overflow */
661 bfd_elf_generic_reloc, /* special_function */
662 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
663 FALSE, /* partial_inplace */
664 0xfff, /* src_mask */
665 0xfff, /* dst_mask */
666 FALSE), /* pcrel_offset */
668 /* Relocations for control-flow instructions. */
670 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
671 HOWTO (AARCH64_R (TSTBR14), /* type */
673 2, /* size (0 = byte, 1 = short, 2 = long) */
675 TRUE, /* pc_relative */
677 complain_overflow_signed, /* complain_on_overflow */
678 bfd_elf_generic_reloc, /* special_function */
679 AARCH64_R_STR (TSTBR14), /* name */
680 FALSE, /* partial_inplace */
681 0x3fff, /* src_mask */
682 0x3fff, /* dst_mask */
683 TRUE), /* pcrel_offset */
685 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
686 HOWTO (AARCH64_R (CONDBR19), /* type */
688 2, /* size (0 = byte, 1 = short, 2 = long) */
690 TRUE, /* pc_relative */
692 complain_overflow_signed, /* complain_on_overflow */
693 bfd_elf_generic_reloc, /* special_function */
694 AARCH64_R_STR (CONDBR19), /* name */
695 FALSE, /* partial_inplace */
696 0x7ffff, /* src_mask */
697 0x7ffff, /* dst_mask */
698 TRUE), /* pcrel_offset */
700 /* B: ((S+A-P) >> 2) & 0x3ffffff */
701 HOWTO (AARCH64_R (JUMP26), /* type */
703 2, /* size (0 = byte, 1 = short, 2 = long) */
705 TRUE, /* pc_relative */
707 complain_overflow_signed, /* complain_on_overflow */
708 bfd_elf_generic_reloc, /* special_function */
709 AARCH64_R_STR (JUMP26), /* name */
710 FALSE, /* partial_inplace */
711 0x3ffffff, /* src_mask */
712 0x3ffffff, /* dst_mask */
713 TRUE), /* pcrel_offset */
715 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
716 HOWTO (AARCH64_R (CALL26), /* type */
718 2, /* size (0 = byte, 1 = short, 2 = long) */
720 TRUE, /* pc_relative */
722 complain_overflow_signed, /* complain_on_overflow */
723 bfd_elf_generic_reloc, /* special_function */
724 AARCH64_R_STR (CALL26), /* name */
725 FALSE, /* partial_inplace */
726 0x3ffffff, /* src_mask */
727 0x3ffffff, /* dst_mask */
728 TRUE), /* pcrel_offset */
730 /* LD/ST16: (S+A) & 0xffe */
731 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
733 2, /* size (0 = byte, 1 = short, 2 = long) */
735 FALSE, /* pc_relative */
737 complain_overflow_dont, /* complain_on_overflow */
738 bfd_elf_generic_reloc, /* special_function */
739 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
740 FALSE, /* partial_inplace */
741 0xffe, /* src_mask */
742 0xffe, /* dst_mask */
743 FALSE), /* pcrel_offset */
745 /* LD/ST32: (S+A) & 0xffc */
746 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
750 FALSE, /* pc_relative */
752 complain_overflow_dont, /* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
755 FALSE, /* partial_inplace */
756 0xffc, /* src_mask */
757 0xffc, /* dst_mask */
758 FALSE), /* pcrel_offset */
760 /* LD/ST64: (S+A) & 0xff8 */
761 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
763 2, /* size (0 = byte, 1 = short, 2 = long) */
765 FALSE, /* pc_relative */
767 complain_overflow_dont, /* complain_on_overflow */
768 bfd_elf_generic_reloc, /* special_function */
769 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
770 FALSE, /* partial_inplace */
771 0xff8, /* src_mask */
772 0xff8, /* dst_mask */
773 FALSE), /* pcrel_offset */
775 /* LD/ST128: (S+A) & 0xff0 */
776 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
780 FALSE, /* pc_relative */
782 complain_overflow_dont, /* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
785 FALSE, /* partial_inplace */
786 0xff0, /* src_mask */
787 0xff0, /* dst_mask */
788 FALSE), /* pcrel_offset */
790 /* Set a load-literal immediate field to bits
791 0x1FFFFC of G(S)-P */
792 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
794 2, /* size (0 = byte,1 = short,2 = long) */
796 TRUE, /* pc_relative */
798 complain_overflow_signed, /* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 AARCH64_R_STR (GOT_LD_PREL19), /* name */
801 FALSE, /* partial_inplace */
802 0xffffe0, /* src_mask */
803 0xffffe0, /* dst_mask */
804 TRUE), /* pcrel_offset */
806 /* Get to the page for the GOT entry for the symbol
807 (G(S) - P) using an ADRP instruction. */
808 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
810 2, /* size (0 = byte, 1 = short, 2 = long) */
812 TRUE, /* pc_relative */
814 complain_overflow_dont, /* complain_on_overflow */
815 bfd_elf_generic_reloc, /* special_function */
816 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
817 FALSE, /* partial_inplace */
818 0x1fffff, /* src_mask */
819 0x1fffff, /* dst_mask */
820 TRUE), /* pcrel_offset */
822 /* LD64: GOT offset G(S) & 0xff8 */
823 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
825 2, /* size (0 = byte, 1 = short, 2 = long) */
827 FALSE, /* pc_relative */
829 complain_overflow_dont, /* complain_on_overflow */
830 bfd_elf_generic_reloc, /* special_function */
831 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
832 FALSE, /* partial_inplace */
833 0xff8, /* src_mask */
834 0xff8, /* dst_mask */
835 FALSE), /* pcrel_offset */
837 /* LD32: GOT offset G(S) & 0xffc */
838 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
840 2, /* size (0 = byte, 1 = short, 2 = long) */
842 FALSE, /* pc_relative */
844 complain_overflow_dont, /* complain_on_overflow */
845 bfd_elf_generic_reloc, /* special_function */
846 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
847 FALSE, /* partial_inplace */
848 0xffc, /* src_mask */
849 0xffc, /* dst_mask */
850 FALSE), /* pcrel_offset */
852 /* LD64: GOT offset for the symbol. */
853 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
855 2, /* size (0 = byte, 1 = short, 2 = long) */
857 FALSE, /* pc_relative */
859 complain_overflow_unsigned, /* complain_on_overflow */
860 bfd_elf_generic_reloc, /* special_function */
861 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
862 FALSE, /* partial_inplace */
863 0x7ff8, /* src_mask */
864 0x7ff8, /* dst_mask */
865 FALSE), /* pcrel_offset */
867 /* LD32: GOT offset to the page address of GOT table.
868 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
869 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
871 2, /* size (0 = byte, 1 = short, 2 = long) */
873 FALSE, /* pc_relative */
875 complain_overflow_unsigned, /* complain_on_overflow */
876 bfd_elf_generic_reloc, /* special_function */
877 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
878 FALSE, /* partial_inplace */
879 0x5ffc, /* src_mask */
880 0x5ffc, /* dst_mask */
881 FALSE), /* pcrel_offset */
883 /* LD64: GOT offset to the page address of GOT table.
884 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
885 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
887 2, /* size (0 = byte, 1 = short, 2 = long) */
889 FALSE, /* pc_relative */
891 complain_overflow_unsigned, /* complain_on_overflow */
892 bfd_elf_generic_reloc, /* special_function */
893 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
894 FALSE, /* partial_inplace */
895 0x7ff8, /* src_mask */
896 0x7ff8, /* dst_mask */
897 FALSE), /* pcrel_offset */
899 /* Get to the page for the GOT entry for the symbol
900 (G(S) - P) using an ADRP instruction. */
901 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
903 2, /* size (0 = byte, 1 = short, 2 = long) */
905 TRUE, /* pc_relative */
907 complain_overflow_dont, /* complain_on_overflow */
908 bfd_elf_generic_reloc, /* special_function */
909 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
910 FALSE, /* partial_inplace */
911 0x1fffff, /* src_mask */
912 0x1fffff, /* dst_mask */
913 TRUE), /* pcrel_offset */
915 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
917 2, /* size (0 = byte, 1 = short, 2 = long) */
919 TRUE, /* pc_relative */
921 complain_overflow_dont, /* complain_on_overflow */
922 bfd_elf_generic_reloc, /* special_function */
923 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
924 FALSE, /* partial_inplace */
925 0x1fffff, /* src_mask */
926 0x1fffff, /* dst_mask */
927 TRUE), /* pcrel_offset */
929 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
930 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
932 2, /* size (0 = byte, 1 = short, 2 = long) */
934 FALSE, /* pc_relative */
936 complain_overflow_dont, /* complain_on_overflow */
937 bfd_elf_generic_reloc, /* special_function */
938 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
939 FALSE, /* partial_inplace */
940 0xfff, /* src_mask */
941 0xfff, /* dst_mask */
942 FALSE), /* pcrel_offset */
944 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
946 2, /* size (0 = byte, 1 = short, 2 = long) */
948 FALSE, /* pc_relative */
950 complain_overflow_dont, /* complain_on_overflow */
951 bfd_elf_generic_reloc, /* special_function */
952 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
953 FALSE, /* partial_inplace */
954 0xffff, /* src_mask */
955 0xffff, /* dst_mask */
956 FALSE), /* pcrel_offset */
958 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
960 2, /* size (0 = byte, 1 = short, 2 = long) */
962 FALSE, /* pc_relative */
964 complain_overflow_dont, /* complain_on_overflow */
965 bfd_elf_generic_reloc, /* special_function */
966 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
967 FALSE, /* partial_inplace */
968 0xffff, /* src_mask */
969 0xffff, /* dst_mask */
970 FALSE), /* pcrel_offset */
972 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
974 2, /* size (0 = byte, 1 = short, 2 = long) */
976 FALSE, /* pc_relative */
978 complain_overflow_dont, /* complain_on_overflow */
979 bfd_elf_generic_reloc, /* special_function */
980 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
981 FALSE, /* partial_inplace */
982 0x1fffff, /* src_mask */
983 0x1fffff, /* dst_mask */
984 FALSE), /* pcrel_offset */
986 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
988 2, /* size (0 = byte, 1 = short, 2 = long) */
990 FALSE, /* pc_relative */
992 complain_overflow_dont, /* complain_on_overflow */
993 bfd_elf_generic_reloc, /* special_function */
994 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
995 FALSE, /* partial_inplace */
996 0xff8, /* src_mask */
997 0xff8, /* dst_mask */
998 FALSE), /* pcrel_offset */
1000 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1002 2, /* size (0 = byte, 1 = short, 2 = long) */
1004 FALSE, /* pc_relative */
1006 complain_overflow_dont, /* complain_on_overflow */
1007 bfd_elf_generic_reloc, /* special_function */
1008 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1009 FALSE, /* partial_inplace */
1010 0xffc, /* src_mask */
1011 0xffc, /* dst_mask */
1012 FALSE), /* pcrel_offset */
1014 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1016 2, /* size (0 = byte, 1 = short, 2 = long) */
1018 FALSE, /* pc_relative */
1020 complain_overflow_dont, /* complain_on_overflow */
1021 bfd_elf_generic_reloc, /* special_function */
1022 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1023 FALSE, /* partial_inplace */
1024 0x1ffffc, /* src_mask */
1025 0x1ffffc, /* dst_mask */
1026 FALSE), /* pcrel_offset */
1028 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1030 2, /* size (0 = byte, 1 = short, 2 = long) */
1032 TRUE, /* pc_relative */
1034 complain_overflow_signed, /* complain_on_overflow */
1035 bfd_elf_generic_reloc, /* special_function */
1036 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1037 FALSE, /* partial_inplace */
1038 0x1fffff, /* src_mask */
1039 0x1fffff, /* dst_mask */
1040 TRUE), /* pcrel_offset */
1042 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1043 32, /* rightshift */
1044 2, /* size (0 = byte, 1 = short, 2 = long) */
1046 FALSE, /* pc_relative */
1048 complain_overflow_unsigned, /* complain_on_overflow */
1049 bfd_elf_generic_reloc, /* special_function */
1050 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1051 FALSE, /* partial_inplace */
1052 0xffff, /* src_mask */
1053 0xffff, /* dst_mask */
1054 FALSE), /* pcrel_offset */
1056 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1057 16, /* rightshift */
1058 2, /* size (0 = byte, 1 = short, 2 = long) */
1060 FALSE, /* pc_relative */
1062 complain_overflow_dont, /* complain_on_overflow */
1063 bfd_elf_generic_reloc, /* special_function */
1064 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1065 FALSE, /* partial_inplace */
1066 0xffff, /* src_mask */
1067 0xffff, /* dst_mask */
1068 FALSE), /* pcrel_offset */
1070 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1071 16, /* rightshift */
1072 2, /* size (0 = byte, 1 = short, 2 = long) */
1074 FALSE, /* pc_relative */
1076 complain_overflow_dont, /* complain_on_overflow */
1077 bfd_elf_generic_reloc, /* special_function */
1078 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1079 FALSE, /* partial_inplace */
1080 0xffff, /* src_mask */
1081 0xffff, /* dst_mask */
1082 FALSE), /* pcrel_offset */
1084 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1086 2, /* size (0 = byte, 1 = short, 2 = long) */
1088 FALSE, /* pc_relative */
1090 complain_overflow_dont, /* complain_on_overflow */
1091 bfd_elf_generic_reloc, /* special_function */
1092 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1093 FALSE, /* partial_inplace */
1094 0xffff, /* src_mask */
1095 0xffff, /* dst_mask */
1096 FALSE), /* pcrel_offset */
1098 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1100 2, /* size (0 = byte, 1 = short, 2 = long) */
1102 FALSE, /* pc_relative */
1104 complain_overflow_dont, /* complain_on_overflow */
1105 bfd_elf_generic_reloc, /* special_function */
1106 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1107 FALSE, /* partial_inplace */
1108 0xffff, /* src_mask */
1109 0xffff, /* dst_mask */
1110 FALSE), /* pcrel_offset */
1112 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1113 12, /* rightshift */
1114 2, /* size (0 = byte, 1 = short, 2 = long) */
1116 FALSE, /* pc_relative */
1118 complain_overflow_unsigned, /* complain_on_overflow */
1119 bfd_elf_generic_reloc, /* special_function */
1120 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1121 FALSE, /* partial_inplace */
1122 0xfff, /* src_mask */
1123 0xfff, /* dst_mask */
1124 FALSE), /* pcrel_offset */
1126 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1128 2, /* size (0 = byte, 1 = short, 2 = long) */
1130 FALSE, /* pc_relative */
1132 complain_overflow_unsigned, /* complain_on_overflow */
1133 bfd_elf_generic_reloc, /* special_function */
1134 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1135 FALSE, /* partial_inplace */
1136 0xfff, /* src_mask */
1137 0xfff, /* dst_mask */
1138 FALSE), /* pcrel_offset */
1140 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1142 2, /* size (0 = byte, 1 = short, 2 = long) */
1144 FALSE, /* pc_relative */
1146 complain_overflow_dont, /* complain_on_overflow */
1147 bfd_elf_generic_reloc, /* special_function */
1148 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1149 FALSE, /* partial_inplace */
1150 0xfff, /* src_mask */
1151 0xfff, /* dst_mask */
1152 FALSE), /* pcrel_offset */
1154 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1156 2, /* size (0 = byte, 1 = short, 2 = long) */
1158 TRUE, /* pc_relative */
1160 complain_overflow_dont, /* complain_on_overflow */
1161 bfd_elf_generic_reloc, /* special_function */
1162 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1163 FALSE, /* partial_inplace */
1164 0x0ffffe0, /* src_mask */
1165 0x0ffffe0, /* dst_mask */
1166 TRUE), /* pcrel_offset */
1168 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1170 2, /* size (0 = byte, 1 = short, 2 = long) */
1172 TRUE, /* pc_relative */
1174 complain_overflow_dont, /* complain_on_overflow */
1175 bfd_elf_generic_reloc, /* special_function */
1176 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1177 FALSE, /* partial_inplace */
1178 0x1fffff, /* src_mask */
1179 0x1fffff, /* dst_mask */
1180 TRUE), /* pcrel_offset */
1182 /* Get to the page for the GOT entry for the symbol
1183 (G(S) - P) using an ADRP instruction. */
1184 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1185 12, /* rightshift */
1186 2, /* size (0 = byte, 1 = short, 2 = long) */
1188 TRUE, /* pc_relative */
1190 complain_overflow_dont, /* complain_on_overflow */
1191 bfd_elf_generic_reloc, /* special_function */
1192 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1193 FALSE, /* partial_inplace */
1194 0x1fffff, /* src_mask */
1195 0x1fffff, /* dst_mask */
1196 TRUE), /* pcrel_offset */
1198 /* LD64: GOT offset G(S) & 0xff8. */
1199 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1203 FALSE, /* pc_relative */
1205 complain_overflow_dont, /* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1208 FALSE, /* partial_inplace */
1209 0xff8, /* src_mask */
1210 0xff8, /* dst_mask */
1211 FALSE), /* pcrel_offset */
1213 /* LD32: GOT offset G(S) & 0xffc. */
1214 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1216 2, /* size (0 = byte, 1 = short, 2 = long) */
1218 FALSE, /* pc_relative */
1220 complain_overflow_dont, /* complain_on_overflow */
1221 bfd_elf_generic_reloc, /* special_function */
1222 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1223 FALSE, /* partial_inplace */
1224 0xffc, /* src_mask */
1225 0xffc, /* dst_mask */
1226 FALSE), /* pcrel_offset */
1228 /* ADD: GOT offset G(S) & 0xfff. */
1229 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1231 2, /* size (0 = byte, 1 = short, 2 = long) */
1233 FALSE, /* pc_relative */
1235 complain_overflow_dont, /* complain_on_overflow */
1236 bfd_elf_generic_reloc, /* special_function */
1237 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1238 FALSE, /* partial_inplace */
1239 0xfff, /* src_mask */
1240 0xfff, /* dst_mask */
1241 FALSE), /* pcrel_offset */
1243 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1244 16, /* rightshift */
1245 2, /* size (0 = byte, 1 = short, 2 = long) */
1247 FALSE, /* pc_relative */
1249 complain_overflow_dont, /* complain_on_overflow */
1250 bfd_elf_generic_reloc, /* special_function */
1251 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1252 FALSE, /* partial_inplace */
1253 0xffff, /* src_mask */
1254 0xffff, /* dst_mask */
1255 FALSE), /* pcrel_offset */
1257 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1259 2, /* size (0 = byte, 1 = short, 2 = long) */
1261 FALSE, /* pc_relative */
1263 complain_overflow_dont, /* complain_on_overflow */
1264 bfd_elf_generic_reloc, /* special_function */
1265 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1266 FALSE, /* partial_inplace */
1267 0xffff, /* src_mask */
1268 0xffff, /* dst_mask */
1269 FALSE), /* pcrel_offset */
1271 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1275 FALSE, /* pc_relative */
1277 complain_overflow_dont, /* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 AARCH64_R_STR (TLSDESC_LDR), /* name */
1280 FALSE, /* partial_inplace */
1283 FALSE), /* pcrel_offset */
1285 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1289 FALSE, /* pc_relative */
1291 complain_overflow_dont, /* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 AARCH64_R_STR (TLSDESC_ADD), /* name */
1294 FALSE, /* partial_inplace */
1297 FALSE), /* pcrel_offset */
1299 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1303 FALSE, /* pc_relative */
1305 complain_overflow_dont, /* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 AARCH64_R_STR (TLSDESC_CALL), /* name */
1308 FALSE, /* partial_inplace */
1311 FALSE), /* pcrel_offset */
1313 HOWTO (AARCH64_R (COPY), /* type */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1317 FALSE, /* pc_relative */
1319 complain_overflow_bitfield, /* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 AARCH64_R_STR (COPY), /* name */
1322 TRUE, /* partial_inplace */
1323 0xffffffff, /* src_mask */
1324 0xffffffff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1327 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1331 FALSE, /* pc_relative */
1333 complain_overflow_bitfield, /* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 AARCH64_R_STR (GLOB_DAT), /* name */
1336 TRUE, /* partial_inplace */
1337 0xffffffff, /* src_mask */
1338 0xffffffff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1341 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1345 FALSE, /* pc_relative */
1347 complain_overflow_bitfield, /* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 AARCH64_R_STR (JUMP_SLOT), /* name */
1350 TRUE, /* partial_inplace */
1351 0xffffffff, /* src_mask */
1352 0xffffffff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1355 HOWTO (AARCH64_R (RELATIVE), /* type */
1357 2, /* size (0 = byte, 1 = short, 2 = long) */
1359 FALSE, /* pc_relative */
1361 complain_overflow_bitfield, /* complain_on_overflow */
1362 bfd_elf_generic_reloc, /* special_function */
1363 AARCH64_R_STR (RELATIVE), /* name */
1364 TRUE, /* partial_inplace */
1365 ALL_ONES, /* src_mask */
1366 ALL_ONES, /* dst_mask */
1367 FALSE), /* pcrel_offset */
1369 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1371 2, /* size (0 = byte, 1 = short, 2 = long) */
1373 FALSE, /* pc_relative */
1375 complain_overflow_dont, /* complain_on_overflow */
1376 bfd_elf_generic_reloc, /* special_function */
1378 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1380 AARCH64_R_STR (TLS_DTPMOD), /* name */
1382 FALSE, /* partial_inplace */
1384 ALL_ONES, /* dst_mask */
1385 FALSE), /* pc_reloffset */
1387 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 FALSE, /* pc_relative */
1393 complain_overflow_dont, /* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1396 AARCH64_R_STR (TLS_DTPREL64), /* name */
1398 AARCH64_R_STR (TLS_DTPREL), /* name */
1400 FALSE, /* partial_inplace */
1402 ALL_ONES, /* dst_mask */
1403 FALSE), /* pcrel_offset */
1405 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1407 2, /* size (0 = byte, 1 = short, 2 = long) */
1409 FALSE, /* pc_relative */
1411 complain_overflow_dont, /* complain_on_overflow */
1412 bfd_elf_generic_reloc, /* special_function */
1414 AARCH64_R_STR (TLS_TPREL64), /* name */
1416 AARCH64_R_STR (TLS_TPREL), /* name */
1418 FALSE, /* partial_inplace */
1420 ALL_ONES, /* dst_mask */
1421 FALSE), /* pcrel_offset */
1423 HOWTO (AARCH64_R (TLSDESC), /* type */
1425 2, /* size (0 = byte, 1 = short, 2 = long) */
1427 FALSE, /* pc_relative */
1429 complain_overflow_dont, /* complain_on_overflow */
1430 bfd_elf_generic_reloc, /* special_function */
1431 AARCH64_R_STR (TLSDESC), /* name */
1432 FALSE, /* partial_inplace */
1434 ALL_ONES, /* dst_mask */
1435 FALSE), /* pcrel_offset */
1437 HOWTO (AARCH64_R (IRELATIVE), /* type */
1439 2, /* size (0 = byte, 1 = short, 2 = long) */
1441 FALSE, /* pc_relative */
1443 complain_overflow_bitfield, /* complain_on_overflow */
1444 bfd_elf_generic_reloc, /* special_function */
1445 AARCH64_R_STR (IRELATIVE), /* name */
1446 FALSE, /* partial_inplace */
1448 ALL_ONES, /* dst_mask */
1449 FALSE), /* pcrel_offset */
1454 static reloc_howto_type elfNN_aarch64_howto_none =
1455 HOWTO (R_AARCH64_NONE, /* type */
1457 3, /* size (0 = byte, 1 = short, 2 = long) */
1459 FALSE, /* pc_relative */
1461 complain_overflow_dont,/* complain_on_overflow */
1462 bfd_elf_generic_reloc, /* special_function */
1463 "R_AARCH64_NONE", /* name */
1464 FALSE, /* partial_inplace */
1467 FALSE); /* pcrel_offset */
1469 /* Given HOWTO, return the bfd internal relocation enumerator. */
1471 static bfd_reloc_code_real_type
1472 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1475 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1476 const ptrdiff_t offset
1477 = howto - elfNN_aarch64_howto_table;
1479 if (offset > 0 && offset < size - 1)
1480 return BFD_RELOC_AARCH64_RELOC_START + offset;
1482 if (howto == &elfNN_aarch64_howto_none)
1483 return BFD_RELOC_AARCH64_NONE;
1485 return BFD_RELOC_AARCH64_RELOC_START;
1488 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1490 static bfd_reloc_code_real_type
1491 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1493 static bfd_boolean initialized_p = FALSE;
1494 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1495 static unsigned int offsets[R_AARCH64_end];
1497 if (initialized_p == FALSE)
1501 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1502 if (elfNN_aarch64_howto_table[i].type != 0)
1503 offsets[elfNN_aarch64_howto_table[i].type] = i;
1505 initialized_p = TRUE;
1508 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1509 return BFD_RELOC_AARCH64_NONE;
1511 /* PR 17512: file: b371e70a. */
1512 if (r_type >= R_AARCH64_end)
1514 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1515 bfd_set_error (bfd_error_bad_value);
1516 return BFD_RELOC_AARCH64_NONE;
1519 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1522 struct elf_aarch64_reloc_map
1524 bfd_reloc_code_real_type from;
1525 bfd_reloc_code_real_type to;
1528 /* Map bfd generic reloc to AArch64-specific reloc. */
1529 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1531 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1533 /* Basic data relocations. */
1534 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1535 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1536 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1537 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1538 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1539 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1540 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1543 /* Given the bfd internal relocation enumerator in CODE, return the
1544 corresponding howto entry. */
1546 static reloc_howto_type *
1547 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1551 /* Convert bfd generic reloc to AArch64-specific reloc. */
1552 if (code < BFD_RELOC_AARCH64_RELOC_START
1553 || code > BFD_RELOC_AARCH64_RELOC_END)
1554 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1555 if (elf_aarch64_reloc_map[i].from == code)
1557 code = elf_aarch64_reloc_map[i].to;
1561 if (code > BFD_RELOC_AARCH64_RELOC_START
1562 && code < BFD_RELOC_AARCH64_RELOC_END)
1563 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1564 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1566 if (code == BFD_RELOC_AARCH64_NONE)
1567 return &elfNN_aarch64_howto_none;
1572 static reloc_howto_type *
1573 elfNN_aarch64_howto_from_type (unsigned int r_type)
1575 bfd_reloc_code_real_type val;
1576 reloc_howto_type *howto;
1581 bfd_set_error (bfd_error_bad_value);
1586 if (r_type == R_AARCH64_NONE)
1587 return &elfNN_aarch64_howto_none;
1589 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1590 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1595 bfd_set_error (bfd_error_bad_value);
1600 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1601 Elf_Internal_Rela *elf_reloc)
1603 unsigned int r_type;
1605 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1606 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1609 static reloc_howto_type *
1610 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1611 bfd_reloc_code_real_type code)
1613 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1618 bfd_set_error (bfd_error_bad_value);
1622 static reloc_howto_type *
1623 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1628 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1629 if (elfNN_aarch64_howto_table[i].name != NULL
1630 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1631 return &elfNN_aarch64_howto_table[i];
1636 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1637 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1638 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1639 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1641 /* The linker script knows the section names for placement.
1642 The entry_names are used to do simple name mangling on the stubs.
1643 Given a function name, and its type, the stub can be found. The
1644 name can be changed. The only requirement is the %s be present. */
1645 #define STUB_ENTRY_NAME "__%s_veneer"
1647 /* The name of the dynamic interpreter. This is put in the .interp
1649 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1651 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1652 (((1 << 25) - 1) << 2)
1653 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1656 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1657 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1660 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1662 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1663 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1667 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1669 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1670 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1671 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1674 static const uint32_t aarch64_adrp_branch_stub [] =
1676 0x90000010, /* adrp ip0, X */
1677 /* R_AARCH64_ADR_HI21_PCREL(X) */
1678 0x91000210, /* add ip0, ip0, :lo12:X */
1679 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1680 0xd61f0200, /* br ip0 */
1683 static const uint32_t aarch64_long_branch_stub[] =
1686 0x58000090, /* ldr ip0, 1f */
1688 0x18000090, /* ldr wip0, 1f */
1690 0x10000011, /* adr ip1, #0 */
1691 0x8b110210, /* add ip0, ip0, ip1 */
1692 0xd61f0200, /* br ip0 */
1693 0x00000000, /* 1: .xword or .word
1694 R_AARCH64_PRELNN(X) + 12
1699 static const uint32_t aarch64_erratum_835769_stub[] =
1701 0x00000000, /* Placeholder for multiply accumulate. */
1702 0x14000000, /* b <label> */
1705 static const uint32_t aarch64_erratum_843419_stub[] =
1707 0x00000000, /* Placeholder for LDR instruction. */
1708 0x14000000, /* b <label> */
1711 /* Section name for stubs is the associated section name plus this
1713 #define STUB_SUFFIX ".stub"
1715 enum elf_aarch64_stub_type
1718 aarch64_stub_adrp_branch,
1719 aarch64_stub_long_branch,
1720 aarch64_stub_erratum_835769_veneer,
1721 aarch64_stub_erratum_843419_veneer,
1724 struct elf_aarch64_stub_hash_entry
1726 /* Base hash table entry structure. */
1727 struct bfd_hash_entry root;
1729 /* The stub section. */
1732 /* Offset within stub_sec of the beginning of this stub. */
1733 bfd_vma stub_offset;
1735 /* Given the symbol's value and its section we can determine its final
1736 value when building the stubs (so the stub knows where to jump). */
1737 bfd_vma target_value;
1738 asection *target_section;
1740 enum elf_aarch64_stub_type stub_type;
1742 /* The symbol table entry, if any, that this was derived from. */
1743 struct elf_aarch64_link_hash_entry *h;
1745 /* Destination symbol type */
1746 unsigned char st_type;
1748 /* Where this stub is being called from, or, in the case of combined
1749 stub sections, the first input section in the group. */
1752 /* The name for the local symbol at the start of this stub. The
1753 stub name in the hash table has to be unique; this does not, so
1754 it can be friendlier. */
1757 /* The instruction which caused this stub to be generated (only valid for
1758 erratum 835769 workaround stubs at present). */
1759 uint32_t veneered_insn;
1761 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
1762 bfd_vma adrp_offset;
1765 /* Used to build a map of a section. This is required for mixed-endian
1768 typedef struct elf_elf_section_map
1773 elf_aarch64_section_map;
1776 typedef struct _aarch64_elf_section_data
1778 struct bfd_elf_section_data elf;
1779 unsigned int mapcount;
1780 unsigned int mapsize;
1781 elf_aarch64_section_map *map;
1783 _aarch64_elf_section_data;
1785 #define elf_aarch64_section_data(sec) \
1786 ((_aarch64_elf_section_data *) elf_section_data (sec))
1788 /* The size of the thread control block which is defined to be two pointers. */
1789 #define TCB_SIZE (ARCH_SIZE/8)*2
1791 struct elf_aarch64_local_symbol
1793 unsigned int got_type;
1794 bfd_signed_vma got_refcount;
1797 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1798 offset is from the end of the jump table and reserved entries
1801 The magic value (bfd_vma) -1 indicates that an offset has not be
1803 bfd_vma tlsdesc_got_jump_table_offset;
1806 struct elf_aarch64_obj_tdata
1808 struct elf_obj_tdata root;
1810 /* local symbol descriptors */
1811 struct elf_aarch64_local_symbol *locals;
1813 /* Zero to warn when linking objects with incompatible enum sizes. */
1814 int no_enum_size_warning;
1816 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1817 int no_wchar_size_warning;
1820 #define elf_aarch64_tdata(bfd) \
1821 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1823 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1825 #define is_aarch64_elf(bfd) \
1826 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1827 && elf_tdata (bfd) != NULL \
1828 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1831 elfNN_aarch64_mkobject (bfd *abfd)
1833 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1837 #define elf_aarch64_hash_entry(ent) \
1838 ((struct elf_aarch64_link_hash_entry *)(ent))
1840 #define GOT_UNKNOWN 0
1841 #define GOT_NORMAL 1
1842 #define GOT_TLS_GD 2
1843 #define GOT_TLS_IE 4
1844 #define GOT_TLSDESC_GD 8
1846 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1848 /* AArch64 ELF linker hash entry. */
1849 struct elf_aarch64_link_hash_entry
1851 struct elf_link_hash_entry root;
1853 /* Track dynamic relocs copied for this symbol. */
1854 struct elf_dyn_relocs *dyn_relocs;
1856 /* Since PLT entries have variable size, we need to record the
1857 index into .got.plt instead of recomputing it from the PLT
1859 bfd_signed_vma plt_got_offset;
1861 /* Bit mask representing the type of GOT entry(s) if any required by
1863 unsigned int got_type;
1865 /* A pointer to the most recently used stub hash entry against this
1867 struct elf_aarch64_stub_hash_entry *stub_cache;
1869 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1870 is from the end of the jump table and reserved entries within the PLTGOT.
1872 The magic value (bfd_vma) -1 indicates that an offset has not
1874 bfd_vma tlsdesc_got_jump_table_offset;
1878 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1880 unsigned long r_symndx)
1883 return elf_aarch64_hash_entry (h)->got_type;
1885 if (! elf_aarch64_locals (abfd))
1888 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1891 /* Get the AArch64 elf linker hash table from a link_info structure. */
1892 #define elf_aarch64_hash_table(info) \
1893 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1895 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1896 ((struct elf_aarch64_stub_hash_entry *) \
1897 bfd_hash_lookup ((table), (string), (create), (copy)))
1899 /* AArch64 ELF linker hash table. */
1900 struct elf_aarch64_link_hash_table
1902 /* The main hash table. */
1903 struct elf_link_hash_table root;
1905 /* Nonzero to force PIC branch veneers. */
1908 /* Fix erratum 835769. */
1909 int fix_erratum_835769;
1911 /* Fix erratum 843419. */
1912 int fix_erratum_843419;
1914 /* Enable ADRP->ADR rewrite for erratum 843419 workaround. */
1915 int fix_erratum_843419_adr;
1917 /* The number of bytes in the initial entry in the PLT. */
1918 bfd_size_type plt_header_size;
1920 /* The number of bytes in the subsequent PLT etries. */
1921 bfd_size_type plt_entry_size;
1923 /* Short-cuts to get to dynamic linker sections. */
1927 /* Small local sym cache. */
1928 struct sym_cache sym_cache;
1930 /* For convenience in allocate_dynrelocs. */
1933 /* The amount of space used by the reserved portion of the sgotplt
1934 section, plus whatever space is used by the jump slots. */
1935 bfd_vma sgotplt_jump_table_size;
1937 /* The stub hash table. */
1938 struct bfd_hash_table stub_hash_table;
1940 /* Linker stub bfd. */
1943 /* Linker call-backs. */
1944 asection *(*add_stub_section) (const char *, asection *);
1945 void (*layout_sections_again) (void);
1947 /* Array to keep track of which stub sections have been created, and
1948 information on stub grouping. */
1951 /* This is the section to which stubs in the group will be
1954 /* The stub section. */
1958 /* Assorted information used by elfNN_aarch64_size_stubs. */
1959 unsigned int bfd_count;
1961 asection **input_list;
1963 /* The offset into splt of the PLT entry for the TLS descriptor
1964 resolver. Special values are 0, if not necessary (or not found
1965 to be necessary yet), and -1 if needed but not determined
1967 bfd_vma tlsdesc_plt;
1969 /* The GOT offset for the lazy trampoline. Communicated to the
1970 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1971 indicates an offset is not allocated. */
1972 bfd_vma dt_tlsdesc_got;
1974 /* Used by local STT_GNU_IFUNC symbols. */
1975 htab_t loc_hash_table;
1976 void * loc_hash_memory;
1979 /* Create an entry in an AArch64 ELF linker hash table. */
1981 static struct bfd_hash_entry *
1982 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1983 struct bfd_hash_table *table,
1986 struct elf_aarch64_link_hash_entry *ret =
1987 (struct elf_aarch64_link_hash_entry *) entry;
1989 /* Allocate the structure if it has not already been allocated by a
1992 ret = bfd_hash_allocate (table,
1993 sizeof (struct elf_aarch64_link_hash_entry));
1995 return (struct bfd_hash_entry *) ret;
1997 /* Call the allocation method of the superclass. */
1998 ret = ((struct elf_aarch64_link_hash_entry *)
1999 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2003 ret->dyn_relocs = NULL;
2004 ret->got_type = GOT_UNKNOWN;
2005 ret->plt_got_offset = (bfd_vma) - 1;
2006 ret->stub_cache = NULL;
2007 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2010 return (struct bfd_hash_entry *) ret;
2013 /* Initialize an entry in the stub hash table. */
2015 static struct bfd_hash_entry *
2016 stub_hash_newfunc (struct bfd_hash_entry *entry,
2017 struct bfd_hash_table *table, const char *string)
2019 /* Allocate the structure if it has not already been allocated by a
2023 entry = bfd_hash_allocate (table,
2025 elf_aarch64_stub_hash_entry));
2030 /* Call the allocation method of the superclass. */
2031 entry = bfd_hash_newfunc (entry, table, string);
2034 struct elf_aarch64_stub_hash_entry *eh;
2036 /* Initialize the local fields. */
2037 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2038 eh->adrp_offset = 0;
2039 eh->stub_sec = NULL;
2040 eh->stub_offset = 0;
2041 eh->target_value = 0;
2042 eh->target_section = NULL;
2043 eh->stub_type = aarch64_stub_none;
2051 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2052 for local symbol so that we can handle local STT_GNU_IFUNC symbols
2053 as global symbol. We reuse indx and dynstr_index for local symbol
2054 hash since they aren't used by global symbols in this backend. */
2057 elfNN_aarch64_local_htab_hash (const void *ptr)
2059 struct elf_link_hash_entry *h
2060 = (struct elf_link_hash_entry *) ptr;
2061 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2064 /* Compare local hash entries. */
2067 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2069 struct elf_link_hash_entry *h1
2070 = (struct elf_link_hash_entry *) ptr1;
2071 struct elf_link_hash_entry *h2
2072 = (struct elf_link_hash_entry *) ptr2;
2074 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2077 /* Find and/or create a hash entry for local symbol. */
2079 static struct elf_link_hash_entry *
2080 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2081 bfd *abfd, const Elf_Internal_Rela *rel,
2084 struct elf_aarch64_link_hash_entry e, *ret;
2085 asection *sec = abfd->sections;
2086 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2087 ELFNN_R_SYM (rel->r_info));
2090 e.root.indx = sec->id;
2091 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2092 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2093 create ? INSERT : NO_INSERT);
2100 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2104 ret = (struct elf_aarch64_link_hash_entry *)
2105 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2106 sizeof (struct elf_aarch64_link_hash_entry));
2109 memset (ret, 0, sizeof (*ret));
2110 ret->root.indx = sec->id;
2111 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2112 ret->root.dynindx = -1;
2118 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2121 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2122 struct elf_link_hash_entry *dir,
2123 struct elf_link_hash_entry *ind)
2125 struct elf_aarch64_link_hash_entry *edir, *eind;
2127 edir = (struct elf_aarch64_link_hash_entry *) dir;
2128 eind = (struct elf_aarch64_link_hash_entry *) ind;
2130 if (eind->dyn_relocs != NULL)
2132 if (edir->dyn_relocs != NULL)
2134 struct elf_dyn_relocs **pp;
2135 struct elf_dyn_relocs *p;
2137 /* Add reloc counts against the indirect sym to the direct sym
2138 list. Merge any entries against the same section. */
2139 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2141 struct elf_dyn_relocs *q;
2143 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2144 if (q->sec == p->sec)
2146 q->pc_count += p->pc_count;
2147 q->count += p->count;
2154 *pp = edir->dyn_relocs;
2157 edir->dyn_relocs = eind->dyn_relocs;
2158 eind->dyn_relocs = NULL;
2161 if (ind->root.type == bfd_link_hash_indirect)
2163 /* Copy over PLT info. */
2164 if (dir->got.refcount <= 0)
2166 edir->got_type = eind->got_type;
2167 eind->got_type = GOT_UNKNOWN;
2171 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2174 /* Destroy an AArch64 elf linker hash table. */
2177 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2179 struct elf_aarch64_link_hash_table *ret
2180 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2182 if (ret->loc_hash_table)
2183 htab_delete (ret->loc_hash_table);
2184 if (ret->loc_hash_memory)
2185 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2187 bfd_hash_table_free (&ret->stub_hash_table);
2188 _bfd_elf_link_hash_table_free (obfd);
2191 /* Create an AArch64 elf linker hash table. */
2193 static struct bfd_link_hash_table *
2194 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2196 struct elf_aarch64_link_hash_table *ret;
2197 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2199 ret = bfd_zmalloc (amt);
2203 if (!_bfd_elf_link_hash_table_init
2204 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2205 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2211 ret->plt_header_size = PLT_ENTRY_SIZE;
2212 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2214 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2216 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2217 sizeof (struct elf_aarch64_stub_hash_entry)))
2219 _bfd_elf_link_hash_table_free (abfd);
2223 ret->loc_hash_table = htab_try_create (1024,
2224 elfNN_aarch64_local_htab_hash,
2225 elfNN_aarch64_local_htab_eq,
2227 ret->loc_hash_memory = objalloc_create ();
2228 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2230 elfNN_aarch64_link_hash_table_free (abfd);
2233 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2235 return &ret->root.root;
2239 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2240 bfd_vma offset, bfd_vma value)
2242 reloc_howto_type *howto;
2245 howto = elfNN_aarch64_howto_from_type (r_type);
2246 place = (input_section->output_section->vma + input_section->output_offset
2249 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2250 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2251 return _bfd_aarch64_elf_put_addend (input_bfd,
2252 input_section->contents + offset, r_type,
2256 static enum elf_aarch64_stub_type
2257 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2259 if (aarch64_valid_for_adrp_p (value, place))
2260 return aarch64_stub_adrp_branch;
2261 return aarch64_stub_long_branch;
2264 /* Determine the type of stub needed, if any, for a call. */
2266 static enum elf_aarch64_stub_type
2267 aarch64_type_of_stub (struct bfd_link_info *info,
2268 asection *input_sec,
2269 const Elf_Internal_Rela *rel,
2270 unsigned char st_type,
2271 struct elf_aarch64_link_hash_entry *hash,
2272 bfd_vma destination)
2275 bfd_signed_vma branch_offset;
2276 unsigned int r_type;
2277 struct elf_aarch64_link_hash_table *globals;
2278 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2279 bfd_boolean via_plt_p;
2281 if (st_type != STT_FUNC)
2284 globals = elf_aarch64_hash_table (info);
2285 via_plt_p = (globals->root.splt != NULL && hash != NULL
2286 && hash->root.plt.offset != (bfd_vma) - 1);
2291 /* Determine where the call point is. */
2292 location = (input_sec->output_offset
2293 + input_sec->output_section->vma + rel->r_offset);
2295 branch_offset = (bfd_signed_vma) (destination - location);
2297 r_type = ELFNN_R_TYPE (rel->r_info);
2299 /* We don't want to redirect any old unconditional jump in this way,
2300 only one which is being used for a sibcall, where it is
2301 acceptable for the IP0 and IP1 registers to be clobbered. */
2302 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2303 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2304 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2306 stub_type = aarch64_stub_long_branch;
2312 /* Build a name for an entry in the stub hash table. */
2315 elfNN_aarch64_stub_name (const asection *input_section,
2316 const asection *sym_sec,
2317 const struct elf_aarch64_link_hash_entry *hash,
2318 const Elf_Internal_Rela *rel)
2325 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2326 stub_name = bfd_malloc (len);
2327 if (stub_name != NULL)
2328 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2329 (unsigned int) input_section->id,
2330 hash->root.root.root.string,
2335 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2336 stub_name = bfd_malloc (len);
2337 if (stub_name != NULL)
2338 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2339 (unsigned int) input_section->id,
2340 (unsigned int) sym_sec->id,
2341 (unsigned int) ELFNN_R_SYM (rel->r_info),
2348 /* Look up an entry in the stub hash. Stub entries are cached because
2349 creating the stub name takes a bit of time. */
2351 static struct elf_aarch64_stub_hash_entry *
2352 elfNN_aarch64_get_stub_entry (const asection *input_section,
2353 const asection *sym_sec,
2354 struct elf_link_hash_entry *hash,
2355 const Elf_Internal_Rela *rel,
2356 struct elf_aarch64_link_hash_table *htab)
2358 struct elf_aarch64_stub_hash_entry *stub_entry;
2359 struct elf_aarch64_link_hash_entry *h =
2360 (struct elf_aarch64_link_hash_entry *) hash;
2361 const asection *id_sec;
2363 if ((input_section->flags & SEC_CODE) == 0)
2366 /* If this input section is part of a group of sections sharing one
2367 stub section, then use the id of the first section in the group.
2368 Stub names need to include a section id, as there may well be
2369 more than one stub used to reach say, printf, and we need to
2370 distinguish between them. */
2371 id_sec = htab->stub_group[input_section->id].link_sec;
2373 if (h != NULL && h->stub_cache != NULL
2374 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2376 stub_entry = h->stub_cache;
2382 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2383 if (stub_name == NULL)
2386 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2387 stub_name, FALSE, FALSE);
2389 h->stub_cache = stub_entry;
2398 /* Create a stub section. */
2401 _bfd_aarch64_create_stub_section (asection *section,
2402 struct elf_aarch64_link_hash_table *htab)
2408 namelen = strlen (section->name);
2409 len = namelen + sizeof (STUB_SUFFIX);
2410 s_name = bfd_alloc (htab->stub_bfd, len);
2414 memcpy (s_name, section->name, namelen);
2415 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2416 return (*htab->add_stub_section) (s_name, section);
2420 /* Find or create a stub section for a link section.
2422 Fix or create the stub section used to collect stubs attached to
2423 the specified link section. */
2426 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2427 struct elf_aarch64_link_hash_table *htab)
2429 if (htab->stub_group[link_section->id].stub_sec == NULL)
2430 htab->stub_group[link_section->id].stub_sec
2431 = _bfd_aarch64_create_stub_section (link_section, htab);
2432 return htab->stub_group[link_section->id].stub_sec;
2436 /* Find or create a stub section in the stub group for an input
2440 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2441 struct elf_aarch64_link_hash_table *htab)
2443 asection *link_sec = htab->stub_group[section->id].link_sec;
2444 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2448 /* Add a new stub entry in the stub group associated with an input
2449 section to the stub hash. Not all fields of the new stub entry are
2452 static struct elf_aarch64_stub_hash_entry *
2453 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2455 struct elf_aarch64_link_hash_table *htab)
2459 struct elf_aarch64_stub_hash_entry *stub_entry;
2461 link_sec = htab->stub_group[section->id].link_sec;
2462 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2464 /* Enter this entry into the linker stub hash table. */
2465 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2467 if (stub_entry == NULL)
2469 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2470 section->owner, stub_name);
2474 stub_entry->stub_sec = stub_sec;
2475 stub_entry->stub_offset = 0;
2476 stub_entry->id_sec = link_sec;
2481 /* Add a new stub entry in the final stub section to the stub hash.
2482 Not all fields of the new stub entry are initialised. */
2484 static struct elf_aarch64_stub_hash_entry *
2485 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
2486 asection *link_section,
2487 struct elf_aarch64_link_hash_table *htab)
2490 struct elf_aarch64_stub_hash_entry *stub_entry;
2492 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
2493 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2495 if (stub_entry == NULL)
2497 (*_bfd_error_handler) (_("cannot create stub entry %s"), stub_name);
2501 stub_entry->stub_sec = stub_sec;
2502 stub_entry->stub_offset = 0;
2503 stub_entry->id_sec = link_section;
2510 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2511 void *in_arg ATTRIBUTE_UNUSED)
2513 struct elf_aarch64_stub_hash_entry *stub_entry;
2518 bfd_vma veneered_insn_loc;
2519 bfd_vma veneer_entry_loc;
2520 bfd_signed_vma branch_offset = 0;
2521 unsigned int template_size;
2522 const uint32_t *template;
2525 /* Massage our args to the form they really have. */
2526 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2528 stub_sec = stub_entry->stub_sec;
2530 /* Make a note of the offset within the stubs for this entry. */
2531 stub_entry->stub_offset = stub_sec->size;
2532 loc = stub_sec->contents + stub_entry->stub_offset;
2534 stub_bfd = stub_sec->owner;
2536 /* This is the address of the stub destination. */
2537 sym_value = (stub_entry->target_value
2538 + stub_entry->target_section->output_offset
2539 + stub_entry->target_section->output_section->vma);
2541 if (stub_entry->stub_type == aarch64_stub_long_branch)
2543 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2544 + stub_sec->output_offset);
2546 /* See if we can relax the stub. */
2547 if (aarch64_valid_for_adrp_p (sym_value, place))
2548 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2551 switch (stub_entry->stub_type)
2553 case aarch64_stub_adrp_branch:
2554 template = aarch64_adrp_branch_stub;
2555 template_size = sizeof (aarch64_adrp_branch_stub);
2557 case aarch64_stub_long_branch:
2558 template = aarch64_long_branch_stub;
2559 template_size = sizeof (aarch64_long_branch_stub);
2561 case aarch64_stub_erratum_835769_veneer:
2562 template = aarch64_erratum_835769_stub;
2563 template_size = sizeof (aarch64_erratum_835769_stub);
2565 case aarch64_stub_erratum_843419_veneer:
2566 template = aarch64_erratum_843419_stub;
2567 template_size = sizeof (aarch64_erratum_843419_stub);
2573 for (i = 0; i < (template_size / sizeof template[0]); i++)
2575 bfd_putl32 (template[i], loc);
2579 template_size = (template_size + 7) & ~7;
2580 stub_sec->size += template_size;
2582 switch (stub_entry->stub_type)
2584 case aarch64_stub_adrp_branch:
2585 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2586 stub_entry->stub_offset, sym_value))
2587 /* The stub would not have been relaxed if the offset was out
2591 if (aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
2592 stub_entry->stub_offset + 4, sym_value))
2596 case aarch64_stub_long_branch:
2597 /* We want the value relative to the address 12 bytes back from the
2599 if (aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
2600 stub_entry->stub_offset + 16, sym_value + 12))
2604 case aarch64_stub_erratum_835769_veneer:
2605 veneered_insn_loc = stub_entry->target_section->output_section->vma
2606 + stub_entry->target_section->output_offset
2607 + stub_entry->target_value;
2608 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2609 + stub_entry->stub_sec->output_offset
2610 + stub_entry->stub_offset;
2611 branch_offset = veneered_insn_loc - veneer_entry_loc;
2612 branch_offset >>= 2;
2613 branch_offset &= 0x3ffffff;
2614 bfd_putl32 (stub_entry->veneered_insn,
2615 stub_sec->contents + stub_entry->stub_offset);
2616 bfd_putl32 (template[1] | branch_offset,
2617 stub_sec->contents + stub_entry->stub_offset + 4);
2620 case aarch64_stub_erratum_843419_veneer:
2621 if (aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
2622 stub_entry->stub_offset + 4, sym_value + 4))
2633 /* As above, but don't actually build the stub. Just bump offset so
2634 we know stub section sizes. */
2637 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2638 void *in_arg ATTRIBUTE_UNUSED)
2640 struct elf_aarch64_stub_hash_entry *stub_entry;
2643 /* Massage our args to the form they really have. */
2644 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2646 switch (stub_entry->stub_type)
2648 case aarch64_stub_adrp_branch:
2649 size = sizeof (aarch64_adrp_branch_stub);
2651 case aarch64_stub_long_branch:
2652 size = sizeof (aarch64_long_branch_stub);
2654 case aarch64_stub_erratum_835769_veneer:
2655 size = sizeof (aarch64_erratum_835769_stub);
2657 case aarch64_stub_erratum_843419_veneer:
2658 size = sizeof (aarch64_erratum_843419_stub);
2664 size = (size + 7) & ~7;
2665 stub_entry->stub_sec->size += size;
2669 /* External entry points for sizing and building linker stubs. */
2671 /* Set up various things so that we can make a list of input sections
2672 for each output section included in the link. Returns -1 on error,
2673 0 when no stubs will be needed, and 1 on success. */
2676 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2677 struct bfd_link_info *info)
2680 unsigned int bfd_count;
2681 int top_id, top_index;
2683 asection **input_list, **list;
2685 struct elf_aarch64_link_hash_table *htab =
2686 elf_aarch64_hash_table (info);
2688 if (!is_elf_hash_table (htab))
2691 /* Count the number of input BFDs and find the top input section id. */
2692 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2693 input_bfd != NULL; input_bfd = input_bfd->link.next)
2696 for (section = input_bfd->sections;
2697 section != NULL; section = section->next)
2699 if (top_id < section->id)
2700 top_id = section->id;
2703 htab->bfd_count = bfd_count;
2705 amt = sizeof (struct map_stub) * (top_id + 1);
2706 htab->stub_group = bfd_zmalloc (amt);
2707 if (htab->stub_group == NULL)
2710 /* We can't use output_bfd->section_count here to find the top output
2711 section index as some sections may have been removed, and
2712 _bfd_strip_section_from_output doesn't renumber the indices. */
2713 for (section = output_bfd->sections, top_index = 0;
2714 section != NULL; section = section->next)
2716 if (top_index < section->index)
2717 top_index = section->index;
2720 htab->top_index = top_index;
2721 amt = sizeof (asection *) * (top_index + 1);
2722 input_list = bfd_malloc (amt);
2723 htab->input_list = input_list;
2724 if (input_list == NULL)
2727 /* For sections we aren't interested in, mark their entries with a
2728 value we can check later. */
2729 list = input_list + top_index;
2731 *list = bfd_abs_section_ptr;
2732 while (list-- != input_list);
2734 for (section = output_bfd->sections;
2735 section != NULL; section = section->next)
2737 if ((section->flags & SEC_CODE) != 0)
2738 input_list[section->index] = NULL;
2744 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2745 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2747 /* The linker repeatedly calls this function for each input section,
2748 in the order that input sections are linked into output sections.
2749 Build lists of input sections to determine groupings between which
2750 we may insert linker stubs. */
2753 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2755 struct elf_aarch64_link_hash_table *htab =
2756 elf_aarch64_hash_table (info);
2758 if (isec->output_section->index <= htab->top_index)
2760 asection **list = htab->input_list + isec->output_section->index;
2762 if (*list != bfd_abs_section_ptr)
2764 /* Steal the link_sec pointer for our list. */
2765 /* This happens to make the list in reverse order,
2766 which is what we want. */
2767 PREV_SEC (isec) = *list;
2773 /* See whether we can group stub sections together. Grouping stub
2774 sections may result in fewer stubs. More importantly, we need to
2775 put all .init* and .fini* stubs at the beginning of the .init or
2776 .fini output sections respectively, because glibc splits the
2777 _init and _fini functions into multiple parts. Putting a stub in
2778 the middle of a function is not a good idea. */
2781 group_sections (struct elf_aarch64_link_hash_table *htab,
2782 bfd_size_type stub_group_size,
2783 bfd_boolean stubs_always_before_branch)
2785 asection **list = htab->input_list + htab->top_index;
2789 asection *tail = *list;
2791 if (tail == bfd_abs_section_ptr)
2794 while (tail != NULL)
2798 bfd_size_type total;
2802 while ((prev = PREV_SEC (curr)) != NULL
2803 && ((total += curr->output_offset - prev->output_offset)
2807 /* OK, the size from the start of CURR to the end is less
2808 than stub_group_size and thus can be handled by one stub
2809 section. (Or the tail section is itself larger than
2810 stub_group_size, in which case we may be toast.)
2811 We should really be keeping track of the total size of
2812 stubs added here, as stubs contribute to the final output
2816 prev = PREV_SEC (tail);
2817 /* Set up this stub group. */
2818 htab->stub_group[tail->id].link_sec = curr;
2820 while (tail != curr && (tail = prev) != NULL);
2822 /* But wait, there's more! Input sections up to stub_group_size
2823 bytes before the stub section can be handled by it too. */
2824 if (!stubs_always_before_branch)
2828 && ((total += tail->output_offset - prev->output_offset)
2832 prev = PREV_SEC (tail);
2833 htab->stub_group[tail->id].link_sec = curr;
2839 while (list-- != htab->input_list);
2841 free (htab->input_list);
2846 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2848 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2849 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2850 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2851 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2852 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2853 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2855 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2856 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2857 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2858 #define AARCH64_ZR 0x1f
2860 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2861 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. */
2863 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2864 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2865 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2866 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2867 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2868 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2869 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2870 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2871 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2872 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2873 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2874 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2875 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2876 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2877 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2878 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2879 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2880 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
2882 /* Classify an INSN if it is indeed a load/store.
2884 Return TRUE if INSN is a LD/ST instruction otherwise return FALSE.
2886 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2889 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.
2894 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2895 bfd_boolean *pair, bfd_boolean *load)
2903 /* Bail out quickly if INSN doesn't fall into the the load-store
2905 if (!AARCH64_LDST (insn))
2910 if (AARCH64_LDST_EX (insn))
2912 *rt = AARCH64_RT (insn);
2914 if (AARCH64_BIT (insn, 21) == 1)
2917 *rt2 = AARCH64_RT2 (insn);
2919 *load = AARCH64_LD (insn);
2922 else if (AARCH64_LDST_NAP (insn)
2923 || AARCH64_LDSTP_PI (insn)
2924 || AARCH64_LDSTP_O (insn)
2925 || AARCH64_LDSTP_PRE (insn))
2928 *rt = AARCH64_RT (insn);
2929 *rt2 = AARCH64_RT2 (insn);
2930 *load = AARCH64_LD (insn);
2933 else if (AARCH64_LDST_PCREL (insn)
2934 || AARCH64_LDST_UI (insn)
2935 || AARCH64_LDST_PIIMM (insn)
2936 || AARCH64_LDST_U (insn)
2937 || AARCH64_LDST_PREIMM (insn)
2938 || AARCH64_LDST_RO (insn)
2939 || AARCH64_LDST_UIMM (insn))
2941 *rt = AARCH64_RT (insn);
2943 if (AARCH64_LDST_PCREL (insn))
2945 opc = AARCH64_BITS (insn, 22, 2);
2946 v = AARCH64_BIT (insn, 26);
2947 opc_v = opc | (v << 2);
2948 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
2949 || opc_v == 5 || opc_v == 7);
2952 else if (AARCH64_LDST_SIMD_M (insn)
2953 || AARCH64_LDST_SIMD_M_PI (insn))
2955 *rt = AARCH64_RT (insn);
2956 *load = AARCH64_BIT (insn, 22);
2957 opcode = (insn >> 12) & 0xf;
2984 else if (AARCH64_LDST_SIMD_S (insn)
2985 || AARCH64_LDST_SIMD_S_PI (insn))
2987 *rt = AARCH64_RT (insn);
2988 r = (insn >> 21) & 1;
2989 *load = AARCH64_BIT (insn, 22);
2990 opcode = (insn >> 13) & 0x7;
3002 *rt2 = *rt + (r == 0 ? 2 : 3);
3010 *rt2 = *rt + (r == 0 ? 2 : 3);
3022 /* Return TRUE if INSN is multiply-accumulate. */
3025 aarch64_mlxl_p (uint32_t insn)
3027 uint32_t op31 = AARCH64_OP31 (insn);
3029 if (AARCH64_MAC (insn)
3030 && (op31 == 0 || op31 == 1 || op31 == 5)
3031 /* Exclude MUL instructions which are encoded as a multiple accumulate
3033 && AARCH64_RA (insn) != AARCH64_ZR)
3039 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3040 it is possible for a 64-bit multiply-accumulate instruction to generate an
3041 incorrect result. The details are quite complex and hard to
3042 determine statically, since branches in the code may exist in some
3043 circumstances, but all cases end with a memory (load, store, or
3044 prefetch) instruction followed immediately by the multiply-accumulate
3045 operation. We employ a linker patching technique, by moving the potentially
3046 affected multiply-accumulate instruction into a patch region and replacing
3047 the original instruction with a branch to the patch. This function checks
3048 if INSN_1 is the memory operation followed by a multiply-accumulate
3049 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3050 if INSN_1 and INSN_2 are safe. */
3053 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3063 if (aarch64_mlxl_p (insn_2)
3064 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3066 /* Any SIMD memory op is independent of the subsequent MLA
3067 by definition of the erratum. */
3068 if (AARCH64_BIT (insn_1, 26))
3071 /* If not SIMD, check for integer memory ops and MLA relationship. */
3072 rn = AARCH64_RN (insn_2);
3073 ra = AARCH64_RA (insn_2);
3074 rm = AARCH64_RM (insn_2);
3076 /* If this is a load and there's a true(RAW) dependency, we are safe
3077 and this is not an erratum sequence. */
3079 (rt == rn || rt == rm || rt == ra
3080 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3083 /* We conservatively put out stubs for all other cases (including
3091 /* Used to order a list of mapping symbols by address. */
3094 elf_aarch64_compare_mapping (const void *a, const void *b)
3096 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3097 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3099 if (amap->vma > bmap->vma)
3101 else if (amap->vma < bmap->vma)
3103 else if (amap->type > bmap->type)
3104 /* Ensure results do not depend on the host qsort for objects with
3105 multiple mapping symbols at the same address by sorting on type
3108 else if (amap->type < bmap->type)
3116 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3118 char *stub_name = (char *) bfd_malloc
3119 (strlen ("__erratum_835769_veneer_") + 16);
3120 sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes);
3124 /* Scan for Cortex-A53 erratum 835769 sequence.
3126 Return TRUE else FALSE on abnormal termination. */
3129 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3130 struct bfd_link_info *info,
3131 unsigned int *num_fixes_p)
3134 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3135 unsigned int num_fixes = *num_fixes_p;
3140 for (section = input_bfd->sections;
3142 section = section->next)
3144 bfd_byte *contents = NULL;
3145 struct _aarch64_elf_section_data *sec_data;
3148 if (elf_section_type (section) != SHT_PROGBITS
3149 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3150 || (section->flags & SEC_EXCLUDE) != 0
3151 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3152 || (section->output_section == bfd_abs_section_ptr))
3155 if (elf_section_data (section)->this_hdr.contents != NULL)
3156 contents = elf_section_data (section)->this_hdr.contents;
3157 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3160 sec_data = elf_aarch64_section_data (section);
3162 qsort (sec_data->map, sec_data->mapcount,
3163 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3165 for (span = 0; span < sec_data->mapcount; span++)
3167 unsigned int span_start = sec_data->map[span].vma;
3168 unsigned int span_end = ((span == sec_data->mapcount - 1)
3169 ? sec_data->map[0].vma + section->size
3170 : sec_data->map[span + 1].vma);
3172 char span_type = sec_data->map[span].type;
3174 if (span_type == 'd')
3177 for (i = span_start; i + 4 < span_end; i += 4)
3179 uint32_t insn_1 = bfd_getl32 (contents + i);
3180 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3182 if (aarch64_erratum_sequence (insn_1, insn_2))
3184 struct elf_aarch64_stub_hash_entry *stub_entry;
3185 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3189 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3195 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3196 stub_entry->target_section = section;
3197 stub_entry->target_value = i + 4;
3198 stub_entry->veneered_insn = insn_2;
3199 stub_entry->output_name = stub_name;
3204 if (elf_section_data (section)->this_hdr.contents == NULL)
3208 *num_fixes_p = num_fixes;
3214 /* Test if instruction INSN is ADRP. */
3217 _bfd_aarch64_adrp_p (uint32_t insn)
3219 return ((insn & 0x9f000000) == 0x90000000);
3223 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
3226 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
3234 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
3237 && AARCH64_LDST_UIMM (insn_3)
3238 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
3242 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
3244 Return TRUE if section CONTENTS at offset I contains one of the
3245 erratum 843419 sequences, otherwise return FALSE. If a sequence is
3246 seen set P_VENEER_I to the offset of the final LOAD/STORE
3247 instruction in the sequence.
3251 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
3252 bfd_vma i, bfd_vma span_end,
3253 bfd_vma *p_veneer_i)
3255 uint32_t insn_1 = bfd_getl32 (contents + i);
3257 if (!_bfd_aarch64_adrp_p (insn_1))
3260 if (span_end < i + 12)
3263 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3264 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
3266 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
3269 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
3271 *p_veneer_i = i + 8;
3275 if (span_end < i + 16)
3278 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
3280 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
3282 *p_veneer_i = i + 12;
3290 /* Resize all stub sections. */
3293 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3297 /* OK, we've added some stubs. Find out the new size of the
3299 for (section = htab->stub_bfd->sections;
3300 section != NULL; section = section->next)
3302 /* Ignore non-stub sections. */
3303 if (!strstr (section->name, STUB_SUFFIX))
3308 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3310 for (section = htab->stub_bfd->sections;
3311 section != NULL; section = section->next)
3313 if (!strstr (section->name, STUB_SUFFIX))
3319 /* Ensure all stub sections have a size which is a multiple of
3320 4096. This is important in order to ensure that the insertion
3321 of stub sections does not in itself move existing code around
3322 in such a way that new errata sequences are created. */
3323 if (htab->fix_erratum_843419)
3325 section->size = BFD_ALIGN (section->size, 0x1000);
3330 /* Construct an erratum 843419 workaround stub name.
3334 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
3337 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
3338 char *stub_name = bfd_malloc (len);
3340 if (stub_name != NULL)
3341 snprintf (stub_name, len, "e843419@%04x_%08x_%" BFD_VMA_FMT "x",
3342 input_section->owner->id,
3348 /* Build a stub_entry structure describing an 843419 fixup.
3350 The stub_entry constructed is populated with the bit pattern INSN
3351 of the instruction located at OFFSET within input SECTION.
3353 Returns TRUE on success. */
3356 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
3357 bfd_vma adrp_offset,
3358 bfd_vma ldst_offset,
3360 struct bfd_link_info *info)
3362 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3364 struct elf_aarch64_stub_hash_entry *stub_entry;
3366 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
3367 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3375 /* We always place an 843419 workaround veneer in the stub section
3376 attached to the input section in which an erratum sequence has
3377 been found. This ensures that later in the link process (in
3378 elfNN_aarch64_write_section) when we copy the veneered
3379 instruction from the input section into the stub section the
3380 copied instruction will have had any relocations applied to it.
3381 If we placed workaround veneers in any other stub section then we
3382 could not assume that all relocations have been processed on the
3383 corresponding input section at the point we output the stub
3387 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
3388 if (stub_entry == NULL)
3394 stub_entry->adrp_offset = adrp_offset;
3395 stub_entry->target_value = ldst_offset;
3396 stub_entry->target_section = section;
3397 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
3398 stub_entry->veneered_insn = insn;
3399 stub_entry->output_name = stub_name;
3405 /* Scan an input section looking for the signature of erratum 843419.
3407 Scans input SECTION in INPUT_BFD looking for erratum 843419
3408 signatures, for each signature found a stub_entry is created
3409 describing the location of the erratum for subsequent fixup.
3411 Return TRUE on successful scan, FALSE on failure to scan.
3415 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
3416 struct bfd_link_info *info)
3418 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3423 if (elf_section_type (section) != SHT_PROGBITS
3424 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3425 || (section->flags & SEC_EXCLUDE) != 0
3426 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3427 || (section->output_section == bfd_abs_section_ptr))
3432 bfd_byte *contents = NULL;
3433 struct _aarch64_elf_section_data *sec_data;
3436 if (elf_section_data (section)->this_hdr.contents != NULL)
3437 contents = elf_section_data (section)->this_hdr.contents;
3438 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3441 sec_data = elf_aarch64_section_data (section);
3443 qsort (sec_data->map, sec_data->mapcount,
3444 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3446 for (span = 0; span < sec_data->mapcount; span++)
3448 unsigned int span_start = sec_data->map[span].vma;
3449 unsigned int span_end = ((span == sec_data->mapcount - 1)
3450 ? sec_data->map[0].vma + section->size
3451 : sec_data->map[span + 1].vma);
3453 char span_type = sec_data->map[span].type;
3455 if (span_type == 'd')
3458 for (i = span_start; i + 8 < span_end; i += 4)
3460 bfd_vma vma = (section->output_section->vma
3461 + section->output_offset
3465 if (_bfd_aarch64_erratum_843419_p
3466 (contents, vma, i, span_end, &veneer_i))
3468 uint32_t insn = bfd_getl32 (contents + veneer_i);
3470 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
3477 if (elf_section_data (section)->this_hdr.contents == NULL)
3486 /* Determine and set the size of the stub section for a final link.
3488 The basic idea here is to examine all the relocations looking for
3489 PC-relative calls to a target that is unreachable with a "bl"
3493 elfNN_aarch64_size_stubs (bfd *output_bfd,
3495 struct bfd_link_info *info,
3496 bfd_signed_vma group_size,
3497 asection * (*add_stub_section) (const char *,
3499 void (*layout_sections_again) (void))
3501 bfd_size_type stub_group_size;
3502 bfd_boolean stubs_always_before_branch;
3503 bfd_boolean stub_changed = FALSE;
3504 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3505 unsigned int num_erratum_835769_fixes = 0;
3507 /* Propagate mach to stub bfd, because it may not have been
3508 finalized when we created stub_bfd. */
3509 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3510 bfd_get_mach (output_bfd));
3512 /* Stash our params away. */
3513 htab->stub_bfd = stub_bfd;
3514 htab->add_stub_section = add_stub_section;
3515 htab->layout_sections_again = layout_sections_again;
3516 stubs_always_before_branch = group_size < 0;
3518 stub_group_size = -group_size;
3520 stub_group_size = group_size;
3522 if (stub_group_size == 1)
3524 /* Default values. */
3525 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3526 stub_group_size = 127 * 1024 * 1024;
3529 group_sections (htab, stub_group_size, stubs_always_before_branch);
3531 (*htab->layout_sections_again) ();
3533 if (htab->fix_erratum_835769)
3537 for (input_bfd = info->input_bfds;
3538 input_bfd != NULL; input_bfd = input_bfd->link.next)
3539 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3540 &num_erratum_835769_fixes))
3543 _bfd_aarch64_resize_stubs (htab);
3544 (*htab->layout_sections_again) ();
3547 if (htab->fix_erratum_843419)
3551 for (input_bfd = info->input_bfds;
3553 input_bfd = input_bfd->link.next)
3557 for (section = input_bfd->sections;
3559 section = section->next)
3560 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
3564 _bfd_aarch64_resize_stubs (htab);
3565 (*htab->layout_sections_again) ();
3572 for (input_bfd = info->input_bfds;
3573 input_bfd != NULL; input_bfd = input_bfd->link.next)
3575 Elf_Internal_Shdr *symtab_hdr;
3577 Elf_Internal_Sym *local_syms = NULL;
3579 /* We'll need the symbol table in a second. */
3580 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3581 if (symtab_hdr->sh_info == 0)
3584 /* Walk over each section attached to the input bfd. */
3585 for (section = input_bfd->sections;
3586 section != NULL; section = section->next)
3588 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3590 /* If there aren't any relocs, then there's nothing more
3592 if ((section->flags & SEC_RELOC) == 0
3593 || section->reloc_count == 0
3594 || (section->flags & SEC_CODE) == 0)
3597 /* If this section is a link-once section that will be
3598 discarded, then don't create any stubs. */
3599 if (section->output_section == NULL
3600 || section->output_section->owner != output_bfd)
3603 /* Get the relocs. */
3605 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3606 NULL, info->keep_memory);
3607 if (internal_relocs == NULL)
3608 goto error_ret_free_local;
3610 /* Now examine each relocation. */
3611 irela = internal_relocs;
3612 irelaend = irela + section->reloc_count;
3613 for (; irela < irelaend; irela++)
3615 unsigned int r_type, r_indx;
3616 enum elf_aarch64_stub_type stub_type;
3617 struct elf_aarch64_stub_hash_entry *stub_entry;
3620 bfd_vma destination;
3621 struct elf_aarch64_link_hash_entry *hash;
3622 const char *sym_name;
3624 const asection *id_sec;
3625 unsigned char st_type;
3628 r_type = ELFNN_R_TYPE (irela->r_info);
3629 r_indx = ELFNN_R_SYM (irela->r_info);
3631 if (r_type >= (unsigned int) R_AARCH64_end)
3633 bfd_set_error (bfd_error_bad_value);
3634 error_ret_free_internal:
3635 if (elf_section_data (section)->relocs == NULL)
3636 free (internal_relocs);
3637 goto error_ret_free_local;
3640 /* Only look for stubs on unconditional branch and
3641 branch and link instructions. */
3642 if (r_type != (unsigned int) AARCH64_R (CALL26)
3643 && r_type != (unsigned int) AARCH64_R (JUMP26))
3646 /* Now determine the call target, its name, value,
3653 if (r_indx < symtab_hdr->sh_info)
3655 /* It's a local symbol. */
3656 Elf_Internal_Sym *sym;
3657 Elf_Internal_Shdr *hdr;
3659 if (local_syms == NULL)
3662 = (Elf_Internal_Sym *) symtab_hdr->contents;
3663 if (local_syms == NULL)
3665 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3666 symtab_hdr->sh_info, 0,
3668 if (local_syms == NULL)
3669 goto error_ret_free_internal;
3672 sym = local_syms + r_indx;
3673 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3674 sym_sec = hdr->bfd_section;
3676 /* This is an undefined symbol. It can never
3680 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3681 sym_value = sym->st_value;
3682 destination = (sym_value + irela->r_addend
3683 + sym_sec->output_offset
3684 + sym_sec->output_section->vma);
3685 st_type = ELF_ST_TYPE (sym->st_info);
3687 = bfd_elf_string_from_elf_section (input_bfd,
3688 symtab_hdr->sh_link,
3695 e_indx = r_indx - symtab_hdr->sh_info;
3696 hash = ((struct elf_aarch64_link_hash_entry *)
3697 elf_sym_hashes (input_bfd)[e_indx]);
3699 while (hash->root.root.type == bfd_link_hash_indirect
3700 || hash->root.root.type == bfd_link_hash_warning)
3701 hash = ((struct elf_aarch64_link_hash_entry *)
3702 hash->root.root.u.i.link);
3704 if (hash->root.root.type == bfd_link_hash_defined
3705 || hash->root.root.type == bfd_link_hash_defweak)
3707 struct elf_aarch64_link_hash_table *globals =
3708 elf_aarch64_hash_table (info);
3709 sym_sec = hash->root.root.u.def.section;
3710 sym_value = hash->root.root.u.def.value;
3711 /* For a destination in a shared library,
3712 use the PLT stub as target address to
3713 decide whether a branch stub is
3715 if (globals->root.splt != NULL && hash != NULL
3716 && hash->root.plt.offset != (bfd_vma) - 1)
3718 sym_sec = globals->root.splt;
3719 sym_value = hash->root.plt.offset;
3720 if (sym_sec->output_section != NULL)
3721 destination = (sym_value
3722 + sym_sec->output_offset
3724 sym_sec->output_section->vma);
3726 else if (sym_sec->output_section != NULL)
3727 destination = (sym_value + irela->r_addend
3728 + sym_sec->output_offset
3729 + sym_sec->output_section->vma);
3731 else if (hash->root.root.type == bfd_link_hash_undefined
3732 || (hash->root.root.type
3733 == bfd_link_hash_undefweak))
3735 /* For a shared library, use the PLT stub as
3736 target address to decide whether a long
3737 branch stub is needed.
3738 For absolute code, they cannot be handled. */
3739 struct elf_aarch64_link_hash_table *globals =
3740 elf_aarch64_hash_table (info);
3742 if (globals->root.splt != NULL && hash != NULL
3743 && hash->root.plt.offset != (bfd_vma) - 1)
3745 sym_sec = globals->root.splt;
3746 sym_value = hash->root.plt.offset;
3747 if (sym_sec->output_section != NULL)
3748 destination = (sym_value
3749 + sym_sec->output_offset
3751 sym_sec->output_section->vma);
3758 bfd_set_error (bfd_error_bad_value);
3759 goto error_ret_free_internal;
3761 st_type = ELF_ST_TYPE (hash->root.type);
3762 sym_name = hash->root.root.root.string;
3765 /* Determine what (if any) linker stub is needed. */
3766 stub_type = aarch64_type_of_stub
3767 (info, section, irela, st_type, hash, destination);
3768 if (stub_type == aarch64_stub_none)
3771 /* Support for grouping stub sections. */
3772 id_sec = htab->stub_group[section->id].link_sec;
3774 /* Get the name of this stub. */
3775 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3778 goto error_ret_free_internal;
3781 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3782 stub_name, FALSE, FALSE);
3783 if (stub_entry != NULL)
3785 /* The proper stub has already been created. */
3790 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3791 (stub_name, section, htab);
3792 if (stub_entry == NULL)
3795 goto error_ret_free_internal;
3798 stub_entry->target_value = sym_value;
3799 stub_entry->target_section = sym_sec;
3800 stub_entry->stub_type = stub_type;
3801 stub_entry->h = hash;
3802 stub_entry->st_type = st_type;
3804 if (sym_name == NULL)
3805 sym_name = "unnamed";
3806 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3807 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3808 if (stub_entry->output_name == NULL)
3811 goto error_ret_free_internal;
3814 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3817 stub_changed = TRUE;
3820 /* We're done with the internal relocs, free them. */
3821 if (elf_section_data (section)->relocs == NULL)
3822 free (internal_relocs);
3829 _bfd_aarch64_resize_stubs (htab);
3831 /* Ask the linker to do its stuff. */
3832 (*htab->layout_sections_again) ();
3833 stub_changed = FALSE;
3838 error_ret_free_local:
3842 /* Build all the stubs associated with the current output file. The
3843 stubs are kept in a hash table attached to the main linker hash
3844 table. We also set up the .plt entries for statically linked PIC
3845 functions here. This function is called via aarch64_elf_finish in the
3849 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3852 struct bfd_hash_table *table;
3853 struct elf_aarch64_link_hash_table *htab;
3855 htab = elf_aarch64_hash_table (info);
3857 for (stub_sec = htab->stub_bfd->sections;
3858 stub_sec != NULL; stub_sec = stub_sec->next)
3862 /* Ignore non-stub sections. */
3863 if (!strstr (stub_sec->name, STUB_SUFFIX))
3866 /* Allocate memory to hold the linker stubs. */
3867 size = stub_sec->size;
3868 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3869 if (stub_sec->contents == NULL && size != 0)
3873 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
3874 stub_sec->size += 4;
3877 /* Build the stubs as directed by the stub hash table. */
3878 table = &htab->stub_hash_table;
3879 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3885 /* Add an entry to the code/data map for section SEC. */
3888 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3890 struct _aarch64_elf_section_data *sec_data =
3891 elf_aarch64_section_data (sec);
3892 unsigned int newidx;
3894 if (sec_data->map == NULL)
3896 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3897 sec_data->mapcount = 0;
3898 sec_data->mapsize = 1;
3901 newidx = sec_data->mapcount++;
3903 if (sec_data->mapcount > sec_data->mapsize)
3905 sec_data->mapsize *= 2;
3906 sec_data->map = bfd_realloc_or_free
3907 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3912 sec_data->map[newidx].vma = vma;
3913 sec_data->map[newidx].type = type;
3918 /* Initialise maps of insn/data for input BFDs. */
3920 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3922 Elf_Internal_Sym *isymbuf;
3923 Elf_Internal_Shdr *hdr;
3924 unsigned int i, localsyms;
3926 /* Make sure that we are dealing with an AArch64 elf binary. */
3927 if (!is_aarch64_elf (abfd))
3930 if ((abfd->flags & DYNAMIC) != 0)
3933 hdr = &elf_symtab_hdr (abfd);
3934 localsyms = hdr->sh_info;
3936 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3937 should contain the number of local symbols, which should come before any
3938 global symbols. Mapping symbols are always local. */
3939 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3941 /* No internal symbols read? Skip this BFD. */
3942 if (isymbuf == NULL)
3945 for (i = 0; i < localsyms; i++)
3947 Elf_Internal_Sym *isym = &isymbuf[i];
3948 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3951 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3953 name = bfd_elf_string_from_elf_section (abfd,
3957 if (bfd_is_aarch64_special_symbol_name
3958 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3959 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
3964 /* Set option values needed during linking. */
3966 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
3967 struct bfd_link_info *link_info,
3969 int no_wchar_warn, int pic_veneer,
3970 int fix_erratum_835769,
3971 int fix_erratum_843419)
3973 struct elf_aarch64_link_hash_table *globals;
3975 globals = elf_aarch64_hash_table (link_info);
3976 globals->pic_veneer = pic_veneer;
3977 globals->fix_erratum_835769 = fix_erratum_835769;
3978 globals->fix_erratum_843419 = fix_erratum_843419;
3979 globals->fix_erratum_843419_adr = TRUE;
3981 BFD_ASSERT (is_aarch64_elf (output_bfd));
3982 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3983 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3987 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3988 struct elf_aarch64_link_hash_table
3989 *globals, struct bfd_link_info *info,
3990 bfd_vma value, bfd *output_bfd,
3991 bfd_boolean *unresolved_reloc_p)
3993 bfd_vma off = (bfd_vma) - 1;
3994 asection *basegot = globals->root.sgot;
3995 bfd_boolean dyn = globals->root.dynamic_sections_created;
3999 BFD_ASSERT (basegot != NULL);
4000 off = h->got.offset;
4001 BFD_ASSERT (off != (bfd_vma) - 1);
4002 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
4004 && SYMBOL_REFERENCES_LOCAL (info, h))
4005 || (ELF_ST_VISIBILITY (h->other)
4006 && h->root.type == bfd_link_hash_undefweak))
4008 /* This is actually a static link, or it is a -Bsymbolic link
4009 and the symbol is defined locally. We must initialize this
4010 entry in the global offset table. Since the offset must
4011 always be a multiple of 8 (4 in the case of ILP32), we use
4012 the least significant bit to record whether we have
4013 initialized it already.
4014 When doing a dynamic link, we create a .rel(a).got relocation
4015 entry to initialize the value. This is done in the
4016 finish_dynamic_symbol routine. */
4021 bfd_put_NN (output_bfd, value, basegot->contents + off);
4026 *unresolved_reloc_p = FALSE;
4028 off = off + basegot->output_section->vma + basegot->output_offset;
4034 /* Change R_TYPE to a more efficient access model where possible,
4035 return the new reloc type. */
4037 static bfd_reloc_code_real_type
4038 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
4039 struct elf_link_hash_entry *h)
4041 bfd_boolean is_local = h == NULL;
4045 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4046 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4048 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4049 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
4051 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4053 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4056 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4058 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4059 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4061 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4062 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4064 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4065 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
4067 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4068 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
4070 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4071 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
4073 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4076 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4078 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
4079 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4081 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4082 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4083 /* Instructions with these relocations will become NOPs. */
4084 return BFD_RELOC_AARCH64_NONE;
4094 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
4098 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4099 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4100 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4101 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4102 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4103 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4106 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4107 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4108 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4109 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4112 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4113 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4114 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4115 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4116 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4117 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4118 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4119 return GOT_TLSDESC_GD;
4121 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4122 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4123 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4124 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4127 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4128 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4129 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4130 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4131 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4132 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4133 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4134 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4144 aarch64_can_relax_tls (bfd *input_bfd,
4145 struct bfd_link_info *info,
4146 bfd_reloc_code_real_type r_type,
4147 struct elf_link_hash_entry *h,
4148 unsigned long r_symndx)
4150 unsigned int symbol_got_type;
4151 unsigned int reloc_got_type;
4153 if (! IS_AARCH64_TLS_RELOC (r_type))
4156 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
4157 reloc_got_type = aarch64_reloc_got_type (r_type);
4159 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
4165 if (h && h->root.type == bfd_link_hash_undefweak)
4171 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
4174 static bfd_reloc_code_real_type
4175 aarch64_tls_transition (bfd *input_bfd,
4176 struct bfd_link_info *info,
4177 unsigned int r_type,
4178 struct elf_link_hash_entry *h,
4179 unsigned long r_symndx)
4181 bfd_reloc_code_real_type bfd_r_type
4182 = elfNN_aarch64_bfd_reloc_from_type (r_type);
4184 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
4187 return aarch64_tls_transition_without_check (bfd_r_type, h);
4190 /* Return the base VMA address which should be subtracted from real addresses
4191 when resolving R_AARCH64_TLS_DTPREL relocation. */
4194 dtpoff_base (struct bfd_link_info *info)
4196 /* If tls_sec is NULL, we should have signalled an error already. */
4197 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
4198 return elf_hash_table (info)->tls_sec->vma;
4201 /* Return the base VMA address which should be subtracted from real addresses
4202 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
4205 tpoff_base (struct bfd_link_info *info)
4207 struct elf_link_hash_table *htab = elf_hash_table (info);
4209 /* If tls_sec is NULL, we should have signalled an error already. */
4210 BFD_ASSERT (htab->tls_sec != NULL);
4212 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
4213 htab->tls_sec->alignment_power);
4214 return htab->tls_sec->vma - base;
4218 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4219 unsigned long r_symndx)
4221 /* Calculate the address of the GOT entry for symbol
4222 referred to in h. */
4224 return &h->got.offset;
4228 struct elf_aarch64_local_symbol *l;
4230 l = elf_aarch64_locals (input_bfd);
4231 return &l[r_symndx].got_offset;
4236 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4237 unsigned long r_symndx)
4240 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
4245 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
4246 unsigned long r_symndx)
4249 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4254 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4255 unsigned long r_symndx)
4258 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4264 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4265 unsigned long r_symndx)
4267 /* Calculate the address of the GOT entry for symbol
4268 referred to in h. */
4271 struct elf_aarch64_link_hash_entry *eh;
4272 eh = (struct elf_aarch64_link_hash_entry *) h;
4273 return &eh->tlsdesc_got_jump_table_offset;
4278 struct elf_aarch64_local_symbol *l;
4280 l = elf_aarch64_locals (input_bfd);
4281 return &l[r_symndx].tlsdesc_got_jump_table_offset;
4286 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4287 unsigned long r_symndx)
4290 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4295 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
4296 struct elf_link_hash_entry *h,
4297 unsigned long r_symndx)
4300 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4305 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4306 unsigned long r_symndx)
4309 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4314 /* Data for make_branch_to_erratum_835769_stub(). */
4316 struct erratum_835769_branch_to_stub_data
4318 struct bfd_link_info *info;
4319 asection *output_section;
4323 /* Helper to insert branches to erratum 835769 stubs in the right
4324 places for a particular section. */
4327 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4330 struct elf_aarch64_stub_hash_entry *stub_entry;
4331 struct erratum_835769_branch_to_stub_data *data;
4333 unsigned long branch_insn = 0;
4334 bfd_vma veneered_insn_loc, veneer_entry_loc;
4335 bfd_signed_vma branch_offset;
4336 unsigned int target;
4339 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4340 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4342 if (stub_entry->target_section != data->output_section
4343 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4346 contents = data->contents;
4347 veneered_insn_loc = stub_entry->target_section->output_section->vma
4348 + stub_entry->target_section->output_offset
4349 + stub_entry->target_value;
4350 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4351 + stub_entry->stub_sec->output_offset
4352 + stub_entry->stub_offset;
4353 branch_offset = veneer_entry_loc - veneered_insn_loc;
4355 abfd = stub_entry->target_section->owner;
4356 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4357 (*_bfd_error_handler)
4358 (_("%B: error: Erratum 835769 stub out "
4359 "of range (input file too large)"), abfd);
4361 target = stub_entry->target_value;
4362 branch_insn = 0x14000000;
4363 branch_offset >>= 2;
4364 branch_offset &= 0x3ffffff;
4365 branch_insn |= branch_offset;
4366 bfd_putl32 (branch_insn, &contents[target]);
4373 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
4376 struct elf_aarch64_stub_hash_entry *stub_entry
4377 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4378 struct erratum_835769_branch_to_stub_data *data
4379 = (struct erratum_835769_branch_to_stub_data *) in_arg;
4380 struct bfd_link_info *info;
4381 struct elf_aarch64_link_hash_table *htab;
4389 contents = data->contents;
4390 section = data->output_section;
4392 htab = elf_aarch64_hash_table (info);
4394 if (stub_entry->target_section != section
4395 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
4398 insn = bfd_getl32 (contents + stub_entry->target_value);
4400 stub_entry->stub_sec->contents + stub_entry->stub_offset);
4402 place = (section->output_section->vma + section->output_offset
4403 + stub_entry->adrp_offset);
4404 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
4406 if ((insn & AARCH64_ADRP_OP_MASK) != AARCH64_ADRP_OP)
4409 bfd_signed_vma imm =
4410 (_bfd_aarch64_sign_extend
4411 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
4414 if (htab->fix_erratum_843419_adr
4415 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
4417 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
4418 | AARCH64_RT (insn));
4419 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
4423 bfd_vma veneered_insn_loc;
4424 bfd_vma veneer_entry_loc;
4425 bfd_signed_vma branch_offset;
4426 uint32_t branch_insn;
4428 veneered_insn_loc = stub_entry->target_section->output_section->vma
4429 + stub_entry->target_section->output_offset
4430 + stub_entry->target_value;
4431 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4432 + stub_entry->stub_sec->output_offset
4433 + stub_entry->stub_offset;
4434 branch_offset = veneer_entry_loc - veneered_insn_loc;
4436 abfd = stub_entry->target_section->owner;
4437 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4438 (*_bfd_error_handler)
4439 (_("%B: error: Erratum 843419 stub out "
4440 "of range (input file too large)"), abfd);
4442 branch_insn = 0x14000000;
4443 branch_offset >>= 2;
4444 branch_offset &= 0x3ffffff;
4445 branch_insn |= branch_offset;
4446 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
4453 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4454 struct bfd_link_info *link_info,
4459 struct elf_aarch64_link_hash_table *globals =
4460 elf_aarch64_hash_table (link_info);
4462 if (globals == NULL)
4465 /* Fix code to point to erratum 835769 stubs. */
4466 if (globals->fix_erratum_835769)
4468 struct erratum_835769_branch_to_stub_data data;
4470 data.info = link_info;
4471 data.output_section = sec;
4472 data.contents = contents;
4473 bfd_hash_traverse (&globals->stub_hash_table,
4474 make_branch_to_erratum_835769_stub, &data);
4477 if (globals->fix_erratum_843419)
4479 struct erratum_835769_branch_to_stub_data data;
4481 data.info = link_info;
4482 data.output_section = sec;
4483 data.contents = contents;
4484 bfd_hash_traverse (&globals->stub_hash_table,
4485 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
4491 /* Perform a relocation as part of a final link. */
4492 static bfd_reloc_status_type
4493 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4496 asection *input_section,
4498 Elf_Internal_Rela *rel,
4500 struct bfd_link_info *info,
4502 struct elf_link_hash_entry *h,
4503 bfd_boolean *unresolved_reloc_p,
4504 bfd_boolean save_addend,
4505 bfd_vma *saved_addend,
4506 Elf_Internal_Sym *sym)
4508 Elf_Internal_Shdr *symtab_hdr;
4509 unsigned int r_type = howto->type;
4510 bfd_reloc_code_real_type bfd_r_type
4511 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4512 bfd_reloc_code_real_type new_bfd_r_type;
4513 unsigned long r_symndx;
4514 bfd_byte *hit_data = contents + rel->r_offset;
4516 bfd_signed_vma signed_addend;
4517 struct elf_aarch64_link_hash_table *globals;
4518 bfd_boolean weak_undef_p;
4521 globals = elf_aarch64_hash_table (info);
4523 symtab_hdr = &elf_symtab_hdr (input_bfd);
4525 BFD_ASSERT (is_aarch64_elf (input_bfd));
4527 r_symndx = ELFNN_R_SYM (rel->r_info);
4529 /* It is possible to have linker relaxations on some TLS access
4530 models. Update our information here. */
4531 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4532 if (new_bfd_r_type != bfd_r_type)
4534 bfd_r_type = new_bfd_r_type;
4535 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4536 BFD_ASSERT (howto != NULL);
4537 r_type = howto->type;
4540 place = input_section->output_section->vma
4541 + input_section->output_offset + rel->r_offset;
4543 /* Get addend, accumulating the addend for consecutive relocs
4544 which refer to the same offset. */
4545 signed_addend = saved_addend ? *saved_addend : 0;
4546 signed_addend += rel->r_addend;
4548 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4549 : bfd_is_und_section (sym_sec));
4551 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4552 it here if it is defined in a non-shared object. */
4554 && h->type == STT_GNU_IFUNC
4561 if ((input_section->flags & SEC_ALLOC) == 0
4562 || h->plt.offset == (bfd_vma) -1)
4565 /* STT_GNU_IFUNC symbol must go through PLT. */
4566 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4567 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4572 if (h->root.root.string)
4573 name = h->root.root.string;
4575 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4577 (*_bfd_error_handler)
4578 (_("%B: relocation %s against STT_GNU_IFUNC "
4579 "symbol `%s' isn't handled by %s"), input_bfd,
4580 howto->name, name, __FUNCTION__);
4581 bfd_set_error (bfd_error_bad_value);
4584 case BFD_RELOC_AARCH64_NN:
4585 if (rel->r_addend != 0)
4587 if (h->root.root.string)
4588 name = h->root.root.string;
4590 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4592 (*_bfd_error_handler)
4593 (_("%B: relocation %s against STT_GNU_IFUNC "
4594 "symbol `%s' has non-zero addend: %d"),
4595 input_bfd, howto->name, name, rel->r_addend);
4596 bfd_set_error (bfd_error_bad_value);
4600 /* Generate dynamic relocation only when there is a
4601 non-GOT reference in a shared object. */
4602 if (info->shared && h->non_got_ref)
4604 Elf_Internal_Rela outrel;
4607 /* Need a dynamic relocation to get the real function
4609 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4613 if (outrel.r_offset == (bfd_vma) -1
4614 || outrel.r_offset == (bfd_vma) -2)
4617 outrel.r_offset += (input_section->output_section->vma
4618 + input_section->output_offset);
4620 if (h->dynindx == -1
4622 || info->executable)
4624 /* This symbol is resolved locally. */
4625 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4626 outrel.r_addend = (h->root.u.def.value
4627 + h->root.u.def.section->output_section->vma
4628 + h->root.u.def.section->output_offset);
4632 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4633 outrel.r_addend = 0;
4636 sreloc = globals->root.irelifunc;
4637 elf_append_rela (output_bfd, sreloc, &outrel);
4639 /* If this reloc is against an external symbol, we
4640 do not want to fiddle with the addend. Otherwise,
4641 we need to include the symbol value so that it
4642 becomes an addend for the dynamic reloc. For an
4643 internal symbol, we have updated addend. */
4644 return bfd_reloc_ok;
4647 case BFD_RELOC_AARCH64_CALL26:
4648 case BFD_RELOC_AARCH64_JUMP26:
4649 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4652 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4654 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4655 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4656 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4657 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4658 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4659 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4660 base_got = globals->root.sgot;
4661 off = h->got.offset;
4663 if (base_got == NULL)
4666 if (off == (bfd_vma) -1)
4670 /* We can't use h->got.offset here to save state, or
4671 even just remember the offset, as finish_dynamic_symbol
4672 would use that as offset into .got. */
4674 if (globals->root.splt != NULL)
4676 plt_index = ((h->plt.offset - globals->plt_header_size) /
4677 globals->plt_entry_size);
4678 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4679 base_got = globals->root.sgotplt;
4683 plt_index = h->plt.offset / globals->plt_entry_size;
4684 off = plt_index * GOT_ENTRY_SIZE;
4685 base_got = globals->root.igotplt;
4688 if (h->dynindx == -1
4692 /* This references the local definition. We must
4693 initialize this entry in the global offset table.
4694 Since the offset must always be a multiple of 8,
4695 we use the least significant bit to record
4696 whether we have initialized it already.
4698 When doing a dynamic link, we create a .rela.got
4699 relocation entry to initialize the value. This
4700 is done in the finish_dynamic_symbol routine. */
4705 bfd_put_NN (output_bfd, value,
4706 base_got->contents + off);
4707 /* Note that this is harmless as -1 | 1 still is -1. */
4711 value = (base_got->output_section->vma
4712 + base_got->output_offset + off);
4715 value = aarch64_calculate_got_entry_vma (h, globals, info,
4717 unresolved_reloc_p);
4718 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4719 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4720 addend = (globals->root.sgot->output_section->vma
4721 + globals->root.sgot->output_offset);
4722 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4723 addend, weak_undef_p);
4724 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4725 case BFD_RELOC_AARCH64_ADD_LO12:
4726 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4733 case BFD_RELOC_AARCH64_NONE:
4734 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4735 *unresolved_reloc_p = FALSE;
4736 return bfd_reloc_ok;
4738 case BFD_RELOC_AARCH64_NN:
4740 /* When generating a shared object or relocatable executable, these
4741 relocations are copied into the output file to be resolved at
4743 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
4744 && (input_section->flags & SEC_ALLOC)
4746 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4747 || h->root.type != bfd_link_hash_undefweak))
4749 Elf_Internal_Rela outrel;
4751 bfd_boolean skip, relocate;
4754 *unresolved_reloc_p = FALSE;
4759 outrel.r_addend = signed_addend;
4761 _bfd_elf_section_offset (output_bfd, info, input_section,
4763 if (outrel.r_offset == (bfd_vma) - 1)
4765 else if (outrel.r_offset == (bfd_vma) - 2)
4771 outrel.r_offset += (input_section->output_section->vma
4772 + input_section->output_offset);
4775 memset (&outrel, 0, sizeof outrel);
4778 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4779 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4784 /* On SVR4-ish systems, the dynamic loader cannot
4785 relocate the text and data segments independently,
4786 so the symbol does not matter. */
4788 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4789 outrel.r_addend += value;
4792 sreloc = elf_section_data (input_section)->sreloc;
4793 if (sreloc == NULL || sreloc->contents == NULL)
4794 return bfd_reloc_notsupported;
4796 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4797 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4799 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4801 /* Sanity to check that we have previously allocated
4802 sufficient space in the relocation section for the
4803 number of relocations we actually want to emit. */
4807 /* If this reloc is against an external symbol, we do not want to
4808 fiddle with the addend. Otherwise, we need to include the symbol
4809 value so that it becomes an addend for the dynamic reloc. */
4811 return bfd_reloc_ok;
4813 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4814 contents, rel->r_offset, value,
4818 value += signed_addend;
4821 case BFD_RELOC_AARCH64_CALL26:
4822 case BFD_RELOC_AARCH64_JUMP26:
4824 asection *splt = globals->root.splt;
4825 bfd_boolean via_plt_p =
4826 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4828 /* A call to an undefined weak symbol is converted to a jump to
4829 the next instruction unless a PLT entry will be created.
4830 The jump to the next instruction is optimized as a NOP.
4831 Do the same for local undefined symbols. */
4832 if (weak_undef_p && ! via_plt_p)
4834 bfd_putl32 (INSN_NOP, hit_data);
4835 return bfd_reloc_ok;
4838 /* If the call goes through a PLT entry, make sure to
4839 check distance to the right destination address. */
4842 value = (splt->output_section->vma
4843 + splt->output_offset + h->plt.offset);
4844 *unresolved_reloc_p = FALSE;
4847 /* If the target symbol is global and marked as a function the
4848 relocation applies a function call or a tail call. In this
4849 situation we can veneer out of range branches. The veneers
4850 use IP0 and IP1 hence cannot be used arbitrary out of range
4851 branches that occur within the body of a function. */
4852 if (h && h->type == STT_FUNC)
4854 /* Check if a stub has to be inserted because the destination
4856 if (! aarch64_valid_branch_p (value, place))
4858 /* The target is out of reach, so redirect the branch to
4859 the local stub for this function. */
4860 struct elf_aarch64_stub_hash_entry *stub_entry;
4861 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
4864 if (stub_entry != NULL)
4865 value = (stub_entry->stub_offset
4866 + stub_entry->stub_sec->output_offset
4867 + stub_entry->stub_sec->output_section->vma);
4871 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4872 signed_addend, weak_undef_p);
4875 case BFD_RELOC_AARCH64_16_PCREL:
4876 case BFD_RELOC_AARCH64_32_PCREL:
4877 case BFD_RELOC_AARCH64_64_PCREL:
4878 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4879 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4880 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4881 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4883 && (input_section->flags & SEC_ALLOC) != 0
4884 && (input_section->flags & SEC_READONLY) != 0
4888 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4890 (*_bfd_error_handler)
4891 (_("%B: relocation %s against external symbol `%s' can not be used"
4892 " when making a shared object; recompile with -fPIC"),
4893 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
4894 h->root.root.string);
4895 bfd_set_error (bfd_error_bad_value);
4899 case BFD_RELOC_AARCH64_16:
4901 case BFD_RELOC_AARCH64_32:
4903 case BFD_RELOC_AARCH64_ADD_LO12:
4904 case BFD_RELOC_AARCH64_BRANCH19:
4905 case BFD_RELOC_AARCH64_LDST128_LO12:
4906 case BFD_RELOC_AARCH64_LDST16_LO12:
4907 case BFD_RELOC_AARCH64_LDST32_LO12:
4908 case BFD_RELOC_AARCH64_LDST64_LO12:
4909 case BFD_RELOC_AARCH64_LDST8_LO12:
4910 case BFD_RELOC_AARCH64_MOVW_G0:
4911 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4912 case BFD_RELOC_AARCH64_MOVW_G0_S:
4913 case BFD_RELOC_AARCH64_MOVW_G1:
4914 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4915 case BFD_RELOC_AARCH64_MOVW_G1_S:
4916 case BFD_RELOC_AARCH64_MOVW_G2:
4917 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4918 case BFD_RELOC_AARCH64_MOVW_G2_S:
4919 case BFD_RELOC_AARCH64_MOVW_G3:
4920 case BFD_RELOC_AARCH64_TSTBR14:
4921 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4922 signed_addend, weak_undef_p);
4925 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4926 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4927 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4928 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4929 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4930 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4931 if (globals->root.sgot == NULL)
4932 BFD_ASSERT (h != NULL);
4937 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4939 unresolved_reloc_p);
4940 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4941 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4942 addend = (globals->root.sgot->output_section->vma
4943 + globals->root.sgot->output_offset);
4944 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4945 addend, weak_undef_p);
4950 struct elf_aarch64_local_symbol *locals
4951 = elf_aarch64_locals (input_bfd);
4955 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4956 (*_bfd_error_handler)
4957 (_("%B: Local symbol descriptor table be NULL when applying "
4958 "relocation %s against local symbol"),
4959 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
4963 off = symbol_got_offset (input_bfd, h, r_symndx);
4964 base_got = globals->root.sgot;
4965 bfd_vma got_entry_addr = (base_got->output_section->vma
4966 + base_got->output_offset + off);
4968 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4970 bfd_put_64 (output_bfd, value, base_got->contents + off);
4975 Elf_Internal_Rela outrel;
4977 /* For local symbol, we have done absolute relocation in static
4978 linking stageh. While for share library, we need to update
4979 the content of GOT entry according to the share objects
4980 loading base address. So we need to generate a
4981 R_AARCH64_RELATIVE reloc for dynamic linker. */
4982 s = globals->root.srelgot;
4986 outrel.r_offset = got_entry_addr;
4987 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
4988 outrel.r_addend = value;
4989 elf_append_rela (output_bfd, s, &outrel);
4992 symbol_got_offset_mark (input_bfd, h, r_symndx);
4995 /* Update the relocation value to GOT entry addr as we have transformed
4996 the direct data access into indirect data access through GOT. */
4997 value = got_entry_addr;
4999 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5000 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5001 addend = base_got->output_section->vma + base_got->output_offset;
5003 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5004 addend, weak_undef_p);
5009 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5010 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5011 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5012 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5013 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5014 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5015 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5016 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5017 if (globals->root.sgot == NULL)
5018 return bfd_reloc_notsupported;
5020 value = (symbol_got_offset (input_bfd, h, r_symndx)
5021 + globals->root.sgot->output_section->vma
5022 + globals->root.sgot->output_offset);
5024 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5026 *unresolved_reloc_p = FALSE;
5029 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5030 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5031 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5032 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5033 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5034 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5035 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5036 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5037 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5038 signed_addend - tpoff_base (info),
5040 *unresolved_reloc_p = FALSE;
5043 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5044 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5045 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5046 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5047 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5048 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5049 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5050 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5051 if (globals->root.sgot == NULL)
5052 return bfd_reloc_notsupported;
5053 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
5054 + globals->root.sgotplt->output_section->vma
5055 + globals->root.sgotplt->output_offset
5056 + globals->sgotplt_jump_table_size);
5058 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5060 *unresolved_reloc_p = FALSE;
5064 return bfd_reloc_notsupported;
5068 *saved_addend = value;
5070 /* Only apply the final relocation in a sequence. */
5072 return bfd_reloc_continue;
5074 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5078 /* Handle TLS relaxations. Relaxing is possible for symbols that use
5079 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
5082 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
5083 is to then call final_link_relocate. Return other values in the
5086 static bfd_reloc_status_type
5087 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
5088 bfd *input_bfd, bfd_byte *contents,
5089 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
5091 bfd_boolean is_local = h == NULL;
5092 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
5095 BFD_ASSERT (globals && input_bfd && contents && rel);
5097 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5099 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5100 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5103 /* GD->LE relaxation:
5104 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
5106 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
5108 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5109 return bfd_reloc_continue;
5113 /* GD->IE relaxation:
5114 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
5116 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
5118 return bfd_reloc_continue;
5121 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5125 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5128 /* Tiny TLSDESC->LE relaxation:
5129 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
5130 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
5134 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5135 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5137 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5138 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
5139 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5141 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5142 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
5143 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5144 return bfd_reloc_continue;
5148 /* Tiny TLSDESC->IE relaxation:
5149 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
5150 adr x0, :tlsdesc:var => nop
5154 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5155 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5157 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5158 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5160 bfd_putl32 (0x58000000, contents + rel->r_offset);
5161 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
5162 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5163 return bfd_reloc_continue;
5166 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5169 /* Tiny GD->LE relaxation:
5170 adr x0, :tlsgd:var => mrs x1, tpidr_el0
5171 bl __tls_get_addr => add x0, x1, #:tprel_hi12:x, lsl #12
5172 nop => add x0, x0, #:tprel_lo12_nc:x
5175 /* First kill the tls_get_addr reloc on the bl instruction. */
5176 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5178 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
5179 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
5180 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
5182 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5183 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
5184 rel[1].r_offset = rel->r_offset + 8;
5186 /* Move the current relocation to the second instruction in
5189 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5190 AARCH64_R (TLSLE_ADD_TPREL_HI12));
5191 return bfd_reloc_continue;
5195 /* Tiny GD->IE relaxation:
5196 adr x0, :tlsgd:var => ldr x0, :gottprel:var
5197 bl __tls_get_addr => mrs x1, tpidr_el0
5198 nop => add x0, x0, x1
5201 /* First kill the tls_get_addr reloc on the bl instruction. */
5202 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5203 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5205 bfd_putl32 (0x58000000, contents + rel->r_offset);
5206 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5207 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5208 return bfd_reloc_continue;
5211 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5212 return bfd_reloc_continue;
5214 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5217 /* GD->LE relaxation:
5218 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
5220 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5221 return bfd_reloc_continue;
5225 /* GD->IE relaxation:
5226 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
5228 insn = bfd_getl32 (contents + rel->r_offset);
5230 bfd_putl32 (insn, contents + rel->r_offset);
5231 return bfd_reloc_continue;
5234 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5237 /* GD->LE relaxation
5238 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
5239 bl __tls_get_addr => mrs x1, tpidr_el0
5240 nop => add x0, x1, x0
5243 /* First kill the tls_get_addr reloc on the bl instruction. */
5244 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5245 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5247 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5248 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5249 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5250 return bfd_reloc_continue;
5254 /* GD->IE relaxation
5255 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
5256 BL __tls_get_addr => mrs x1, tpidr_el0
5258 NOP => add x0, x1, x0
5261 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
5263 /* Remove the relocation on the BL instruction. */
5264 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5266 bfd_putl32 (0xf9400000, contents + rel->r_offset);
5268 /* We choose to fixup the BL and NOP instructions using the
5269 offset from the second relocation to allow flexibility in
5270 scheduling instructions between the ADD and BL. */
5271 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
5272 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
5273 return bfd_reloc_continue;
5276 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5277 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5278 /* GD->IE/LE relaxation:
5279 add x0, x0, #:tlsdesc_lo12:var => nop
5282 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
5283 return bfd_reloc_ok;
5285 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5286 /* IE->LE relaxation:
5287 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
5291 insn = bfd_getl32 (contents + rel->r_offset);
5292 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
5294 return bfd_reloc_continue;
5296 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5297 /* IE->LE relaxation:
5298 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
5302 insn = bfd_getl32 (contents + rel->r_offset);
5303 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
5305 return bfd_reloc_continue;
5308 return bfd_reloc_continue;
5311 return bfd_reloc_ok;
5314 /* Relocate an AArch64 ELF section. */
5317 elfNN_aarch64_relocate_section (bfd *output_bfd,
5318 struct bfd_link_info *info,
5320 asection *input_section,
5322 Elf_Internal_Rela *relocs,
5323 Elf_Internal_Sym *local_syms,
5324 asection **local_sections)
5326 Elf_Internal_Shdr *symtab_hdr;
5327 struct elf_link_hash_entry **sym_hashes;
5328 Elf_Internal_Rela *rel;
5329 Elf_Internal_Rela *relend;
5331 struct elf_aarch64_link_hash_table *globals;
5332 bfd_boolean save_addend = FALSE;
5335 globals = elf_aarch64_hash_table (info);
5337 symtab_hdr = &elf_symtab_hdr (input_bfd);
5338 sym_hashes = elf_sym_hashes (input_bfd);
5341 relend = relocs + input_section->reloc_count;
5342 for (; rel < relend; rel++)
5344 unsigned int r_type;
5345 bfd_reloc_code_real_type bfd_r_type;
5346 bfd_reloc_code_real_type relaxed_bfd_r_type;
5347 reloc_howto_type *howto;
5348 unsigned long r_symndx;
5349 Elf_Internal_Sym *sym;
5351 struct elf_link_hash_entry *h;
5353 bfd_reloc_status_type r;
5356 bfd_boolean unresolved_reloc = FALSE;
5357 char *error_message = NULL;
5359 r_symndx = ELFNN_R_SYM (rel->r_info);
5360 r_type = ELFNN_R_TYPE (rel->r_info);
5362 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
5363 howto = bfd_reloc.howto;
5367 (*_bfd_error_handler)
5368 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
5369 input_bfd, input_section, r_type);
5372 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
5378 if (r_symndx < symtab_hdr->sh_info)
5380 sym = local_syms + r_symndx;
5381 sym_type = ELFNN_ST_TYPE (sym->st_info);
5382 sec = local_sections[r_symndx];
5384 /* An object file might have a reference to a local
5385 undefined symbol. This is a daft object file, but we
5386 should at least do something about it. */
5387 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
5388 && bfd_is_und_section (sec)
5389 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
5391 if (!info->callbacks->undefined_symbol
5392 (info, bfd_elf_string_from_elf_section
5393 (input_bfd, symtab_hdr->sh_link, sym->st_name),
5394 input_bfd, input_section, rel->r_offset, TRUE))
5398 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
5400 /* Relocate against local STT_GNU_IFUNC symbol. */
5401 if (!info->relocatable
5402 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
5404 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
5409 /* Set STT_GNU_IFUNC symbol value. */
5410 h->root.u.def.value = sym->st_value;
5411 h->root.u.def.section = sec;
5416 bfd_boolean warned, ignored;
5418 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
5419 r_symndx, symtab_hdr, sym_hashes,
5421 unresolved_reloc, warned, ignored);
5426 if (sec != NULL && discarded_section (sec))
5427 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
5428 rel, 1, relend, howto, 0, contents);
5430 if (info->relocatable)
5434 name = h->root.root.string;
5437 name = (bfd_elf_string_from_elf_section
5438 (input_bfd, symtab_hdr->sh_link, sym->st_name));
5439 if (name == NULL || *name == '\0')
5440 name = bfd_section_name (input_bfd, sec);
5444 && r_type != R_AARCH64_NONE
5445 && r_type != R_AARCH64_NULL
5447 || h->root.type == bfd_link_hash_defined
5448 || h->root.type == bfd_link_hash_defweak)
5449 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
5451 (*_bfd_error_handler)
5452 ((sym_type == STT_TLS
5453 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
5454 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
5456 input_section, (long) rel->r_offset, howto->name, name);
5459 /* We relax only if we can see that there can be a valid transition
5460 from a reloc type to another.
5461 We call elfNN_aarch64_final_link_relocate unless we're completely
5462 done, i.e., the relaxation produced the final output we want. */
5464 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
5466 if (relaxed_bfd_r_type != bfd_r_type)
5468 bfd_r_type = relaxed_bfd_r_type;
5469 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
5470 BFD_ASSERT (howto != NULL);
5471 r_type = howto->type;
5472 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
5473 unresolved_reloc = 0;
5476 r = bfd_reloc_continue;
5478 /* There may be multiple consecutive relocations for the
5479 same offset. In that case we are supposed to treat the
5480 output of each relocation as the addend for the next. */
5481 if (rel + 1 < relend
5482 && rel->r_offset == rel[1].r_offset
5483 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
5484 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
5487 save_addend = FALSE;
5489 if (r == bfd_reloc_continue)
5490 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
5491 input_section, contents, rel,
5492 relocation, info, sec,
5493 h, &unresolved_reloc,
5494 save_addend, &addend, sym);
5496 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5498 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5499 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5500 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5501 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5502 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5504 bfd_boolean need_relocs = FALSE;
5509 off = symbol_got_offset (input_bfd, h, r_symndx);
5510 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5513 (info->shared || indx != 0) &&
5515 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5516 || h->root.type != bfd_link_hash_undefweak);
5518 BFD_ASSERT (globals->root.srelgot != NULL);
5522 Elf_Internal_Rela rela;
5523 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
5525 rela.r_offset = globals->root.sgot->output_section->vma +
5526 globals->root.sgot->output_offset + off;
5529 loc = globals->root.srelgot->contents;
5530 loc += globals->root.srelgot->reloc_count++
5531 * RELOC_SIZE (htab);
5532 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5534 if (elfNN_aarch64_bfd_reloc_from_type (r_type)
5535 == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21)
5537 /* For local dynamic, don't generate DTPREL in any case.
5538 Initialize the DTPREL slot into zero, so we get module
5539 base address when invoke runtime TLS resolver. */
5540 bfd_put_NN (output_bfd, 0,
5541 globals->root.sgot->contents + off
5546 bfd_put_NN (output_bfd,
5547 relocation - dtpoff_base (info),
5548 globals->root.sgot->contents + off
5553 /* This TLS symbol is global. We emit a
5554 relocation to fixup the tls offset at load
5557 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
5560 (globals->root.sgot->output_section->vma
5561 + globals->root.sgot->output_offset + off
5564 loc = globals->root.srelgot->contents;
5565 loc += globals->root.srelgot->reloc_count++
5566 * RELOC_SIZE (globals);
5567 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5568 bfd_put_NN (output_bfd, (bfd_vma) 0,
5569 globals->root.sgot->contents + off
5575 bfd_put_NN (output_bfd, (bfd_vma) 1,
5576 globals->root.sgot->contents + off);
5577 bfd_put_NN (output_bfd,
5578 relocation - dtpoff_base (info),
5579 globals->root.sgot->contents + off
5583 symbol_got_offset_mark (input_bfd, h, r_symndx);
5587 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5588 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5589 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5590 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5592 bfd_boolean need_relocs = FALSE;
5597 off = symbol_got_offset (input_bfd, h, r_symndx);
5599 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5602 (info->shared || indx != 0) &&
5604 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5605 || h->root.type != bfd_link_hash_undefweak);
5607 BFD_ASSERT (globals->root.srelgot != NULL);
5611 Elf_Internal_Rela rela;
5614 rela.r_addend = relocation - dtpoff_base (info);
5618 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5619 rela.r_offset = globals->root.sgot->output_section->vma +
5620 globals->root.sgot->output_offset + off;
5622 loc = globals->root.srelgot->contents;
5623 loc += globals->root.srelgot->reloc_count++
5624 * RELOC_SIZE (htab);
5626 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5628 bfd_put_NN (output_bfd, rela.r_addend,
5629 globals->root.sgot->contents + off);
5632 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5633 globals->root.sgot->contents + off);
5635 symbol_got_offset_mark (input_bfd, h, r_symndx);
5639 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5640 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5641 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5642 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5643 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5644 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5645 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5646 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5649 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5650 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5651 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5652 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5653 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5654 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5656 bfd_boolean need_relocs = FALSE;
5657 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5658 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5660 need_relocs = (h == NULL
5661 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5662 || h->root.type != bfd_link_hash_undefweak);
5664 BFD_ASSERT (globals->root.srelgot != NULL);
5665 BFD_ASSERT (globals->root.sgot != NULL);
5670 Elf_Internal_Rela rela;
5671 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5674 rela.r_offset = (globals->root.sgotplt->output_section->vma
5675 + globals->root.sgotplt->output_offset
5676 + off + globals->sgotplt_jump_table_size);
5679 rela.r_addend = relocation - dtpoff_base (info);
5681 /* Allocate the next available slot in the PLT reloc
5682 section to hold our R_AARCH64_TLSDESC, the next
5683 available slot is determined from reloc_count,
5684 which we step. But note, reloc_count was
5685 artifically moved down while allocating slots for
5686 real PLT relocs such that all of the PLT relocs
5687 will fit above the initial reloc_count and the
5688 extra stuff will fit below. */
5689 loc = globals->root.srelplt->contents;
5690 loc += globals->root.srelplt->reloc_count++
5691 * RELOC_SIZE (globals);
5693 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5695 bfd_put_NN (output_bfd, (bfd_vma) 0,
5696 globals->root.sgotplt->contents + off +
5697 globals->sgotplt_jump_table_size);
5698 bfd_put_NN (output_bfd, (bfd_vma) 0,
5699 globals->root.sgotplt->contents + off +
5700 globals->sgotplt_jump_table_size +
5704 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5715 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5716 because such sections are not SEC_ALLOC and thus ld.so will
5717 not process them. */
5718 if (unresolved_reloc
5719 && !((input_section->flags & SEC_DEBUGGING) != 0
5721 && _bfd_elf_section_offset (output_bfd, info, input_section,
5722 +rel->r_offset) != (bfd_vma) - 1)
5724 (*_bfd_error_handler)
5726 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5727 input_bfd, input_section, (long) rel->r_offset, howto->name,
5728 h->root.root.string);
5732 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5736 case bfd_reloc_overflow:
5737 if (!(*info->callbacks->reloc_overflow)
5738 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
5739 input_bfd, input_section, rel->r_offset))
5743 case bfd_reloc_undefined:
5744 if (!((*info->callbacks->undefined_symbol)
5745 (info, name, input_bfd, input_section,
5746 rel->r_offset, TRUE)))
5750 case bfd_reloc_outofrange:
5751 error_message = _("out of range");
5754 case bfd_reloc_notsupported:
5755 error_message = _("unsupported relocation");
5758 case bfd_reloc_dangerous:
5759 /* error_message should already be set. */
5763 error_message = _("unknown error");
5767 BFD_ASSERT (error_message != NULL);
5768 if (!((*info->callbacks->reloc_dangerous)
5769 (info, error_message, input_bfd, input_section,
5780 /* Set the right machine number. */
5783 elfNN_aarch64_object_p (bfd *abfd)
5786 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5788 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5793 /* Function to keep AArch64 specific flags in the ELF header. */
5796 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5798 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5803 elf_elfheader (abfd)->e_flags = flags;
5804 elf_flags_init (abfd) = TRUE;
5810 /* Merge backend specific data from an object file to the output
5811 object file when linking. */
5814 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5818 bfd_boolean flags_compatible = TRUE;
5821 /* Check if we have the same endianess. */
5822 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5825 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5828 /* The input BFD must have had its flags initialised. */
5829 /* The following seems bogus to me -- The flags are initialized in
5830 the assembler but I don't think an elf_flags_init field is
5831 written into the object. */
5832 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5834 in_flags = elf_elfheader (ibfd)->e_flags;
5835 out_flags = elf_elfheader (obfd)->e_flags;
5837 if (!elf_flags_init (obfd))
5839 /* If the input is the default architecture and had the default
5840 flags then do not bother setting the flags for the output
5841 architecture, instead allow future merges to do this. If no
5842 future merges ever set these flags then they will retain their
5843 uninitialised values, which surprise surprise, correspond
5844 to the default values. */
5845 if (bfd_get_arch_info (ibfd)->the_default
5846 && elf_elfheader (ibfd)->e_flags == 0)
5849 elf_flags_init (obfd) = TRUE;
5850 elf_elfheader (obfd)->e_flags = in_flags;
5852 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5853 && bfd_get_arch_info (obfd)->the_default)
5854 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5855 bfd_get_mach (ibfd));
5860 /* Identical flags must be compatible. */
5861 if (in_flags == out_flags)
5864 /* Check to see if the input BFD actually contains any sections. If
5865 not, its flags may not have been initialised either, but it
5866 cannot actually cause any incompatiblity. Do not short-circuit
5867 dynamic objects; their section list may be emptied by
5868 elf_link_add_object_symbols.
5870 Also check to see if there are no code sections in the input.
5871 In this case there is no need to check for code specific flags.
5872 XXX - do we need to worry about floating-point format compatability
5873 in data sections ? */
5874 if (!(ibfd->flags & DYNAMIC))
5876 bfd_boolean null_input_bfd = TRUE;
5877 bfd_boolean only_data_sections = TRUE;
5879 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5881 if ((bfd_get_section_flags (ibfd, sec)
5882 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5883 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5884 only_data_sections = FALSE;
5886 null_input_bfd = FALSE;
5890 if (null_input_bfd || only_data_sections)
5894 return flags_compatible;
5897 /* Display the flags field. */
5900 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5902 FILE *file = (FILE *) ptr;
5903 unsigned long flags;
5905 BFD_ASSERT (abfd != NULL && ptr != NULL);
5907 /* Print normal ELF private data. */
5908 _bfd_elf_print_private_bfd_data (abfd, ptr);
5910 flags = elf_elfheader (abfd)->e_flags;
5911 /* Ignore init flag - it may not be set, despite the flags field
5912 containing valid data. */
5914 /* xgettext:c-format */
5915 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5918 fprintf (file, _("<Unrecognised flag bits set>"));
5925 /* Update the got entry reference counts for the section being removed. */
5928 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5929 struct bfd_link_info *info,
5931 const Elf_Internal_Rela * relocs)
5933 struct elf_aarch64_link_hash_table *htab;
5934 Elf_Internal_Shdr *symtab_hdr;
5935 struct elf_link_hash_entry **sym_hashes;
5936 struct elf_aarch64_local_symbol *locals;
5937 const Elf_Internal_Rela *rel, *relend;
5939 if (info->relocatable)
5942 htab = elf_aarch64_hash_table (info);
5947 elf_section_data (sec)->local_dynrel = NULL;
5949 symtab_hdr = &elf_symtab_hdr (abfd);
5950 sym_hashes = elf_sym_hashes (abfd);
5952 locals = elf_aarch64_locals (abfd);
5954 relend = relocs + sec->reloc_count;
5955 for (rel = relocs; rel < relend; rel++)
5957 unsigned long r_symndx;
5958 unsigned int r_type;
5959 struct elf_link_hash_entry *h = NULL;
5961 r_symndx = ELFNN_R_SYM (rel->r_info);
5963 if (r_symndx >= symtab_hdr->sh_info)
5966 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5967 while (h->root.type == bfd_link_hash_indirect
5968 || h->root.type == bfd_link_hash_warning)
5969 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5973 Elf_Internal_Sym *isym;
5975 /* A local symbol. */
5976 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5979 /* Check relocation against local STT_GNU_IFUNC symbol. */
5981 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5983 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
5991 struct elf_aarch64_link_hash_entry *eh;
5992 struct elf_dyn_relocs **pp;
5993 struct elf_dyn_relocs *p;
5995 eh = (struct elf_aarch64_link_hash_entry *) h;
5997 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
6000 /* Everything must go for SEC. */
6006 r_type = ELFNN_R_TYPE (rel->r_info);
6007 switch (aarch64_tls_transition (abfd,info, r_type, h ,r_symndx))
6009 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6010 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6011 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6012 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6013 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6014 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6015 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6016 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6017 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6018 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6019 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6020 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6021 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6022 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6023 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6024 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6025 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6026 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6027 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6028 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6029 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6030 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6031 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6032 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6033 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6034 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6035 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6036 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6039 if (h->got.refcount > 0)
6040 h->got.refcount -= 1;
6042 if (h->type == STT_GNU_IFUNC)
6044 if (h->plt.refcount > 0)
6045 h->plt.refcount -= 1;
6048 else if (locals != NULL)
6050 if (locals[r_symndx].got_refcount > 0)
6051 locals[r_symndx].got_refcount -= 1;
6055 case BFD_RELOC_AARCH64_CALL26:
6056 case BFD_RELOC_AARCH64_JUMP26:
6057 /* If this is a local symbol then we resolve it
6058 directly without creating a PLT entry. */
6062 if (h->plt.refcount > 0)
6063 h->plt.refcount -= 1;
6066 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6067 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6068 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6069 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6070 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6071 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6072 case BFD_RELOC_AARCH64_MOVW_G3:
6073 case BFD_RELOC_AARCH64_NN:
6074 if (h != NULL && info->executable)
6076 if (h->plt.refcount > 0)
6077 h->plt.refcount -= 1;
6089 /* Adjust a symbol defined by a dynamic object and referenced by a
6090 regular object. The current definition is in some section of the
6091 dynamic object, but we're not including those sections. We have to
6092 change the definition to something the rest of the link can
6096 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
6097 struct elf_link_hash_entry *h)
6099 struct elf_aarch64_link_hash_table *htab;
6102 /* If this is a function, put it in the procedure linkage table. We
6103 will fill in the contents of the procedure linkage table later,
6104 when we know the address of the .got section. */
6105 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
6107 if (h->plt.refcount <= 0
6108 || (h->type != STT_GNU_IFUNC
6109 && (SYMBOL_CALLS_LOCAL (info, h)
6110 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
6111 && h->root.type == bfd_link_hash_undefweak))))
6113 /* This case can occur if we saw a CALL26 reloc in
6114 an input file, but the symbol wasn't referred to
6115 by a dynamic object or all references were
6116 garbage collected. In which case we can end up
6118 h->plt.offset = (bfd_vma) - 1;
6125 /* Otherwise, reset to -1. */
6126 h->plt.offset = (bfd_vma) - 1;
6129 /* If this is a weak symbol, and there is a real definition, the
6130 processor independent code will have arranged for us to see the
6131 real definition first, and we can just use the same value. */
6132 if (h->u.weakdef != NULL)
6134 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
6135 || h->u.weakdef->root.type == bfd_link_hash_defweak);
6136 h->root.u.def.section = h->u.weakdef->root.u.def.section;
6137 h->root.u.def.value = h->u.weakdef->root.u.def.value;
6138 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
6139 h->non_got_ref = h->u.weakdef->non_got_ref;
6143 /* If we are creating a shared library, we must presume that the
6144 only references to the symbol are via the global offset table.
6145 For such cases we need not do anything here; the relocations will
6146 be handled correctly by relocate_section. */
6150 /* If there are no references to this symbol that do not use the
6151 GOT, we don't need to generate a copy reloc. */
6152 if (!h->non_got_ref)
6155 /* If -z nocopyreloc was given, we won't generate them either. */
6156 if (info->nocopyreloc)
6162 /* We must allocate the symbol in our .dynbss section, which will
6163 become part of the .bss section of the executable. There will be
6164 an entry for this symbol in the .dynsym section. The dynamic
6165 object will contain position independent code, so all references
6166 from the dynamic object to this symbol will go through the global
6167 offset table. The dynamic linker will use the .dynsym entry to
6168 determine the address it must put in the global offset table, so
6169 both the dynamic object and the regular object will refer to the
6170 same memory location for the variable. */
6172 htab = elf_aarch64_hash_table (info);
6174 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
6175 to copy the initial value out of the dynamic object and into the
6176 runtime process image. */
6177 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
6179 htab->srelbss->size += RELOC_SIZE (htab);
6185 return _bfd_elf_adjust_dynamic_copy (info, h, s);
6190 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
6192 struct elf_aarch64_local_symbol *locals;
6193 locals = elf_aarch64_locals (abfd);
6196 locals = (struct elf_aarch64_local_symbol *)
6197 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
6200 elf_aarch64_locals (abfd) = locals;
6205 /* Create the .got section to hold the global offset table. */
6208 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
6210 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6213 struct elf_link_hash_entry *h;
6214 struct elf_link_hash_table *htab = elf_hash_table (info);
6216 /* This function may be called more than once. */
6217 s = bfd_get_linker_section (abfd, ".got");
6221 flags = bed->dynamic_sec_flags;
6223 s = bfd_make_section_anyway_with_flags (abfd,
6224 (bed->rela_plts_and_copies_p
6225 ? ".rela.got" : ".rel.got"),
6226 (bed->dynamic_sec_flags
6229 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6233 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
6235 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6238 htab->sgot->size += GOT_ENTRY_SIZE;
6240 if (bed->want_got_sym)
6242 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
6243 (or .got.plt) section. We don't do this in the linker script
6244 because we don't want to define the symbol if we are not creating
6245 a global offset table. */
6246 h = _bfd_elf_define_linkage_sym (abfd, info, s,
6247 "_GLOBAL_OFFSET_TABLE_");
6248 elf_hash_table (info)->hgot = h;
6253 if (bed->want_got_plt)
6255 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
6257 || !bfd_set_section_alignment (abfd, s,
6258 bed->s->log_file_align))
6263 /* The first bit of the global offset table is the header. */
6264 s->size += bed->got_header_size;
6269 /* Look through the relocs for a section during the first phase. */
6272 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
6273 asection *sec, const Elf_Internal_Rela *relocs)
6275 Elf_Internal_Shdr *symtab_hdr;
6276 struct elf_link_hash_entry **sym_hashes;
6277 const Elf_Internal_Rela *rel;
6278 const Elf_Internal_Rela *rel_end;
6281 struct elf_aarch64_link_hash_table *htab;
6283 if (info->relocatable)
6286 BFD_ASSERT (is_aarch64_elf (abfd));
6288 htab = elf_aarch64_hash_table (info);
6291 symtab_hdr = &elf_symtab_hdr (abfd);
6292 sym_hashes = elf_sym_hashes (abfd);
6294 rel_end = relocs + sec->reloc_count;
6295 for (rel = relocs; rel < rel_end; rel++)
6297 struct elf_link_hash_entry *h;
6298 unsigned long r_symndx;
6299 unsigned int r_type;
6300 bfd_reloc_code_real_type bfd_r_type;
6301 Elf_Internal_Sym *isym;
6303 r_symndx = ELFNN_R_SYM (rel->r_info);
6304 r_type = ELFNN_R_TYPE (rel->r_info);
6306 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
6308 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
6313 if (r_symndx < symtab_hdr->sh_info)
6315 /* A local symbol. */
6316 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6321 /* Check relocation against local STT_GNU_IFUNC symbol. */
6322 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6324 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
6329 /* Fake a STT_GNU_IFUNC symbol. */
6330 h->type = STT_GNU_IFUNC;
6333 h->forced_local = 1;
6334 h->root.type = bfd_link_hash_defined;
6341 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6342 while (h->root.type == bfd_link_hash_indirect
6343 || h->root.type == bfd_link_hash_warning)
6344 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6346 /* PR15323, ref flags aren't set for references in the same
6348 h->root.non_ir_ref = 1;
6351 /* Could be done earlier, if h were already available. */
6352 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
6356 /* Create the ifunc sections for static executables. If we
6357 never see an indirect function symbol nor we are building
6358 a static executable, those sections will be empty and
6359 won't appear in output. */
6365 case BFD_RELOC_AARCH64_ADD_LO12:
6366 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6367 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6368 case BFD_RELOC_AARCH64_CALL26:
6369 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6370 case BFD_RELOC_AARCH64_JUMP26:
6371 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6372 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6373 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6374 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6375 case BFD_RELOC_AARCH64_NN:
6376 if (htab->root.dynobj == NULL)
6377 htab->root.dynobj = abfd;
6378 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
6383 /* It is referenced by a non-shared object. */
6385 h->root.non_ir_ref = 1;
6390 case BFD_RELOC_AARCH64_NN:
6392 /* We don't need to handle relocs into sections not going into
6393 the "real" output. */
6394 if ((sec->flags & SEC_ALLOC) == 0)
6402 h->plt.refcount += 1;
6403 h->pointer_equality_needed = 1;
6406 /* No need to do anything if we're not creating a shared
6412 struct elf_dyn_relocs *p;
6413 struct elf_dyn_relocs **head;
6415 /* We must copy these reloc types into the output file.
6416 Create a reloc section in dynobj and make room for
6420 if (htab->root.dynobj == NULL)
6421 htab->root.dynobj = abfd;
6423 sreloc = _bfd_elf_make_dynamic_reloc_section
6424 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
6430 /* If this is a global symbol, we count the number of
6431 relocations we need for this symbol. */
6434 struct elf_aarch64_link_hash_entry *eh;
6435 eh = (struct elf_aarch64_link_hash_entry *) h;
6436 head = &eh->dyn_relocs;
6440 /* Track dynamic relocs needed for local syms too.
6441 We really need local syms available to do this
6447 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6452 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
6456 /* Beware of type punned pointers vs strict aliasing
6458 vpp = &(elf_section_data (s)->local_dynrel);
6459 head = (struct elf_dyn_relocs **) vpp;
6463 if (p == NULL || p->sec != sec)
6465 bfd_size_type amt = sizeof *p;
6466 p = ((struct elf_dyn_relocs *)
6467 bfd_zalloc (htab->root.dynobj, amt));
6480 /* RR: We probably want to keep a consistency check that
6481 there are no dangling GOT_PAGE relocs. */
6482 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6483 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6484 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6485 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6486 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6487 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6488 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6489 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6490 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6491 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6492 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6493 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6494 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6495 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6496 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6497 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6498 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6499 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6500 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6501 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6502 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6503 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6504 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6505 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6506 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6507 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6508 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6509 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6512 unsigned old_got_type;
6514 got_type = aarch64_reloc_got_type (bfd_r_type);
6518 h->got.refcount += 1;
6519 old_got_type = elf_aarch64_hash_entry (h)->got_type;
6523 struct elf_aarch64_local_symbol *locals;
6525 if (!elfNN_aarch64_allocate_local_symbols
6526 (abfd, symtab_hdr->sh_info))
6529 locals = elf_aarch64_locals (abfd);
6530 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6531 locals[r_symndx].got_refcount += 1;
6532 old_got_type = locals[r_symndx].got_type;
6535 /* If a variable is accessed with both general dynamic TLS
6536 methods, two slots may be created. */
6537 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
6538 got_type |= old_got_type;
6540 /* We will already have issued an error message if there
6541 is a TLS/non-TLS mismatch, based on the symbol type.
6542 So just combine any TLS types needed. */
6543 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
6544 && got_type != GOT_NORMAL)
6545 got_type |= old_got_type;
6547 /* If the symbol is accessed by both IE and GD methods, we
6548 are able to relax. Turn off the GD flag, without
6549 messing up with any other kind of TLS types that may be
6551 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
6552 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
6554 if (old_got_type != got_type)
6557 elf_aarch64_hash_entry (h)->got_type = got_type;
6560 struct elf_aarch64_local_symbol *locals;
6561 locals = elf_aarch64_locals (abfd);
6562 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6563 locals[r_symndx].got_type = got_type;
6567 if (htab->root.dynobj == NULL)
6568 htab->root.dynobj = abfd;
6569 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
6574 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6575 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6576 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6577 case BFD_RELOC_AARCH64_MOVW_G3:
6580 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6581 (*_bfd_error_handler)
6582 (_("%B: relocation %s against `%s' can not be used when making "
6583 "a shared object; recompile with -fPIC"),
6584 abfd, elfNN_aarch64_howto_table[howto_index].name,
6585 (h) ? h->root.root.string : "a local symbol");
6586 bfd_set_error (bfd_error_bad_value);
6590 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6591 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6592 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6593 if (h != NULL && info->executable)
6595 /* If this reloc is in a read-only section, we might
6596 need a copy reloc. We can't check reliably at this
6597 stage whether the section is read-only, as input
6598 sections have not yet been mapped to output sections.
6599 Tentatively set the flag for now, and correct in
6600 adjust_dynamic_symbol. */
6602 h->plt.refcount += 1;
6603 h->pointer_equality_needed = 1;
6605 /* FIXME:: RR need to handle these in shared libraries
6606 and essentially bomb out as these being non-PIC
6607 relocations in shared libraries. */
6610 case BFD_RELOC_AARCH64_CALL26:
6611 case BFD_RELOC_AARCH64_JUMP26:
6612 /* If this is a local symbol then we resolve it
6613 directly without creating a PLT entry. */
6618 if (h->plt.refcount <= 0)
6619 h->plt.refcount = 1;
6621 h->plt.refcount += 1;
6632 /* Treat mapping symbols as special target symbols. */
6635 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6638 return bfd_is_aarch64_special_symbol_name (sym->name,
6639 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6642 /* This is a copy of elf_find_function () from elf.c except that
6643 AArch64 mapping symbols are ignored when looking for function names. */
6646 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6650 const char **filename_ptr,
6651 const char **functionname_ptr)
6653 const char *filename = NULL;
6654 asymbol *func = NULL;
6655 bfd_vma low_func = 0;
6658 for (p = symbols; *p != NULL; p++)
6662 q = (elf_symbol_type *) * p;
6664 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6669 filename = bfd_asymbol_name (&q->symbol);
6673 /* Skip mapping symbols. */
6674 if ((q->symbol.flags & BSF_LOCAL)
6675 && (bfd_is_aarch64_special_symbol_name
6676 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6679 if (bfd_get_section (&q->symbol) == section
6680 && q->symbol.value >= low_func && q->symbol.value <= offset)
6682 func = (asymbol *) q;
6683 low_func = q->symbol.value;
6693 *filename_ptr = filename;
6694 if (functionname_ptr)
6695 *functionname_ptr = bfd_asymbol_name (func);
6701 /* Find the nearest line to a particular section and offset, for error
6702 reporting. This code is a duplicate of the code in elf.c, except
6703 that it uses aarch64_elf_find_function. */
6706 elfNN_aarch64_find_nearest_line (bfd *abfd,
6710 const char **filename_ptr,
6711 const char **functionname_ptr,
6712 unsigned int *line_ptr,
6713 unsigned int *discriminator_ptr)
6715 bfd_boolean found = FALSE;
6717 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6718 filename_ptr, functionname_ptr,
6719 line_ptr, discriminator_ptr,
6720 dwarf_debug_sections, 0,
6721 &elf_tdata (abfd)->dwarf2_find_line_info))
6723 if (!*functionname_ptr)
6724 aarch64_elf_find_function (abfd, symbols, section, offset,
6725 *filename_ptr ? NULL : filename_ptr,
6731 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6732 toolchain uses DWARF1. */
6734 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6735 &found, filename_ptr,
6736 functionname_ptr, line_ptr,
6737 &elf_tdata (abfd)->line_info))
6740 if (found && (*functionname_ptr || *line_ptr))
6743 if (symbols == NULL)
6746 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6747 filename_ptr, functionname_ptr))
6755 elfNN_aarch64_find_inliner_info (bfd *abfd,
6756 const char **filename_ptr,
6757 const char **functionname_ptr,
6758 unsigned int *line_ptr)
6761 found = _bfd_dwarf2_find_inliner_info
6762 (abfd, filename_ptr,
6763 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6769 elfNN_aarch64_post_process_headers (bfd *abfd,
6770 struct bfd_link_info *link_info)
6772 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6774 i_ehdrp = elf_elfheader (abfd);
6775 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6777 _bfd_elf_post_process_headers (abfd, link_info);
6780 static enum elf_reloc_type_class
6781 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6782 const asection *rel_sec ATTRIBUTE_UNUSED,
6783 const Elf_Internal_Rela *rela)
6785 switch ((int) ELFNN_R_TYPE (rela->r_info))
6787 case AARCH64_R (RELATIVE):
6788 return reloc_class_relative;
6789 case AARCH64_R (JUMP_SLOT):
6790 return reloc_class_plt;
6791 case AARCH64_R (COPY):
6792 return reloc_class_copy;
6794 return reloc_class_normal;
6798 /* Handle an AArch64 specific section when reading an object file. This is
6799 called when bfd_section_from_shdr finds a section with an unknown
6803 elfNN_aarch64_section_from_shdr (bfd *abfd,
6804 Elf_Internal_Shdr *hdr,
6805 const char *name, int shindex)
6807 /* There ought to be a place to keep ELF backend specific flags, but
6808 at the moment there isn't one. We just keep track of the
6809 sections by their name, instead. Fortunately, the ABI gives
6810 names for all the AArch64 specific sections, so we will probably get
6812 switch (hdr->sh_type)
6814 case SHT_AARCH64_ATTRIBUTES:
6821 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6827 /* A structure used to record a list of sections, independently
6828 of the next and prev fields in the asection structure. */
6829 typedef struct section_list
6832 struct section_list *next;
6833 struct section_list *prev;
6837 /* Unfortunately we need to keep a list of sections for which
6838 an _aarch64_elf_section_data structure has been allocated. This
6839 is because it is possible for functions like elfNN_aarch64_write_section
6840 to be called on a section which has had an elf_data_structure
6841 allocated for it (and so the used_by_bfd field is valid) but
6842 for which the AArch64 extended version of this structure - the
6843 _aarch64_elf_section_data structure - has not been allocated. */
6844 static section_list *sections_with_aarch64_elf_section_data = NULL;
6847 record_section_with_aarch64_elf_section_data (asection *sec)
6849 struct section_list *entry;
6851 entry = bfd_malloc (sizeof (*entry));
6855 entry->next = sections_with_aarch64_elf_section_data;
6857 if (entry->next != NULL)
6858 entry->next->prev = entry;
6859 sections_with_aarch64_elf_section_data = entry;
6862 static struct section_list *
6863 find_aarch64_elf_section_entry (asection *sec)
6865 struct section_list *entry;
6866 static struct section_list *last_entry = NULL;
6868 /* This is a short cut for the typical case where the sections are added
6869 to the sections_with_aarch64_elf_section_data list in forward order and
6870 then looked up here in backwards order. This makes a real difference
6871 to the ld-srec/sec64k.exp linker test. */
6872 entry = sections_with_aarch64_elf_section_data;
6873 if (last_entry != NULL)
6875 if (last_entry->sec == sec)
6877 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6878 entry = last_entry->next;
6881 for (; entry; entry = entry->next)
6882 if (entry->sec == sec)
6886 /* Record the entry prior to this one - it is the entry we are
6887 most likely to want to locate next time. Also this way if we
6888 have been called from
6889 unrecord_section_with_aarch64_elf_section_data () we will not
6890 be caching a pointer that is about to be freed. */
6891 last_entry = entry->prev;
6897 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6899 struct section_list *entry;
6901 entry = find_aarch64_elf_section_entry (sec);
6905 if (entry->prev != NULL)
6906 entry->prev->next = entry->next;
6907 if (entry->next != NULL)
6908 entry->next->prev = entry->prev;
6909 if (entry == sections_with_aarch64_elf_section_data)
6910 sections_with_aarch64_elf_section_data = entry->next;
6919 struct bfd_link_info *info;
6922 int (*func) (void *, const char *, Elf_Internal_Sym *,
6923 asection *, struct elf_link_hash_entry *);
6924 } output_arch_syminfo;
6926 enum map_symbol_type
6933 /* Output a single mapping symbol. */
6936 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6937 enum map_symbol_type type, bfd_vma offset)
6939 static const char *names[2] = { "$x", "$d" };
6940 Elf_Internal_Sym sym;
6942 sym.st_value = (osi->sec->output_section->vma
6943 + osi->sec->output_offset + offset);
6946 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6947 sym.st_shndx = osi->sec_shndx;
6948 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
6953 /* Output mapping symbols for PLT entries associated with H. */
6956 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
6958 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
6961 if (h->root.type == bfd_link_hash_indirect)
6964 if (h->root.type == bfd_link_hash_warning)
6965 /* When warning symbols are created, they **replace** the "real"
6966 entry in the hash table, thus we never get to see the real
6967 symbol in a hash traversal. So look at it now. */
6968 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6970 if (h->plt.offset == (bfd_vma) - 1)
6973 addr = h->plt.offset;
6976 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6983 /* Output a single local symbol for a generated stub. */
6986 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
6987 bfd_vma offset, bfd_vma size)
6989 Elf_Internal_Sym sym;
6991 sym.st_value = (osi->sec->output_section->vma
6992 + osi->sec->output_offset + offset);
6995 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6996 sym.st_shndx = osi->sec_shndx;
6997 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
7001 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
7003 struct elf_aarch64_stub_hash_entry *stub_entry;
7007 output_arch_syminfo *osi;
7009 /* Massage our args to the form they really have. */
7010 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
7011 osi = (output_arch_syminfo *) in_arg;
7013 stub_sec = stub_entry->stub_sec;
7015 /* Ensure this stub is attached to the current section being
7017 if (stub_sec != osi->sec)
7020 addr = (bfd_vma) stub_entry->stub_offset;
7022 stub_name = stub_entry->output_name;
7024 switch (stub_entry->stub_type)
7026 case aarch64_stub_adrp_branch:
7027 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7028 sizeof (aarch64_adrp_branch_stub)))
7030 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7033 case aarch64_stub_long_branch:
7034 if (!elfNN_aarch64_output_stub_sym
7035 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
7037 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7039 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
7042 case aarch64_stub_erratum_835769_veneer:
7043 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7044 sizeof (aarch64_erratum_835769_stub)))
7046 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7049 case aarch64_stub_erratum_843419_veneer:
7050 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7051 sizeof (aarch64_erratum_843419_stub)))
7053 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7064 /* Output mapping symbols for linker generated sections. */
7067 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
7068 struct bfd_link_info *info,
7070 int (*func) (void *, const char *,
7073 struct elf_link_hash_entry
7076 output_arch_syminfo osi;
7077 struct elf_aarch64_link_hash_table *htab;
7079 htab = elf_aarch64_hash_table (info);
7085 /* Long calls stubs. */
7086 if (htab->stub_bfd && htab->stub_bfd->sections)
7090 for (stub_sec = htab->stub_bfd->sections;
7091 stub_sec != NULL; stub_sec = stub_sec->next)
7093 /* Ignore non-stub sections. */
7094 if (!strstr (stub_sec->name, STUB_SUFFIX))
7099 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7100 (output_bfd, osi.sec->output_section);
7102 /* The first instruction in a stub is always a branch. */
7103 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
7106 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
7111 /* Finally, output mapping symbols for the PLT. */
7112 if (!htab->root.splt || htab->root.splt->size == 0)
7115 /* For now live without mapping symbols for the plt. */
7116 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7117 (output_bfd, htab->root.splt->output_section);
7118 osi.sec = htab->root.splt;
7120 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
7127 /* Allocate target specific section data. */
7130 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
7132 if (!sec->used_by_bfd)
7134 _aarch64_elf_section_data *sdata;
7135 bfd_size_type amt = sizeof (*sdata);
7137 sdata = bfd_zalloc (abfd, amt);
7140 sec->used_by_bfd = sdata;
7143 record_section_with_aarch64_elf_section_data (sec);
7145 return _bfd_elf_new_section_hook (abfd, sec);
7150 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
7152 void *ignore ATTRIBUTE_UNUSED)
7154 unrecord_section_with_aarch64_elf_section_data (sec);
7158 elfNN_aarch64_close_and_cleanup (bfd *abfd)
7161 bfd_map_over_sections (abfd,
7162 unrecord_section_via_map_over_sections, NULL);
7164 return _bfd_elf_close_and_cleanup (abfd);
7168 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
7171 bfd_map_over_sections (abfd,
7172 unrecord_section_via_map_over_sections, NULL);
7174 return _bfd_free_cached_info (abfd);
7177 /* Create dynamic sections. This is different from the ARM backend in that
7178 the got, plt, gotplt and their relocation sections are all created in the
7179 standard part of the bfd elf backend. */
7182 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
7183 struct bfd_link_info *info)
7185 struct elf_aarch64_link_hash_table *htab;
7187 /* We need to create .got section. */
7188 if (!aarch64_elf_create_got_section (dynobj, info))
7191 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
7194 htab = elf_aarch64_hash_table (info);
7195 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
7197 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
7199 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
7206 /* Allocate space in .plt, .got and associated reloc sections for
7210 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
7212 struct bfd_link_info *info;
7213 struct elf_aarch64_link_hash_table *htab;
7214 struct elf_aarch64_link_hash_entry *eh;
7215 struct elf_dyn_relocs *p;
7217 /* An example of a bfd_link_hash_indirect symbol is versioned
7218 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7219 -> __gxx_personality_v0(bfd_link_hash_defined)
7221 There is no need to process bfd_link_hash_indirect symbols here
7222 because we will also be presented with the concrete instance of
7223 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7224 called to copy all relevant data from the generic to the concrete
7227 if (h->root.type == bfd_link_hash_indirect)
7230 if (h->root.type == bfd_link_hash_warning)
7231 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7233 info = (struct bfd_link_info *) inf;
7234 htab = elf_aarch64_hash_table (info);
7236 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
7237 here if it is defined and referenced in a non-shared object. */
7238 if (h->type == STT_GNU_IFUNC
7241 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
7243 /* Make sure this symbol is output as a dynamic symbol.
7244 Undefined weak syms won't yet be marked as dynamic. */
7245 if (h->dynindx == -1 && !h->forced_local)
7247 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7251 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
7253 asection *s = htab->root.splt;
7255 /* If this is the first .plt entry, make room for the special
7258 s->size += htab->plt_header_size;
7260 h->plt.offset = s->size;
7262 /* If this symbol is not defined in a regular file, and we are
7263 not generating a shared library, then set the symbol to this
7264 location in the .plt. This is required to make function
7265 pointers compare as equal between the normal executable and
7266 the shared library. */
7267 if (!info->shared && !h->def_regular)
7269 h->root.u.def.section = s;
7270 h->root.u.def.value = h->plt.offset;
7273 /* Make room for this entry. For now we only create the
7274 small model PLT entries. We later need to find a way
7275 of relaxing into these from the large model PLT entries. */
7276 s->size += PLT_SMALL_ENTRY_SIZE;
7278 /* We also need to make an entry in the .got.plt section, which
7279 will be placed in the .got section by the linker script. */
7280 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
7282 /* We also need to make an entry in the .rela.plt section. */
7283 htab->root.srelplt->size += RELOC_SIZE (htab);
7285 /* We need to ensure that all GOT entries that serve the PLT
7286 are consecutive with the special GOT slots [0] [1] and
7287 [2]. Any addtional relocations, such as
7288 R_AARCH64_TLSDESC, must be placed after the PLT related
7289 entries. We abuse the reloc_count such that during
7290 sizing we adjust reloc_count to indicate the number of
7291 PLT related reserved entries. In subsequent phases when
7292 filling in the contents of the reloc entries, PLT related
7293 entries are placed by computing their PLT index (0
7294 .. reloc_count). While other none PLT relocs are placed
7295 at the slot indicated by reloc_count and reloc_count is
7298 htab->root.srelplt->reloc_count++;
7302 h->plt.offset = (bfd_vma) - 1;
7308 h->plt.offset = (bfd_vma) - 1;
7312 eh = (struct elf_aarch64_link_hash_entry *) h;
7313 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7315 if (h->got.refcount > 0)
7318 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
7320 h->got.offset = (bfd_vma) - 1;
7322 dyn = htab->root.dynamic_sections_created;
7324 /* Make sure this symbol is output as a dynamic symbol.
7325 Undefined weak syms won't yet be marked as dynamic. */
7326 if (dyn && h->dynindx == -1 && !h->forced_local)
7328 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7332 if (got_type == GOT_UNKNOWN)
7335 else if (got_type == GOT_NORMAL)
7337 h->got.offset = htab->root.sgot->size;
7338 htab->root.sgot->size += GOT_ENTRY_SIZE;
7339 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7340 || h->root.type != bfd_link_hash_undefweak)
7342 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7344 htab->root.srelgot->size += RELOC_SIZE (htab);
7350 if (got_type & GOT_TLSDESC_GD)
7352 eh->tlsdesc_got_jump_table_offset =
7353 (htab->root.sgotplt->size
7354 - aarch64_compute_jump_table_size (htab));
7355 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7356 h->got.offset = (bfd_vma) - 2;
7359 if (got_type & GOT_TLS_GD)
7361 h->got.offset = htab->root.sgot->size;
7362 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7365 if (got_type & GOT_TLS_IE)
7367 h->got.offset = htab->root.sgot->size;
7368 htab->root.sgot->size += GOT_ENTRY_SIZE;
7371 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7372 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7373 || h->root.type != bfd_link_hash_undefweak)
7376 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7378 if (got_type & GOT_TLSDESC_GD)
7380 htab->root.srelplt->size += RELOC_SIZE (htab);
7381 /* Note reloc_count not incremented here! We have
7382 already adjusted reloc_count for this relocation
7385 /* TLSDESC PLT is now needed, but not yet determined. */
7386 htab->tlsdesc_plt = (bfd_vma) - 1;
7389 if (got_type & GOT_TLS_GD)
7390 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7392 if (got_type & GOT_TLS_IE)
7393 htab->root.srelgot->size += RELOC_SIZE (htab);
7399 h->got.offset = (bfd_vma) - 1;
7402 if (eh->dyn_relocs == NULL)
7405 /* In the shared -Bsymbolic case, discard space allocated for
7406 dynamic pc-relative relocs against symbols which turn out to be
7407 defined in regular objects. For the normal shared case, discard
7408 space for pc-relative relocs that have become local due to symbol
7409 visibility changes. */
7413 /* Relocs that use pc_count are those that appear on a call
7414 insn, or certain REL relocs that can generated via assembly.
7415 We want calls to protected symbols to resolve directly to the
7416 function rather than going via the plt. If people want
7417 function pointer comparisons to work as expected then they
7418 should avoid writing weird assembly. */
7419 if (SYMBOL_CALLS_LOCAL (info, h))
7421 struct elf_dyn_relocs **pp;
7423 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
7425 p->count -= p->pc_count;
7434 /* Also discard relocs on undefined weak syms with non-default
7436 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
7438 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7439 eh->dyn_relocs = NULL;
7441 /* Make sure undefined weak symbols are output as a dynamic
7443 else if (h->dynindx == -1
7445 && !bfd_elf_link_record_dynamic_symbol (info, h))
7450 else if (ELIMINATE_COPY_RELOCS)
7452 /* For the non-shared case, discard space for relocs against
7453 symbols which turn out to need copy relocs or are not
7459 || (htab->root.dynamic_sections_created
7460 && (h->root.type == bfd_link_hash_undefweak
7461 || h->root.type == bfd_link_hash_undefined))))
7463 /* Make sure this symbol is output as a dynamic symbol.
7464 Undefined weak syms won't yet be marked as dynamic. */
7465 if (h->dynindx == -1
7467 && !bfd_elf_link_record_dynamic_symbol (info, h))
7470 /* If that succeeded, we know we'll be keeping all the
7472 if (h->dynindx != -1)
7476 eh->dyn_relocs = NULL;
7481 /* Finally, allocate space. */
7482 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7486 sreloc = elf_section_data (p->sec)->sreloc;
7488 BFD_ASSERT (sreloc != NULL);
7490 sreloc->size += p->count * RELOC_SIZE (htab);
7496 /* Allocate space in .plt, .got and associated reloc sections for
7497 ifunc dynamic relocs. */
7500 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
7503 struct bfd_link_info *info;
7504 struct elf_aarch64_link_hash_table *htab;
7505 struct elf_aarch64_link_hash_entry *eh;
7507 /* An example of a bfd_link_hash_indirect symbol is versioned
7508 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7509 -> __gxx_personality_v0(bfd_link_hash_defined)
7511 There is no need to process bfd_link_hash_indirect symbols here
7512 because we will also be presented with the concrete instance of
7513 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7514 called to copy all relevant data from the generic to the concrete
7517 if (h->root.type == bfd_link_hash_indirect)
7520 if (h->root.type == bfd_link_hash_warning)
7521 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7523 info = (struct bfd_link_info *) inf;
7524 htab = elf_aarch64_hash_table (info);
7526 eh = (struct elf_aarch64_link_hash_entry *) h;
7528 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
7529 here if it is defined and referenced in a non-shared object. */
7530 if (h->type == STT_GNU_IFUNC
7532 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
7534 htab->plt_entry_size,
7535 htab->plt_header_size,
7540 /* Allocate space in .plt, .got and associated reloc sections for
7541 local dynamic relocs. */
7544 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
7546 struct elf_link_hash_entry *h
7547 = (struct elf_link_hash_entry *) *slot;
7549 if (h->type != STT_GNU_IFUNC
7553 || h->root.type != bfd_link_hash_defined)
7556 return elfNN_aarch64_allocate_dynrelocs (h, inf);
7559 /* Allocate space in .plt, .got and associated reloc sections for
7560 local ifunc dynamic relocs. */
7563 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
7565 struct elf_link_hash_entry *h
7566 = (struct elf_link_hash_entry *) *slot;
7568 if (h->type != STT_GNU_IFUNC
7572 || h->root.type != bfd_link_hash_defined)
7575 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
7578 /* Find any dynamic relocs that apply to read-only sections. */
7581 aarch64_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
7583 struct elf_aarch64_link_hash_entry * eh;
7584 struct elf_dyn_relocs * p;
7586 eh = (struct elf_aarch64_link_hash_entry *) h;
7587 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7589 asection *s = p->sec;
7591 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7593 struct bfd_link_info *info = (struct bfd_link_info *) inf;
7595 info->flags |= DF_TEXTREL;
7597 /* Not an error, just cut short the traversal. */
7604 /* This is the most important function of all . Innocuosly named
7607 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
7608 struct bfd_link_info *info)
7610 struct elf_aarch64_link_hash_table *htab;
7616 htab = elf_aarch64_hash_table ((info));
7617 dynobj = htab->root.dynobj;
7619 BFD_ASSERT (dynobj != NULL);
7621 if (htab->root.dynamic_sections_created)
7623 if (info->executable)
7625 s = bfd_get_linker_section (dynobj, ".interp");
7628 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7629 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7633 /* Set up .got offsets for local syms, and space for local dynamic
7635 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7637 struct elf_aarch64_local_symbol *locals = NULL;
7638 Elf_Internal_Shdr *symtab_hdr;
7642 if (!is_aarch64_elf (ibfd))
7645 for (s = ibfd->sections; s != NULL; s = s->next)
7647 struct elf_dyn_relocs *p;
7649 for (p = (struct elf_dyn_relocs *)
7650 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7652 if (!bfd_is_abs_section (p->sec)
7653 && bfd_is_abs_section (p->sec->output_section))
7655 /* Input section has been discarded, either because
7656 it is a copy of a linkonce section or due to
7657 linker script /DISCARD/, so we'll be discarding
7660 else if (p->count != 0)
7662 srel = elf_section_data (p->sec)->sreloc;
7663 srel->size += p->count * RELOC_SIZE (htab);
7664 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7665 info->flags |= DF_TEXTREL;
7670 locals = elf_aarch64_locals (ibfd);
7674 symtab_hdr = &elf_symtab_hdr (ibfd);
7675 srel = htab->root.srelgot;
7676 for (i = 0; i < symtab_hdr->sh_info; i++)
7678 locals[i].got_offset = (bfd_vma) - 1;
7679 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7680 if (locals[i].got_refcount > 0)
7682 unsigned got_type = locals[i].got_type;
7683 if (got_type & GOT_TLSDESC_GD)
7685 locals[i].tlsdesc_got_jump_table_offset =
7686 (htab->root.sgotplt->size
7687 - aarch64_compute_jump_table_size (htab));
7688 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7689 locals[i].got_offset = (bfd_vma) - 2;
7692 if (got_type & GOT_TLS_GD)
7694 locals[i].got_offset = htab->root.sgot->size;
7695 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7698 if (got_type & GOT_TLS_IE
7699 || got_type & GOT_NORMAL)
7701 locals[i].got_offset = htab->root.sgot->size;
7702 htab->root.sgot->size += GOT_ENTRY_SIZE;
7705 if (got_type == GOT_UNKNOWN)
7711 if (got_type & GOT_TLSDESC_GD)
7713 htab->root.srelplt->size += RELOC_SIZE (htab);
7714 /* Note RELOC_COUNT not incremented here! */
7715 htab->tlsdesc_plt = (bfd_vma) - 1;
7718 if (got_type & GOT_TLS_GD)
7719 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7721 if (got_type & GOT_TLS_IE
7722 || got_type & GOT_NORMAL)
7723 htab->root.srelgot->size += RELOC_SIZE (htab);
7728 locals[i].got_refcount = (bfd_vma) - 1;
7734 /* Allocate global sym .plt and .got entries, and space for global
7735 sym dynamic relocs. */
7736 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7739 /* Allocate global ifunc sym .plt and .got entries, and space for global
7740 ifunc sym dynamic relocs. */
7741 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7744 /* Allocate .plt and .got entries, and space for local symbols. */
7745 htab_traverse (htab->loc_hash_table,
7746 elfNN_aarch64_allocate_local_dynrelocs,
7749 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7750 htab_traverse (htab->loc_hash_table,
7751 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7754 /* For every jump slot reserved in the sgotplt, reloc_count is
7755 incremented. However, when we reserve space for TLS descriptors,
7756 it's not incremented, so in order to compute the space reserved
7757 for them, it suffices to multiply the reloc count by the jump
7760 if (htab->root.srelplt)
7761 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
7763 if (htab->tlsdesc_plt)
7765 if (htab->root.splt->size == 0)
7766 htab->root.splt->size += PLT_ENTRY_SIZE;
7768 htab->tlsdesc_plt = htab->root.splt->size;
7769 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7771 /* If we're not using lazy TLS relocations, don't generate the
7772 GOT entry required. */
7773 if (!(info->flags & DF_BIND_NOW))
7775 htab->dt_tlsdesc_got = htab->root.sgot->size;
7776 htab->root.sgot->size += GOT_ENTRY_SIZE;
7780 /* Init mapping symbols information to use later to distingush between
7781 code and data while scanning for errata. */
7782 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
7783 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7785 if (!is_aarch64_elf (ibfd))
7787 bfd_elfNN_aarch64_init_maps (ibfd);
7790 /* We now have determined the sizes of the various dynamic sections.
7791 Allocate memory for them. */
7793 for (s = dynobj->sections; s != NULL; s = s->next)
7795 if ((s->flags & SEC_LINKER_CREATED) == 0)
7798 if (s == htab->root.splt
7799 || s == htab->root.sgot
7800 || s == htab->root.sgotplt
7801 || s == htab->root.iplt
7802 || s == htab->root.igotplt || s == htab->sdynbss)
7804 /* Strip this section if we don't need it; see the
7807 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7809 if (s->size != 0 && s != htab->root.srelplt)
7812 /* We use the reloc_count field as a counter if we need
7813 to copy relocs into the output file. */
7814 if (s != htab->root.srelplt)
7819 /* It's not one of our sections, so don't allocate space. */
7825 /* If we don't need this section, strip it from the
7826 output file. This is mostly to handle .rela.bss and
7827 .rela.plt. We must create both sections in
7828 create_dynamic_sections, because they must be created
7829 before the linker maps input sections to output
7830 sections. The linker does that before
7831 adjust_dynamic_symbol is called, and it is that
7832 function which decides whether anything needs to go
7833 into these sections. */
7835 s->flags |= SEC_EXCLUDE;
7839 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7842 /* Allocate memory for the section contents. We use bfd_zalloc
7843 here in case unused entries are not reclaimed before the
7844 section's contents are written out. This should not happen,
7845 but this way if it does, we get a R_AARCH64_NONE reloc instead
7847 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7848 if (s->contents == NULL)
7852 if (htab->root.dynamic_sections_created)
7854 /* Add some entries to the .dynamic section. We fill in the
7855 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7856 must add the entries now so that we get the correct size for
7857 the .dynamic section. The DT_DEBUG entry is filled in by the
7858 dynamic linker and used by the debugger. */
7859 #define add_dynamic_entry(TAG, VAL) \
7860 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7862 if (info->executable)
7864 if (!add_dynamic_entry (DT_DEBUG, 0))
7868 if (htab->root.splt->size != 0)
7870 if (!add_dynamic_entry (DT_PLTGOT, 0)
7871 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7872 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7873 || !add_dynamic_entry (DT_JMPREL, 0))
7876 if (htab->tlsdesc_plt
7877 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7878 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7884 if (!add_dynamic_entry (DT_RELA, 0)
7885 || !add_dynamic_entry (DT_RELASZ, 0)
7886 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7889 /* If any dynamic relocs apply to a read-only section,
7890 then we need a DT_TEXTREL entry. */
7891 if ((info->flags & DF_TEXTREL) == 0)
7892 elf_link_hash_traverse (& htab->root, aarch64_readonly_dynrelocs,
7895 if ((info->flags & DF_TEXTREL) != 0)
7897 if (!add_dynamic_entry (DT_TEXTREL, 0))
7902 #undef add_dynamic_entry
7908 elf_aarch64_update_plt_entry (bfd *output_bfd,
7909 bfd_reloc_code_real_type r_type,
7910 bfd_byte *plt_entry, bfd_vma value)
7912 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7914 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
7918 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7919 struct elf_aarch64_link_hash_table
7920 *htab, bfd *output_bfd,
7921 struct bfd_link_info *info)
7923 bfd_byte *plt_entry;
7926 bfd_vma gotplt_entry_address;
7927 bfd_vma plt_entry_address;
7928 Elf_Internal_Rela rela;
7930 asection *plt, *gotplt, *relplt;
7932 /* When building a static executable, use .iplt, .igot.plt and
7933 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7934 if (htab->root.splt != NULL)
7936 plt = htab->root.splt;
7937 gotplt = htab->root.sgotplt;
7938 relplt = htab->root.srelplt;
7942 plt = htab->root.iplt;
7943 gotplt = htab->root.igotplt;
7944 relplt = htab->root.irelplt;
7947 /* Get the index in the procedure linkage table which
7948 corresponds to this symbol. This is the index of this symbol
7949 in all the symbols for which we are making plt entries. The
7950 first entry in the procedure linkage table is reserved.
7952 Get the offset into the .got table of the entry that
7953 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
7954 bytes. The first three are reserved for the dynamic linker.
7956 For static executables, we don't reserve anything. */
7958 if (plt == htab->root.splt)
7960 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
7961 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
7965 plt_index = h->plt.offset / htab->plt_entry_size;
7966 got_offset = plt_index * GOT_ENTRY_SIZE;
7969 plt_entry = plt->contents + h->plt.offset;
7970 plt_entry_address = plt->output_section->vma
7971 + plt->output_offset + h->plt.offset;
7972 gotplt_entry_address = gotplt->output_section->vma +
7973 gotplt->output_offset + got_offset;
7975 /* Copy in the boiler-plate for the PLTn entry. */
7976 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
7978 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7979 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7980 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7982 PG (gotplt_entry_address) -
7983 PG (plt_entry_address));
7985 /* Fill in the lo12 bits for the load from the pltgot. */
7986 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7988 PG_OFFSET (gotplt_entry_address));
7990 /* Fill in the lo12 bits for the add from the pltgot entry. */
7991 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7993 PG_OFFSET (gotplt_entry_address));
7995 /* All the GOTPLT Entries are essentially initialized to PLT0. */
7996 bfd_put_NN (output_bfd,
7997 plt->output_section->vma + plt->output_offset,
7998 gotplt->contents + got_offset);
8000 rela.r_offset = gotplt_entry_address;
8002 if (h->dynindx == -1
8003 || ((info->executable
8004 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
8006 && h->type == STT_GNU_IFUNC))
8008 /* If an STT_GNU_IFUNC symbol is locally defined, generate
8009 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
8010 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
8011 rela.r_addend = (h->root.u.def.value
8012 + h->root.u.def.section->output_section->vma
8013 + h->root.u.def.section->output_offset);
8017 /* Fill in the entry in the .rela.plt section. */
8018 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
8022 /* Compute the relocation entry to used based on PLT index and do
8023 not adjust reloc_count. The reloc_count has already been adjusted
8024 to account for this entry. */
8025 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
8026 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8029 /* Size sections even though they're not dynamic. We use it to setup
8030 _TLS_MODULE_BASE_, if needed. */
8033 elfNN_aarch64_always_size_sections (bfd *output_bfd,
8034 struct bfd_link_info *info)
8038 if (info->relocatable)
8041 tls_sec = elf_hash_table (info)->tls_sec;
8045 struct elf_link_hash_entry *tlsbase;
8047 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
8048 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
8052 struct bfd_link_hash_entry *h = NULL;
8053 const struct elf_backend_data *bed =
8054 get_elf_backend_data (output_bfd);
8056 if (!(_bfd_generic_link_add_one_symbol
8057 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
8058 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
8061 tlsbase->type = STT_TLS;
8062 tlsbase = (struct elf_link_hash_entry *) h;
8063 tlsbase->def_regular = 1;
8064 tlsbase->other = STV_HIDDEN;
8065 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
8072 /* Finish up dynamic symbol handling. We set the contents of various
8073 dynamic sections here. */
8075 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
8076 struct bfd_link_info *info,
8077 struct elf_link_hash_entry *h,
8078 Elf_Internal_Sym *sym)
8080 struct elf_aarch64_link_hash_table *htab;
8081 htab = elf_aarch64_hash_table (info);
8083 if (h->plt.offset != (bfd_vma) - 1)
8085 asection *plt, *gotplt, *relplt;
8087 /* This symbol has an entry in the procedure linkage table. Set
8090 /* When building a static executable, use .iplt, .igot.plt and
8091 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8092 if (htab->root.splt != NULL)
8094 plt = htab->root.splt;
8095 gotplt = htab->root.sgotplt;
8096 relplt = htab->root.srelplt;
8100 plt = htab->root.iplt;
8101 gotplt = htab->root.igotplt;
8102 relplt = htab->root.irelplt;
8105 /* This symbol has an entry in the procedure linkage table. Set
8107 if ((h->dynindx == -1
8108 && !((h->forced_local || info->executable)
8110 && h->type == STT_GNU_IFUNC))
8116 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
8117 if (!h->def_regular)
8119 /* Mark the symbol as undefined, rather than as defined in
8120 the .plt section. */
8121 sym->st_shndx = SHN_UNDEF;
8122 /* If the symbol is weak we need to clear the value.
8123 Otherwise, the PLT entry would provide a definition for
8124 the symbol even if the symbol wasn't defined anywhere,
8125 and so the symbol would never be NULL. Leave the value if
8126 there were any relocations where pointer equality matters
8127 (this is a clue for the dynamic linker, to make function
8128 pointer comparisons work between an application and shared
8130 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
8135 if (h->got.offset != (bfd_vma) - 1
8136 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
8138 Elf_Internal_Rela rela;
8141 /* This symbol has an entry in the global offset table. Set it
8143 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
8146 rela.r_offset = (htab->root.sgot->output_section->vma
8147 + htab->root.sgot->output_offset
8148 + (h->got.offset & ~(bfd_vma) 1));
8151 && h->type == STT_GNU_IFUNC)
8155 /* Generate R_AARCH64_GLOB_DAT. */
8162 if (!h->pointer_equality_needed)
8165 /* For non-shared object, we can't use .got.plt, which
8166 contains the real function address if we need pointer
8167 equality. We load the GOT entry with the PLT entry. */
8168 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
8169 bfd_put_NN (output_bfd, (plt->output_section->vma
8170 + plt->output_offset
8172 htab->root.sgot->contents
8173 + (h->got.offset & ~(bfd_vma) 1));
8177 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
8179 if (!h->def_regular)
8182 BFD_ASSERT ((h->got.offset & 1) != 0);
8183 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
8184 rela.r_addend = (h->root.u.def.value
8185 + h->root.u.def.section->output_section->vma
8186 + h->root.u.def.section->output_offset);
8191 BFD_ASSERT ((h->got.offset & 1) == 0);
8192 bfd_put_NN (output_bfd, (bfd_vma) 0,
8193 htab->root.sgot->contents + h->got.offset);
8194 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
8198 loc = htab->root.srelgot->contents;
8199 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
8200 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8205 Elf_Internal_Rela rela;
8208 /* This symbol needs a copy reloc. Set it up. */
8210 if (h->dynindx == -1
8211 || (h->root.type != bfd_link_hash_defined
8212 && h->root.type != bfd_link_hash_defweak)
8213 || htab->srelbss == NULL)
8216 rela.r_offset = (h->root.u.def.value
8217 + h->root.u.def.section->output_section->vma
8218 + h->root.u.def.section->output_offset);
8219 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
8221 loc = htab->srelbss->contents;
8222 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
8223 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8226 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
8227 be NULL for local symbols. */
8229 && (h == elf_hash_table (info)->hdynamic
8230 || h == elf_hash_table (info)->hgot))
8231 sym->st_shndx = SHN_ABS;
8236 /* Finish up local dynamic symbol handling. We set the contents of
8237 various dynamic sections here. */
8240 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
8242 struct elf_link_hash_entry *h
8243 = (struct elf_link_hash_entry *) *slot;
8244 struct bfd_link_info *info
8245 = (struct bfd_link_info *) inf;
8247 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
8252 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
8253 struct elf_aarch64_link_hash_table
8256 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
8257 small and large plts and at the minute just generates
8260 /* PLT0 of the small PLT looks like this in ELF64 -
8261 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
8262 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
8263 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
8265 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
8266 // GOTPLT entry for this.
8268 PLT0 will be slightly different in ELF32 due to different got entry
8271 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
8275 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
8277 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
8280 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
8281 + htab->root.sgotplt->output_offset
8282 + GOT_ENTRY_SIZE * 2);
8284 plt_base = htab->root.splt->output_section->vma +
8285 htab->root.splt->output_offset;
8287 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8288 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8289 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8290 htab->root.splt->contents + 4,
8291 PG (plt_got_2nd_ent) - PG (plt_base + 4));
8293 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8294 htab->root.splt->contents + 8,
8295 PG_OFFSET (plt_got_2nd_ent));
8297 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8298 htab->root.splt->contents + 12,
8299 PG_OFFSET (plt_got_2nd_ent));
8303 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
8304 struct bfd_link_info *info)
8306 struct elf_aarch64_link_hash_table *htab;
8310 htab = elf_aarch64_hash_table (info);
8311 dynobj = htab->root.dynobj;
8312 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
8314 if (htab->root.dynamic_sections_created)
8316 ElfNN_External_Dyn *dyncon, *dynconend;
8318 if (sdyn == NULL || htab->root.sgot == NULL)
8321 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
8322 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
8323 for (; dyncon < dynconend; dyncon++)
8325 Elf_Internal_Dyn dyn;
8328 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
8336 s = htab->root.sgotplt;
8337 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
8341 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
8345 s = htab->root.srelplt;
8346 dyn.d_un.d_val = s->size;
8350 /* The procedure linkage table relocs (DT_JMPREL) should
8351 not be included in the overall relocs (DT_RELA).
8352 Therefore, we override the DT_RELASZ entry here to
8353 make it not include the JMPREL relocs. Since the
8354 linker script arranges for .rela.plt to follow all
8355 other relocation sections, we don't have to worry
8356 about changing the DT_RELA entry. */
8357 if (htab->root.srelplt != NULL)
8359 s = htab->root.srelplt;
8360 dyn.d_un.d_val -= s->size;
8364 case DT_TLSDESC_PLT:
8365 s = htab->root.splt;
8366 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8367 + htab->tlsdesc_plt;
8370 case DT_TLSDESC_GOT:
8371 s = htab->root.sgot;
8372 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8373 + htab->dt_tlsdesc_got;
8377 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
8382 /* Fill in the special first entry in the procedure linkage table. */
8383 if (htab->root.splt && htab->root.splt->size > 0)
8385 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
8387 elf_section_data (htab->root.splt->output_section)->
8388 this_hdr.sh_entsize = htab->plt_entry_size;
8391 if (htab->tlsdesc_plt)
8393 bfd_put_NN (output_bfd, (bfd_vma) 0,
8394 htab->root.sgot->contents + htab->dt_tlsdesc_got);
8396 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
8397 elfNN_aarch64_tlsdesc_small_plt_entry,
8398 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
8401 bfd_vma adrp1_addr =
8402 htab->root.splt->output_section->vma
8403 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
8405 bfd_vma adrp2_addr = adrp1_addr + 4;
8408 htab->root.sgot->output_section->vma
8409 + htab->root.sgot->output_offset;
8411 bfd_vma pltgot_addr =
8412 htab->root.sgotplt->output_section->vma
8413 + htab->root.sgotplt->output_offset;
8415 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
8417 bfd_byte *plt_entry =
8418 htab->root.splt->contents + htab->tlsdesc_plt;
8420 /* adrp x2, DT_TLSDESC_GOT */
8421 elf_aarch64_update_plt_entry (output_bfd,
8422 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8424 (PG (dt_tlsdesc_got)
8425 - PG (adrp1_addr)));
8428 elf_aarch64_update_plt_entry (output_bfd,
8429 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8432 - PG (adrp2_addr)));
8434 /* ldr x2, [x2, #0] */
8435 elf_aarch64_update_plt_entry (output_bfd,
8436 BFD_RELOC_AARCH64_LDSTNN_LO12,
8438 PG_OFFSET (dt_tlsdesc_got));
8441 elf_aarch64_update_plt_entry (output_bfd,
8442 BFD_RELOC_AARCH64_ADD_LO12,
8444 PG_OFFSET (pltgot_addr));
8449 if (htab->root.sgotplt)
8451 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
8453 (*_bfd_error_handler)
8454 (_("discarded output section: `%A'"), htab->root.sgotplt);
8458 /* Fill in the first three entries in the global offset table. */
8459 if (htab->root.sgotplt->size > 0)
8461 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
8463 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
8464 bfd_put_NN (output_bfd,
8466 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
8467 bfd_put_NN (output_bfd,
8469 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
8472 if (htab->root.sgot)
8474 if (htab->root.sgot->size > 0)
8477 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
8478 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
8482 elf_section_data (htab->root.sgotplt->output_section)->
8483 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
8486 if (htab->root.sgot && htab->root.sgot->size > 0)
8487 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
8490 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
8491 htab_traverse (htab->loc_hash_table,
8492 elfNN_aarch64_finish_local_dynamic_symbol,
8498 /* Return address for Ith PLT stub in section PLT, for relocation REL
8499 or (bfd_vma) -1 if it should not be included. */
8502 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
8503 const arelent *rel ATTRIBUTE_UNUSED)
8505 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
8509 /* We use this so we can override certain functions
8510 (though currently we don't). */
8512 const struct elf_size_info elfNN_aarch64_size_info =
8514 sizeof (ElfNN_External_Ehdr),
8515 sizeof (ElfNN_External_Phdr),
8516 sizeof (ElfNN_External_Shdr),
8517 sizeof (ElfNN_External_Rel),
8518 sizeof (ElfNN_External_Rela),
8519 sizeof (ElfNN_External_Sym),
8520 sizeof (ElfNN_External_Dyn),
8521 sizeof (Elf_External_Note),
8522 4, /* Hash table entry size. */
8523 1, /* Internal relocs per external relocs. */
8524 ARCH_SIZE, /* Arch size. */
8525 LOG_FILE_ALIGN, /* Log_file_align. */
8526 ELFCLASSNN, EV_CURRENT,
8527 bfd_elfNN_write_out_phdrs,
8528 bfd_elfNN_write_shdrs_and_ehdr,
8529 bfd_elfNN_checksum_contents,
8530 bfd_elfNN_write_relocs,
8531 bfd_elfNN_swap_symbol_in,
8532 bfd_elfNN_swap_symbol_out,
8533 bfd_elfNN_slurp_reloc_table,
8534 bfd_elfNN_slurp_symbol_table,
8535 bfd_elfNN_swap_dyn_in,
8536 bfd_elfNN_swap_dyn_out,
8537 bfd_elfNN_swap_reloc_in,
8538 bfd_elfNN_swap_reloc_out,
8539 bfd_elfNN_swap_reloca_in,
8540 bfd_elfNN_swap_reloca_out
8543 #define ELF_ARCH bfd_arch_aarch64
8544 #define ELF_MACHINE_CODE EM_AARCH64
8545 #define ELF_MAXPAGESIZE 0x10000
8546 #define ELF_MINPAGESIZE 0x1000
8547 #define ELF_COMMONPAGESIZE 0x1000
8549 #define bfd_elfNN_close_and_cleanup \
8550 elfNN_aarch64_close_and_cleanup
8552 #define bfd_elfNN_bfd_free_cached_info \
8553 elfNN_aarch64_bfd_free_cached_info
8555 #define bfd_elfNN_bfd_is_target_special_symbol \
8556 elfNN_aarch64_is_target_special_symbol
8558 #define bfd_elfNN_bfd_link_hash_table_create \
8559 elfNN_aarch64_link_hash_table_create
8561 #define bfd_elfNN_bfd_merge_private_bfd_data \
8562 elfNN_aarch64_merge_private_bfd_data
8564 #define bfd_elfNN_bfd_print_private_bfd_data \
8565 elfNN_aarch64_print_private_bfd_data
8567 #define bfd_elfNN_bfd_reloc_type_lookup \
8568 elfNN_aarch64_reloc_type_lookup
8570 #define bfd_elfNN_bfd_reloc_name_lookup \
8571 elfNN_aarch64_reloc_name_lookup
8573 #define bfd_elfNN_bfd_set_private_flags \
8574 elfNN_aarch64_set_private_flags
8576 #define bfd_elfNN_find_inliner_info \
8577 elfNN_aarch64_find_inliner_info
8579 #define bfd_elfNN_find_nearest_line \
8580 elfNN_aarch64_find_nearest_line
8582 #define bfd_elfNN_mkobject \
8583 elfNN_aarch64_mkobject
8585 #define bfd_elfNN_new_section_hook \
8586 elfNN_aarch64_new_section_hook
8588 #define elf_backend_adjust_dynamic_symbol \
8589 elfNN_aarch64_adjust_dynamic_symbol
8591 #define elf_backend_always_size_sections \
8592 elfNN_aarch64_always_size_sections
8594 #define elf_backend_check_relocs \
8595 elfNN_aarch64_check_relocs
8597 #define elf_backend_copy_indirect_symbol \
8598 elfNN_aarch64_copy_indirect_symbol
8600 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
8601 to them in our hash. */
8602 #define elf_backend_create_dynamic_sections \
8603 elfNN_aarch64_create_dynamic_sections
8605 #define elf_backend_init_index_section \
8606 _bfd_elf_init_2_index_sections
8608 #define elf_backend_finish_dynamic_sections \
8609 elfNN_aarch64_finish_dynamic_sections
8611 #define elf_backend_finish_dynamic_symbol \
8612 elfNN_aarch64_finish_dynamic_symbol
8614 #define elf_backend_gc_sweep_hook \
8615 elfNN_aarch64_gc_sweep_hook
8617 #define elf_backend_object_p \
8618 elfNN_aarch64_object_p
8620 #define elf_backend_output_arch_local_syms \
8621 elfNN_aarch64_output_arch_local_syms
8623 #define elf_backend_plt_sym_val \
8624 elfNN_aarch64_plt_sym_val
8626 #define elf_backend_post_process_headers \
8627 elfNN_aarch64_post_process_headers
8629 #define elf_backend_relocate_section \
8630 elfNN_aarch64_relocate_section
8632 #define elf_backend_reloc_type_class \
8633 elfNN_aarch64_reloc_type_class
8635 #define elf_backend_section_from_shdr \
8636 elfNN_aarch64_section_from_shdr
8638 #define elf_backend_size_dynamic_sections \
8639 elfNN_aarch64_size_dynamic_sections
8641 #define elf_backend_size_info \
8642 elfNN_aarch64_size_info
8644 #define elf_backend_write_section \
8645 elfNN_aarch64_write_section
8647 #define elf_backend_can_refcount 1
8648 #define elf_backend_can_gc_sections 1
8649 #define elf_backend_plt_readonly 1
8650 #define elf_backend_want_got_plt 1
8651 #define elf_backend_want_plt_sym 0
8652 #define elf_backend_may_use_rel_p 0
8653 #define elf_backend_may_use_rela_p 1
8654 #define elf_backend_default_use_rela_p 1
8655 #define elf_backend_rela_normal 1
8656 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8657 #define elf_backend_default_execstack 0
8659 #undef elf_backend_obj_attrs_section
8660 #define elf_backend_obj_attrs_section ".ARM.attributes"
8662 #include "elfNN-target.h"