1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 /* Notes on implementation:
23 Thread Local Store (TLS)
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanims.
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different to none
63 TLS local objects which do not need a GOT entry.
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up offset.
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
95 elfNN_aarch64_check_relocs()
97 This function is invoked for each relocation.
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. One time creation of local symbol data structures are
102 created when the first local symbol is seen.
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
107 elfNN_aarch64_allocate_dynrelocs ()
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT, for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
115 elfNN_aarch64_size_dynamic_sections ()
117 Iterate all input BFDS, look for in the local symbol data structure
118 constructed earlier for local TLS symbols and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
122 elfNN_aarch64_relocate_section ()
124 Calls elfNN_aarch64_final_link_relocate ()
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
134 elfNN_aarch64_final_link_relocate ()
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
140 #include "libiberty.h"
142 #include "bfd_stdint.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
189 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
190 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
191 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
193 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
194 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
205 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
207 #define ELIMINATE_COPY_RELOCS 0
209 /* Return size of a relocation entry. HTAB is the bfd's
210 elf_aarch64_link_hash_entry. */
211 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
213 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
214 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
215 #define PLT_ENTRY_SIZE (32)
216 #define PLT_SMALL_ENTRY_SIZE (16)
217 #define PLT_TLSDESC_ENTRY_SIZE (32)
219 /* Encoding of the nop instruction */
220 #define INSN_NOP 0xd503201f
222 #define aarch64_compute_jump_table_size(htab) \
223 (((htab)->root.srelplt == NULL) ? 0 \
224 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
226 /* The first entry in a procedure linkage table looks like this
227 if the distance between the PLTGOT and the PLT is < 4GB use
228 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
229 in x16 and needs to work out PLTGOT[1] by using an address of
230 [x16,#-GOT_ENTRY_SIZE]. */
231 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
233 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
234 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
236 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
237 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
239 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
240 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
242 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
243 0x1f, 0x20, 0x03, 0xd5, /* nop */
244 0x1f, 0x20, 0x03, 0xd5, /* nop */
245 0x1f, 0x20, 0x03, 0xd5, /* nop */
248 /* Per function entry in a procedure linkage table looks like this
249 if the distance between the PLTGOT and the PLT is < 4GB use
250 these PLT entries. */
251 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
253 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
255 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
256 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
258 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
259 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
261 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
264 static const bfd_byte
265 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
267 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
268 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
269 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
271 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
272 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
274 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
275 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
277 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
278 0x1f, 0x20, 0x03, 0xd5, /* nop */
279 0x1f, 0x20, 0x03, 0xd5, /* nop */
282 #define elf_info_to_howto elfNN_aarch64_info_to_howto
283 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
285 #define AARCH64_ELF_ABI_VERSION 0
287 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
288 #define ALL_ONES (~ (bfd_vma) 0)
290 /* Indexed by the bfd interal reloc enumerators.
291 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
294 static reloc_howto_type elfNN_aarch64_howto_table[] =
298 /* Basic data relocations. */
301 HOWTO (R_AARCH64_NULL, /* type */
303 3, /* size (0 = byte, 1 = short, 2 = long) */
305 FALSE, /* pc_relative */
307 complain_overflow_dont, /* complain_on_overflow */
308 bfd_elf_generic_reloc, /* special_function */
309 "R_AARCH64_NULL", /* name */
310 FALSE, /* partial_inplace */
313 FALSE), /* pcrel_offset */
315 HOWTO (R_AARCH64_NONE, /* type */
317 3, /* size (0 = byte, 1 = short, 2 = long) */
319 FALSE, /* pc_relative */
321 complain_overflow_dont, /* complain_on_overflow */
322 bfd_elf_generic_reloc, /* special_function */
323 "R_AARCH64_NONE", /* name */
324 FALSE, /* partial_inplace */
327 FALSE), /* pcrel_offset */
331 HOWTO64 (AARCH64_R (ABS64), /* type */
333 4, /* size (4 = long long) */
335 FALSE, /* pc_relative */
337 complain_overflow_unsigned, /* complain_on_overflow */
338 bfd_elf_generic_reloc, /* special_function */
339 AARCH64_R_STR (ABS64), /* name */
340 FALSE, /* partial_inplace */
341 ALL_ONES, /* src_mask */
342 ALL_ONES, /* dst_mask */
343 FALSE), /* pcrel_offset */
346 HOWTO (AARCH64_R (ABS32), /* type */
348 2, /* size (0 = byte, 1 = short, 2 = long) */
350 FALSE, /* pc_relative */
352 complain_overflow_unsigned, /* complain_on_overflow */
353 bfd_elf_generic_reloc, /* special_function */
354 AARCH64_R_STR (ABS32), /* name */
355 FALSE, /* partial_inplace */
356 0xffffffff, /* src_mask */
357 0xffffffff, /* dst_mask */
358 FALSE), /* pcrel_offset */
361 HOWTO (AARCH64_R (ABS16), /* type */
363 1, /* size (0 = byte, 1 = short, 2 = long) */
365 FALSE, /* pc_relative */
367 complain_overflow_unsigned, /* complain_on_overflow */
368 bfd_elf_generic_reloc, /* special_function */
369 AARCH64_R_STR (ABS16), /* name */
370 FALSE, /* partial_inplace */
371 0xffff, /* src_mask */
372 0xffff, /* dst_mask */
373 FALSE), /* pcrel_offset */
375 /* .xword: (S+A-P) */
376 HOWTO64 (AARCH64_R (PREL64), /* type */
378 4, /* size (4 = long long) */
380 TRUE, /* pc_relative */
382 complain_overflow_signed, /* complain_on_overflow */
383 bfd_elf_generic_reloc, /* special_function */
384 AARCH64_R_STR (PREL64), /* name */
385 FALSE, /* partial_inplace */
386 ALL_ONES, /* src_mask */
387 ALL_ONES, /* dst_mask */
388 TRUE), /* pcrel_offset */
391 HOWTO (AARCH64_R (PREL32), /* type */
393 2, /* size (0 = byte, 1 = short, 2 = long) */
395 TRUE, /* pc_relative */
397 complain_overflow_signed, /* complain_on_overflow */
398 bfd_elf_generic_reloc, /* special_function */
399 AARCH64_R_STR (PREL32), /* name */
400 FALSE, /* partial_inplace */
401 0xffffffff, /* src_mask */
402 0xffffffff, /* dst_mask */
403 TRUE), /* pcrel_offset */
406 HOWTO (AARCH64_R (PREL16), /* type */
408 1, /* size (0 = byte, 1 = short, 2 = long) */
410 TRUE, /* pc_relative */
412 complain_overflow_signed, /* complain_on_overflow */
413 bfd_elf_generic_reloc, /* special_function */
414 AARCH64_R_STR (PREL16), /* name */
415 FALSE, /* partial_inplace */
416 0xffff, /* src_mask */
417 0xffff, /* dst_mask */
418 TRUE), /* pcrel_offset */
420 /* Group relocations to create a 16, 32, 48 or 64 bit
421 unsigned data or abs address inline. */
423 /* MOVZ: ((S+A) >> 0) & 0xffff */
424 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
428 FALSE, /* pc_relative */
430 complain_overflow_unsigned, /* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 AARCH64_R_STR (MOVW_UABS_G0), /* name */
433 FALSE, /* partial_inplace */
434 0xffff, /* src_mask */
435 0xffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
438 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
439 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
441 2, /* size (0 = byte, 1 = short, 2 = long) */
443 FALSE, /* pc_relative */
445 complain_overflow_dont, /* complain_on_overflow */
446 bfd_elf_generic_reloc, /* special_function */
447 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
448 FALSE, /* partial_inplace */
449 0xffff, /* src_mask */
450 0xffff, /* dst_mask */
451 FALSE), /* pcrel_offset */
453 /* MOVZ: ((S+A) >> 16) & 0xffff */
454 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
456 2, /* size (0 = byte, 1 = short, 2 = long) */
458 FALSE, /* pc_relative */
460 complain_overflow_unsigned, /* complain_on_overflow */
461 bfd_elf_generic_reloc, /* special_function */
462 AARCH64_R_STR (MOVW_UABS_G1), /* name */
463 FALSE, /* partial_inplace */
464 0xffff, /* src_mask */
465 0xffff, /* dst_mask */
466 FALSE), /* pcrel_offset */
468 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
469 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
471 2, /* size (0 = byte, 1 = short, 2 = long) */
473 FALSE, /* pc_relative */
475 complain_overflow_dont, /* complain_on_overflow */
476 bfd_elf_generic_reloc, /* special_function */
477 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
478 FALSE, /* partial_inplace */
479 0xffff, /* src_mask */
480 0xffff, /* dst_mask */
481 FALSE), /* pcrel_offset */
483 /* MOVZ: ((S+A) >> 32) & 0xffff */
484 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
488 FALSE, /* pc_relative */
490 complain_overflow_unsigned, /* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 AARCH64_R_STR (MOVW_UABS_G2), /* name */
493 FALSE, /* partial_inplace */
494 0xffff, /* src_mask */
495 0xffff, /* dst_mask */
496 FALSE), /* pcrel_offset */
498 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
499 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
501 2, /* size (0 = byte, 1 = short, 2 = long) */
503 FALSE, /* pc_relative */
505 complain_overflow_dont, /* complain_on_overflow */
506 bfd_elf_generic_reloc, /* special_function */
507 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
508 FALSE, /* partial_inplace */
509 0xffff, /* src_mask */
510 0xffff, /* dst_mask */
511 FALSE), /* pcrel_offset */
513 /* MOVZ: ((S+A) >> 48) & 0xffff */
514 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
516 2, /* size (0 = byte, 1 = short, 2 = long) */
518 FALSE, /* pc_relative */
520 complain_overflow_unsigned, /* complain_on_overflow */
521 bfd_elf_generic_reloc, /* special_function */
522 AARCH64_R_STR (MOVW_UABS_G3), /* name */
523 FALSE, /* partial_inplace */
524 0xffff, /* src_mask */
525 0xffff, /* dst_mask */
526 FALSE), /* pcrel_offset */
528 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
529 signed data or abs address inline. Will change instruction
530 to MOVN or MOVZ depending on sign of calculated value. */
532 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
533 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
535 2, /* size (0 = byte, 1 = short, 2 = long) */
537 FALSE, /* pc_relative */
539 complain_overflow_signed, /* complain_on_overflow */
540 bfd_elf_generic_reloc, /* special_function */
541 AARCH64_R_STR (MOVW_SABS_G0), /* name */
542 FALSE, /* partial_inplace */
543 0xffff, /* src_mask */
544 0xffff, /* dst_mask */
545 FALSE), /* pcrel_offset */
547 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
548 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
550 2, /* size (0 = byte, 1 = short, 2 = long) */
552 FALSE, /* pc_relative */
554 complain_overflow_signed, /* complain_on_overflow */
555 bfd_elf_generic_reloc, /* special_function */
556 AARCH64_R_STR (MOVW_SABS_G1), /* name */
557 FALSE, /* partial_inplace */
558 0xffff, /* src_mask */
559 0xffff, /* dst_mask */
560 FALSE), /* pcrel_offset */
562 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
563 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
565 2, /* size (0 = byte, 1 = short, 2 = long) */
567 FALSE, /* pc_relative */
569 complain_overflow_signed, /* complain_on_overflow */
570 bfd_elf_generic_reloc, /* special_function */
571 AARCH64_R_STR (MOVW_SABS_G2), /* name */
572 FALSE, /* partial_inplace */
573 0xffff, /* src_mask */
574 0xffff, /* dst_mask */
575 FALSE), /* pcrel_offset */
577 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
578 addresses: PG(x) is (x & ~0xfff). */
580 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
581 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
583 2, /* size (0 = byte, 1 = short, 2 = long) */
585 TRUE, /* pc_relative */
587 complain_overflow_signed, /* complain_on_overflow */
588 bfd_elf_generic_reloc, /* special_function */
589 AARCH64_R_STR (LD_PREL_LO19), /* name */
590 FALSE, /* partial_inplace */
591 0x7ffff, /* src_mask */
592 0x7ffff, /* dst_mask */
593 TRUE), /* pcrel_offset */
595 /* ADR: (S+A-P) & 0x1fffff */
596 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
600 TRUE, /* pc_relative */
602 complain_overflow_signed, /* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 AARCH64_R_STR (ADR_PREL_LO21), /* name */
605 FALSE, /* partial_inplace */
606 0x1fffff, /* src_mask */
607 0x1fffff, /* dst_mask */
608 TRUE), /* pcrel_offset */
610 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
611 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
613 2, /* size (0 = byte, 1 = short, 2 = long) */
615 TRUE, /* pc_relative */
617 complain_overflow_signed, /* complain_on_overflow */
618 bfd_elf_generic_reloc, /* special_function */
619 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
620 FALSE, /* partial_inplace */
621 0x1fffff, /* src_mask */
622 0x1fffff, /* dst_mask */
623 TRUE), /* pcrel_offset */
625 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
626 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
628 2, /* size (0 = byte, 1 = short, 2 = long) */
630 TRUE, /* pc_relative */
632 complain_overflow_dont, /* complain_on_overflow */
633 bfd_elf_generic_reloc, /* special_function */
634 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
635 FALSE, /* partial_inplace */
636 0x1fffff, /* src_mask */
637 0x1fffff, /* dst_mask */
638 TRUE), /* pcrel_offset */
640 /* ADD: (S+A) & 0xfff [no overflow check] */
641 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
643 2, /* size (0 = byte, 1 = short, 2 = long) */
645 FALSE, /* pc_relative */
647 complain_overflow_dont, /* complain_on_overflow */
648 bfd_elf_generic_reloc, /* special_function */
649 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
650 FALSE, /* partial_inplace */
651 0x3ffc00, /* src_mask */
652 0x3ffc00, /* dst_mask */
653 FALSE), /* pcrel_offset */
655 /* LD/ST8: (S+A) & 0xfff */
656 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
658 2, /* size (0 = byte, 1 = short, 2 = long) */
660 FALSE, /* pc_relative */
662 complain_overflow_dont, /* complain_on_overflow */
663 bfd_elf_generic_reloc, /* special_function */
664 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
665 FALSE, /* partial_inplace */
666 0xfff, /* src_mask */
667 0xfff, /* dst_mask */
668 FALSE), /* pcrel_offset */
670 /* Relocations for control-flow instructions. */
672 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
673 HOWTO (AARCH64_R (TSTBR14), /* type */
675 2, /* size (0 = byte, 1 = short, 2 = long) */
677 TRUE, /* pc_relative */
679 complain_overflow_signed, /* complain_on_overflow */
680 bfd_elf_generic_reloc, /* special_function */
681 AARCH64_R_STR (TSTBR14), /* name */
682 FALSE, /* partial_inplace */
683 0x3fff, /* src_mask */
684 0x3fff, /* dst_mask */
685 TRUE), /* pcrel_offset */
687 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
688 HOWTO (AARCH64_R (CONDBR19), /* type */
690 2, /* size (0 = byte, 1 = short, 2 = long) */
692 TRUE, /* pc_relative */
694 complain_overflow_signed, /* complain_on_overflow */
695 bfd_elf_generic_reloc, /* special_function */
696 AARCH64_R_STR (CONDBR19), /* name */
697 FALSE, /* partial_inplace */
698 0x7ffff, /* src_mask */
699 0x7ffff, /* dst_mask */
700 TRUE), /* pcrel_offset */
702 /* B: ((S+A-P) >> 2) & 0x3ffffff */
703 HOWTO (AARCH64_R (JUMP26), /* type */
705 2, /* size (0 = byte, 1 = short, 2 = long) */
707 TRUE, /* pc_relative */
709 complain_overflow_signed, /* complain_on_overflow */
710 bfd_elf_generic_reloc, /* special_function */
711 AARCH64_R_STR (JUMP26), /* name */
712 FALSE, /* partial_inplace */
713 0x3ffffff, /* src_mask */
714 0x3ffffff, /* dst_mask */
715 TRUE), /* pcrel_offset */
717 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
718 HOWTO (AARCH64_R (CALL26), /* type */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
722 TRUE, /* pc_relative */
724 complain_overflow_signed, /* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 AARCH64_R_STR (CALL26), /* name */
727 FALSE, /* partial_inplace */
728 0x3ffffff, /* src_mask */
729 0x3ffffff, /* dst_mask */
730 TRUE), /* pcrel_offset */
732 /* LD/ST16: (S+A) & 0xffe */
733 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
735 2, /* size (0 = byte, 1 = short, 2 = long) */
737 FALSE, /* pc_relative */
739 complain_overflow_dont, /* complain_on_overflow */
740 bfd_elf_generic_reloc, /* special_function */
741 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
742 FALSE, /* partial_inplace */
743 0xffe, /* src_mask */
744 0xffe, /* dst_mask */
745 FALSE), /* pcrel_offset */
747 /* LD/ST32: (S+A) & 0xffc */
748 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
750 2, /* size (0 = byte, 1 = short, 2 = long) */
752 FALSE, /* pc_relative */
754 complain_overflow_dont, /* complain_on_overflow */
755 bfd_elf_generic_reloc, /* special_function */
756 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
757 FALSE, /* partial_inplace */
758 0xffc, /* src_mask */
759 0xffc, /* dst_mask */
760 FALSE), /* pcrel_offset */
762 /* LD/ST64: (S+A) & 0xff8 */
763 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
765 2, /* size (0 = byte, 1 = short, 2 = long) */
767 FALSE, /* pc_relative */
769 complain_overflow_dont, /* complain_on_overflow */
770 bfd_elf_generic_reloc, /* special_function */
771 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
772 FALSE, /* partial_inplace */
773 0xff8, /* src_mask */
774 0xff8, /* dst_mask */
775 FALSE), /* pcrel_offset */
777 /* LD/ST128: (S+A) & 0xff0 */
778 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
782 FALSE, /* pc_relative */
784 complain_overflow_dont, /* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
787 FALSE, /* partial_inplace */
788 0xff0, /* src_mask */
789 0xff0, /* dst_mask */
790 FALSE), /* pcrel_offset */
792 /* Set a load-literal immediate field to bits
793 0x1FFFFC of G(S)-P */
794 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
796 2, /* size (0 = byte,1 = short,2 = long) */
798 TRUE, /* pc_relative */
800 complain_overflow_signed, /* complain_on_overflow */
801 bfd_elf_generic_reloc, /* special_function */
802 AARCH64_R_STR (GOT_LD_PREL19), /* name */
803 FALSE, /* partial_inplace */
804 0xffffe0, /* src_mask */
805 0xffffe0, /* dst_mask */
806 TRUE), /* pcrel_offset */
808 /* Get to the page for the GOT entry for the symbol
809 (G(S) - P) using an ADRP instruction. */
810 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
812 2, /* size (0 = byte, 1 = short, 2 = long) */
814 TRUE, /* pc_relative */
816 complain_overflow_dont, /* complain_on_overflow */
817 bfd_elf_generic_reloc, /* special_function */
818 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
819 FALSE, /* partial_inplace */
820 0x1fffff, /* src_mask */
821 0x1fffff, /* dst_mask */
822 TRUE), /* pcrel_offset */
824 /* LD64: GOT offset G(S) & 0xff8 */
825 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
827 2, /* size (0 = byte, 1 = short, 2 = long) */
829 FALSE, /* pc_relative */
831 complain_overflow_dont, /* complain_on_overflow */
832 bfd_elf_generic_reloc, /* special_function */
833 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
834 FALSE, /* partial_inplace */
835 0xff8, /* src_mask */
836 0xff8, /* dst_mask */
837 FALSE), /* pcrel_offset */
839 /* LD32: GOT offset G(S) & 0xffc */
840 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
842 2, /* size (0 = byte, 1 = short, 2 = long) */
844 FALSE, /* pc_relative */
846 complain_overflow_dont, /* complain_on_overflow */
847 bfd_elf_generic_reloc, /* special_function */
848 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
849 FALSE, /* partial_inplace */
850 0xffc, /* src_mask */
851 0xffc, /* dst_mask */
852 FALSE), /* pcrel_offset */
854 /* LD64: GOT offset for the symbol. */
855 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
857 2, /* size (0 = byte, 1 = short, 2 = long) */
859 FALSE, /* pc_relative */
861 complain_overflow_unsigned, /* complain_on_overflow */
862 bfd_elf_generic_reloc, /* special_function */
863 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
864 FALSE, /* partial_inplace */
865 0x7ff8, /* src_mask */
866 0x7ff8, /* dst_mask */
867 FALSE), /* pcrel_offset */
869 /* LD32: GOT offset to the page address of GOT table.
870 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
871 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
873 2, /* size (0 = byte, 1 = short, 2 = long) */
875 FALSE, /* pc_relative */
877 complain_overflow_unsigned, /* complain_on_overflow */
878 bfd_elf_generic_reloc, /* special_function */
879 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
880 FALSE, /* partial_inplace */
881 0x5ffc, /* src_mask */
882 0x5ffc, /* dst_mask */
883 FALSE), /* pcrel_offset */
885 /* LD64: GOT offset to the page address of GOT table.
886 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
887 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
889 2, /* size (0 = byte, 1 = short, 2 = long) */
891 FALSE, /* pc_relative */
893 complain_overflow_unsigned, /* complain_on_overflow */
894 bfd_elf_generic_reloc, /* special_function */
895 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
896 FALSE, /* partial_inplace */
897 0x7ff8, /* src_mask */
898 0x7ff8, /* dst_mask */
899 FALSE), /* pcrel_offset */
901 /* Get to the page for the GOT entry for the symbol
902 (G(S) - P) using an ADRP instruction. */
903 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
905 2, /* size (0 = byte, 1 = short, 2 = long) */
907 TRUE, /* pc_relative */
909 complain_overflow_dont, /* complain_on_overflow */
910 bfd_elf_generic_reloc, /* special_function */
911 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
912 FALSE, /* partial_inplace */
913 0x1fffff, /* src_mask */
914 0x1fffff, /* dst_mask */
915 TRUE), /* pcrel_offset */
917 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
919 2, /* size (0 = byte, 1 = short, 2 = long) */
921 TRUE, /* pc_relative */
923 complain_overflow_dont, /* complain_on_overflow */
924 bfd_elf_generic_reloc, /* special_function */
925 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
926 FALSE, /* partial_inplace */
927 0x1fffff, /* src_mask */
928 0x1fffff, /* dst_mask */
929 TRUE), /* pcrel_offset */
931 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
932 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
934 2, /* size (0 = byte, 1 = short, 2 = long) */
936 FALSE, /* pc_relative */
938 complain_overflow_dont, /* complain_on_overflow */
939 bfd_elf_generic_reloc, /* special_function */
940 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
941 FALSE, /* partial_inplace */
942 0xfff, /* src_mask */
943 0xfff, /* dst_mask */
944 FALSE), /* pcrel_offset */
946 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
948 2, /* size (0 = byte, 1 = short, 2 = long) */
950 FALSE, /* pc_relative */
952 complain_overflow_dont, /* complain_on_overflow */
953 bfd_elf_generic_reloc, /* special_function */
954 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
955 FALSE, /* partial_inplace */
956 0xffff, /* src_mask */
957 0xffff, /* dst_mask */
958 FALSE), /* pcrel_offset */
960 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
962 2, /* size (0 = byte, 1 = short, 2 = long) */
964 FALSE, /* pc_relative */
966 complain_overflow_dont, /* complain_on_overflow */
967 bfd_elf_generic_reloc, /* special_function */
968 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
969 FALSE, /* partial_inplace */
970 0xffff, /* src_mask */
971 0xffff, /* dst_mask */
972 FALSE), /* pcrel_offset */
974 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
976 2, /* size (0 = byte, 1 = short, 2 = long) */
978 FALSE, /* pc_relative */
980 complain_overflow_dont, /* complain_on_overflow */
981 bfd_elf_generic_reloc, /* special_function */
982 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
983 FALSE, /* partial_inplace */
984 0x1fffff, /* src_mask */
985 0x1fffff, /* dst_mask */
986 FALSE), /* pcrel_offset */
988 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
990 2, /* size (0 = byte, 1 = short, 2 = long) */
992 FALSE, /* pc_relative */
994 complain_overflow_dont, /* complain_on_overflow */
995 bfd_elf_generic_reloc, /* special_function */
996 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
997 FALSE, /* partial_inplace */
998 0xff8, /* src_mask */
999 0xff8, /* dst_mask */
1000 FALSE), /* pcrel_offset */
1002 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1004 2, /* size (0 = byte, 1 = short, 2 = long) */
1006 FALSE, /* pc_relative */
1008 complain_overflow_dont, /* complain_on_overflow */
1009 bfd_elf_generic_reloc, /* special_function */
1010 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1011 FALSE, /* partial_inplace */
1012 0xffc, /* src_mask */
1013 0xffc, /* dst_mask */
1014 FALSE), /* pcrel_offset */
1016 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1018 2, /* size (0 = byte, 1 = short, 2 = long) */
1020 FALSE, /* pc_relative */
1022 complain_overflow_dont, /* complain_on_overflow */
1023 bfd_elf_generic_reloc, /* special_function */
1024 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1025 FALSE, /* partial_inplace */
1026 0x1ffffc, /* src_mask */
1027 0x1ffffc, /* dst_mask */
1028 FALSE), /* pcrel_offset */
1030 /* Unsigned 12 bit byte offset to module TLS base address. */
1031 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12), /* type */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1035 FALSE, /* pc_relative */
1037 complain_overflow_unsigned, /* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12), /* name */
1040 FALSE, /* partial_inplace */
1041 0xfff, /* src_mask */
1042 0xfff, /* dst_mask */
1043 FALSE), /* pcrel_offset */
1045 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1046 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */
1048 2, /* size (0 = byte, 1 = short, 2 = long) */
1050 FALSE, /* pc_relative */
1052 complain_overflow_dont, /* complain_on_overflow */
1053 bfd_elf_generic_reloc, /* special_function */
1054 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */
1055 FALSE, /* partial_inplace */
1056 0xfff, /* src_mask */
1057 0xfff, /* dst_mask */
1058 FALSE), /* pcrel_offset */
1060 /* Get to the page for the GOT entry for the symbol
1061 (G(S) - P) using an ADRP instruction. */
1062 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */
1063 12, /* rightshift */
1064 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 TRUE, /* pc_relative */
1068 complain_overflow_signed, /* complain_on_overflow */
1069 bfd_elf_generic_reloc, /* special_function */
1070 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */
1071 FALSE, /* partial_inplace */
1072 0x1fffff, /* src_mask */
1073 0x1fffff, /* dst_mask */
1074 TRUE), /* pcrel_offset */
1076 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1078 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 TRUE, /* pc_relative */
1082 complain_overflow_signed, /* complain_on_overflow */
1083 bfd_elf_generic_reloc, /* special_function */
1084 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1085 FALSE, /* partial_inplace */
1086 0x1fffff, /* src_mask */
1087 0x1fffff, /* dst_mask */
1088 TRUE), /* pcrel_offset */
1090 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1091 32, /* rightshift */
1092 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 FALSE, /* pc_relative */
1096 complain_overflow_unsigned, /* complain_on_overflow */
1097 bfd_elf_generic_reloc, /* special_function */
1098 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1099 FALSE, /* partial_inplace */
1100 0xffff, /* src_mask */
1101 0xffff, /* dst_mask */
1102 FALSE), /* pcrel_offset */
1104 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1105 16, /* rightshift */
1106 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 FALSE, /* pc_relative */
1110 complain_overflow_dont, /* complain_on_overflow */
1111 bfd_elf_generic_reloc, /* special_function */
1112 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1113 FALSE, /* partial_inplace */
1114 0xffff, /* src_mask */
1115 0xffff, /* dst_mask */
1116 FALSE), /* pcrel_offset */
1118 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1119 16, /* rightshift */
1120 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 FALSE, /* pc_relative */
1124 complain_overflow_dont, /* complain_on_overflow */
1125 bfd_elf_generic_reloc, /* special_function */
1126 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1127 FALSE, /* partial_inplace */
1128 0xffff, /* src_mask */
1129 0xffff, /* dst_mask */
1130 FALSE), /* pcrel_offset */
1132 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1134 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 FALSE, /* pc_relative */
1138 complain_overflow_dont, /* complain_on_overflow */
1139 bfd_elf_generic_reloc, /* special_function */
1140 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1141 FALSE, /* partial_inplace */
1142 0xffff, /* src_mask */
1143 0xffff, /* dst_mask */
1144 FALSE), /* pcrel_offset */
1146 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1148 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 FALSE, /* pc_relative */
1152 complain_overflow_dont, /* complain_on_overflow */
1153 bfd_elf_generic_reloc, /* special_function */
1154 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1155 FALSE, /* partial_inplace */
1156 0xffff, /* src_mask */
1157 0xffff, /* dst_mask */
1158 FALSE), /* pcrel_offset */
1160 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1161 12, /* rightshift */
1162 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 FALSE, /* pc_relative */
1166 complain_overflow_unsigned, /* complain_on_overflow */
1167 bfd_elf_generic_reloc, /* special_function */
1168 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1169 FALSE, /* partial_inplace */
1170 0xfff, /* src_mask */
1171 0xfff, /* dst_mask */
1172 FALSE), /* pcrel_offset */
1174 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1176 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 FALSE, /* pc_relative */
1180 complain_overflow_unsigned, /* complain_on_overflow */
1181 bfd_elf_generic_reloc, /* special_function */
1182 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1183 FALSE, /* partial_inplace */
1184 0xfff, /* src_mask */
1185 0xfff, /* dst_mask */
1186 FALSE), /* pcrel_offset */
1188 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1190 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 FALSE, /* pc_relative */
1194 complain_overflow_dont, /* complain_on_overflow */
1195 bfd_elf_generic_reloc, /* special_function */
1196 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1197 FALSE, /* partial_inplace */
1198 0xfff, /* src_mask */
1199 0xfff, /* dst_mask */
1200 FALSE), /* pcrel_offset */
1202 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1204 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 TRUE, /* pc_relative */
1208 complain_overflow_dont, /* complain_on_overflow */
1209 bfd_elf_generic_reloc, /* special_function */
1210 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1211 FALSE, /* partial_inplace */
1212 0x0ffffe0, /* src_mask */
1213 0x0ffffe0, /* dst_mask */
1214 TRUE), /* pcrel_offset */
1216 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1218 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 TRUE, /* pc_relative */
1222 complain_overflow_dont, /* complain_on_overflow */
1223 bfd_elf_generic_reloc, /* special_function */
1224 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1225 FALSE, /* partial_inplace */
1226 0x1fffff, /* src_mask */
1227 0x1fffff, /* dst_mask */
1228 TRUE), /* pcrel_offset */
1230 /* Get to the page for the GOT entry for the symbol
1231 (G(S) - P) using an ADRP instruction. */
1232 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1233 12, /* rightshift */
1234 2, /* size (0 = byte, 1 = short, 2 = long) */
1236 TRUE, /* pc_relative */
1238 complain_overflow_dont, /* complain_on_overflow */
1239 bfd_elf_generic_reloc, /* special_function */
1240 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1241 FALSE, /* partial_inplace */
1242 0x1fffff, /* src_mask */
1243 0x1fffff, /* dst_mask */
1244 TRUE), /* pcrel_offset */
1246 /* LD64: GOT offset G(S) & 0xff8. */
1247 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1249 2, /* size (0 = byte, 1 = short, 2 = long) */
1251 FALSE, /* pc_relative */
1253 complain_overflow_dont, /* complain_on_overflow */
1254 bfd_elf_generic_reloc, /* special_function */
1255 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1256 FALSE, /* partial_inplace */
1257 0xff8, /* src_mask */
1258 0xff8, /* dst_mask */
1259 FALSE), /* pcrel_offset */
1261 /* LD32: GOT offset G(S) & 0xffc. */
1262 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1264 2, /* size (0 = byte, 1 = short, 2 = long) */
1266 FALSE, /* pc_relative */
1268 complain_overflow_dont, /* complain_on_overflow */
1269 bfd_elf_generic_reloc, /* special_function */
1270 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1271 FALSE, /* partial_inplace */
1272 0xffc, /* src_mask */
1273 0xffc, /* dst_mask */
1274 FALSE), /* pcrel_offset */
1276 /* ADD: GOT offset G(S) & 0xfff. */
1277 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1279 2, /* size (0 = byte, 1 = short, 2 = long) */
1281 FALSE, /* pc_relative */
1283 complain_overflow_dont, /* complain_on_overflow */
1284 bfd_elf_generic_reloc, /* special_function */
1285 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1286 FALSE, /* partial_inplace */
1287 0xfff, /* src_mask */
1288 0xfff, /* dst_mask */
1289 FALSE), /* pcrel_offset */
1291 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1292 16, /* rightshift */
1293 2, /* size (0 = byte, 1 = short, 2 = long) */
1295 FALSE, /* pc_relative */
1297 complain_overflow_dont, /* complain_on_overflow */
1298 bfd_elf_generic_reloc, /* special_function */
1299 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1300 FALSE, /* partial_inplace */
1301 0xffff, /* src_mask */
1302 0xffff, /* dst_mask */
1303 FALSE), /* pcrel_offset */
1305 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1307 2, /* size (0 = byte, 1 = short, 2 = long) */
1309 FALSE, /* pc_relative */
1311 complain_overflow_dont, /* complain_on_overflow */
1312 bfd_elf_generic_reloc, /* special_function */
1313 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1314 FALSE, /* partial_inplace */
1315 0xffff, /* src_mask */
1316 0xffff, /* dst_mask */
1317 FALSE), /* pcrel_offset */
1319 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1321 2, /* size (0 = byte, 1 = short, 2 = long) */
1323 FALSE, /* pc_relative */
1325 complain_overflow_dont, /* complain_on_overflow */
1326 bfd_elf_generic_reloc, /* special_function */
1327 AARCH64_R_STR (TLSDESC_LDR), /* name */
1328 FALSE, /* partial_inplace */
1331 FALSE), /* pcrel_offset */
1333 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1335 2, /* size (0 = byte, 1 = short, 2 = long) */
1337 FALSE, /* pc_relative */
1339 complain_overflow_dont, /* complain_on_overflow */
1340 bfd_elf_generic_reloc, /* special_function */
1341 AARCH64_R_STR (TLSDESC_ADD), /* name */
1342 FALSE, /* partial_inplace */
1345 FALSE), /* pcrel_offset */
1347 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1349 2, /* size (0 = byte, 1 = short, 2 = long) */
1351 FALSE, /* pc_relative */
1353 complain_overflow_dont, /* complain_on_overflow */
1354 bfd_elf_generic_reloc, /* special_function */
1355 AARCH64_R_STR (TLSDESC_CALL), /* name */
1356 FALSE, /* partial_inplace */
1359 FALSE), /* pcrel_offset */
1361 HOWTO (AARCH64_R (COPY), /* type */
1363 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 FALSE, /* pc_relative */
1367 complain_overflow_bitfield, /* complain_on_overflow */
1368 bfd_elf_generic_reloc, /* special_function */
1369 AARCH64_R_STR (COPY), /* name */
1370 TRUE, /* partial_inplace */
1371 0xffffffff, /* src_mask */
1372 0xffffffff, /* dst_mask */
1373 FALSE), /* pcrel_offset */
1375 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1377 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 FALSE, /* pc_relative */
1381 complain_overflow_bitfield, /* complain_on_overflow */
1382 bfd_elf_generic_reloc, /* special_function */
1383 AARCH64_R_STR (GLOB_DAT), /* name */
1384 TRUE, /* partial_inplace */
1385 0xffffffff, /* src_mask */
1386 0xffffffff, /* dst_mask */
1387 FALSE), /* pcrel_offset */
1389 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1391 2, /* size (0 = byte, 1 = short, 2 = long) */
1393 FALSE, /* pc_relative */
1395 complain_overflow_bitfield, /* complain_on_overflow */
1396 bfd_elf_generic_reloc, /* special_function */
1397 AARCH64_R_STR (JUMP_SLOT), /* name */
1398 TRUE, /* partial_inplace */
1399 0xffffffff, /* src_mask */
1400 0xffffffff, /* dst_mask */
1401 FALSE), /* pcrel_offset */
1403 HOWTO (AARCH64_R (RELATIVE), /* type */
1405 2, /* size (0 = byte, 1 = short, 2 = long) */
1407 FALSE, /* pc_relative */
1409 complain_overflow_bitfield, /* complain_on_overflow */
1410 bfd_elf_generic_reloc, /* special_function */
1411 AARCH64_R_STR (RELATIVE), /* name */
1412 TRUE, /* partial_inplace */
1413 ALL_ONES, /* src_mask */
1414 ALL_ONES, /* dst_mask */
1415 FALSE), /* pcrel_offset */
1417 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1419 2, /* size (0 = byte, 1 = short, 2 = long) */
1421 FALSE, /* pc_relative */
1423 complain_overflow_dont, /* complain_on_overflow */
1424 bfd_elf_generic_reloc, /* special_function */
1426 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1428 AARCH64_R_STR (TLS_DTPMOD), /* name */
1430 FALSE, /* partial_inplace */
1432 ALL_ONES, /* dst_mask */
1433 FALSE), /* pc_reloffset */
1435 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1437 2, /* size (0 = byte, 1 = short, 2 = long) */
1439 FALSE, /* pc_relative */
1441 complain_overflow_dont, /* complain_on_overflow */
1442 bfd_elf_generic_reloc, /* special_function */
1444 AARCH64_R_STR (TLS_DTPREL64), /* name */
1446 AARCH64_R_STR (TLS_DTPREL), /* name */
1448 FALSE, /* partial_inplace */
1450 ALL_ONES, /* dst_mask */
1451 FALSE), /* pcrel_offset */
1453 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1455 2, /* size (0 = byte, 1 = short, 2 = long) */
1457 FALSE, /* pc_relative */
1459 complain_overflow_dont, /* complain_on_overflow */
1460 bfd_elf_generic_reloc, /* special_function */
1462 AARCH64_R_STR (TLS_TPREL64), /* name */
1464 AARCH64_R_STR (TLS_TPREL), /* name */
1466 FALSE, /* partial_inplace */
1468 ALL_ONES, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1471 HOWTO (AARCH64_R (TLSDESC), /* type */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 FALSE, /* pc_relative */
1477 complain_overflow_dont, /* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 AARCH64_R_STR (TLSDESC), /* name */
1480 FALSE, /* partial_inplace */
1482 ALL_ONES, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1485 HOWTO (AARCH64_R (IRELATIVE), /* type */
1487 2, /* size (0 = byte, 1 = short, 2 = long) */
1489 FALSE, /* pc_relative */
1491 complain_overflow_bitfield, /* complain_on_overflow */
1492 bfd_elf_generic_reloc, /* special_function */
1493 AARCH64_R_STR (IRELATIVE), /* name */
1494 FALSE, /* partial_inplace */
1496 ALL_ONES, /* dst_mask */
1497 FALSE), /* pcrel_offset */
1502 static reloc_howto_type elfNN_aarch64_howto_none =
1503 HOWTO (R_AARCH64_NONE, /* type */
1505 3, /* size (0 = byte, 1 = short, 2 = long) */
1507 FALSE, /* pc_relative */
1509 complain_overflow_dont,/* complain_on_overflow */
1510 bfd_elf_generic_reloc, /* special_function */
1511 "R_AARCH64_NONE", /* name */
1512 FALSE, /* partial_inplace */
1515 FALSE); /* pcrel_offset */
1517 /* Given HOWTO, return the bfd internal relocation enumerator. */
1519 static bfd_reloc_code_real_type
1520 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1523 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1524 const ptrdiff_t offset
1525 = howto - elfNN_aarch64_howto_table;
1527 if (offset > 0 && offset < size - 1)
1528 return BFD_RELOC_AARCH64_RELOC_START + offset;
1530 if (howto == &elfNN_aarch64_howto_none)
1531 return BFD_RELOC_AARCH64_NONE;
1533 return BFD_RELOC_AARCH64_RELOC_START;
1536 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1538 static bfd_reloc_code_real_type
1539 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1541 static bfd_boolean initialized_p = FALSE;
1542 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1543 static unsigned int offsets[R_AARCH64_end];
1545 if (initialized_p == FALSE)
1549 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1550 if (elfNN_aarch64_howto_table[i].type != 0)
1551 offsets[elfNN_aarch64_howto_table[i].type] = i;
1553 initialized_p = TRUE;
1556 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1557 return BFD_RELOC_AARCH64_NONE;
1559 /* PR 17512: file: b371e70a. */
1560 if (r_type >= R_AARCH64_end)
1562 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1563 bfd_set_error (bfd_error_bad_value);
1564 return BFD_RELOC_AARCH64_NONE;
1567 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1570 struct elf_aarch64_reloc_map
1572 bfd_reloc_code_real_type from;
1573 bfd_reloc_code_real_type to;
1576 /* Map bfd generic reloc to AArch64-specific reloc. */
1577 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1579 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1581 /* Basic data relocations. */
1582 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1583 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1584 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1585 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1586 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1587 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1588 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1591 /* Given the bfd internal relocation enumerator in CODE, return the
1592 corresponding howto entry. */
1594 static reloc_howto_type *
1595 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1599 /* Convert bfd generic reloc to AArch64-specific reloc. */
1600 if (code < BFD_RELOC_AARCH64_RELOC_START
1601 || code > BFD_RELOC_AARCH64_RELOC_END)
1602 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1603 if (elf_aarch64_reloc_map[i].from == code)
1605 code = elf_aarch64_reloc_map[i].to;
1609 if (code > BFD_RELOC_AARCH64_RELOC_START
1610 && code < BFD_RELOC_AARCH64_RELOC_END)
1611 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1612 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1614 if (code == BFD_RELOC_AARCH64_NONE)
1615 return &elfNN_aarch64_howto_none;
1620 static reloc_howto_type *
1621 elfNN_aarch64_howto_from_type (unsigned int r_type)
1623 bfd_reloc_code_real_type val;
1624 reloc_howto_type *howto;
1629 bfd_set_error (bfd_error_bad_value);
1634 if (r_type == R_AARCH64_NONE)
1635 return &elfNN_aarch64_howto_none;
1637 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1638 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1643 bfd_set_error (bfd_error_bad_value);
1648 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1649 Elf_Internal_Rela *elf_reloc)
1651 unsigned int r_type;
1653 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1654 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1657 static reloc_howto_type *
1658 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1659 bfd_reloc_code_real_type code)
1661 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1666 bfd_set_error (bfd_error_bad_value);
1670 static reloc_howto_type *
1671 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1676 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1677 if (elfNN_aarch64_howto_table[i].name != NULL
1678 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1679 return &elfNN_aarch64_howto_table[i];
1684 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1685 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1686 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1687 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1689 /* The linker script knows the section names for placement.
1690 The entry_names are used to do simple name mangling on the stubs.
1691 Given a function name, and its type, the stub can be found. The
1692 name can be changed. The only requirement is the %s be present. */
1693 #define STUB_ENTRY_NAME "__%s_veneer"
1695 /* The name of the dynamic interpreter. This is put in the .interp
1697 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1699 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1700 (((1 << 25) - 1) << 2)
1701 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1704 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1705 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1708 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1710 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1711 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1715 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1717 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1718 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1719 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1722 static const uint32_t aarch64_adrp_branch_stub [] =
1724 0x90000010, /* adrp ip0, X */
1725 /* R_AARCH64_ADR_HI21_PCREL(X) */
1726 0x91000210, /* add ip0, ip0, :lo12:X */
1727 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1728 0xd61f0200, /* br ip0 */
1731 static const uint32_t aarch64_long_branch_stub[] =
1734 0x58000090, /* ldr ip0, 1f */
1736 0x18000090, /* ldr wip0, 1f */
1738 0x10000011, /* adr ip1, #0 */
1739 0x8b110210, /* add ip0, ip0, ip1 */
1740 0xd61f0200, /* br ip0 */
1741 0x00000000, /* 1: .xword or .word
1742 R_AARCH64_PRELNN(X) + 12
1747 static const uint32_t aarch64_erratum_835769_stub[] =
1749 0x00000000, /* Placeholder for multiply accumulate. */
1750 0x14000000, /* b <label> */
1753 static const uint32_t aarch64_erratum_843419_stub[] =
1755 0x00000000, /* Placeholder for LDR instruction. */
1756 0x14000000, /* b <label> */
1759 /* Section name for stubs is the associated section name plus this
1761 #define STUB_SUFFIX ".stub"
1763 enum elf_aarch64_stub_type
1766 aarch64_stub_adrp_branch,
1767 aarch64_stub_long_branch,
1768 aarch64_stub_erratum_835769_veneer,
1769 aarch64_stub_erratum_843419_veneer,
1772 struct elf_aarch64_stub_hash_entry
1774 /* Base hash table entry structure. */
1775 struct bfd_hash_entry root;
1777 /* The stub section. */
1780 /* Offset within stub_sec of the beginning of this stub. */
1781 bfd_vma stub_offset;
1783 /* Given the symbol's value and its section we can determine its final
1784 value when building the stubs (so the stub knows where to jump). */
1785 bfd_vma target_value;
1786 asection *target_section;
1788 enum elf_aarch64_stub_type stub_type;
1790 /* The symbol table entry, if any, that this was derived from. */
1791 struct elf_aarch64_link_hash_entry *h;
1793 /* Destination symbol type */
1794 unsigned char st_type;
1796 /* Where this stub is being called from, or, in the case of combined
1797 stub sections, the first input section in the group. */
1800 /* The name for the local symbol at the start of this stub. The
1801 stub name in the hash table has to be unique; this does not, so
1802 it can be friendlier. */
1805 /* The instruction which caused this stub to be generated (only valid for
1806 erratum 835769 workaround stubs at present). */
1807 uint32_t veneered_insn;
1809 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
1810 bfd_vma adrp_offset;
1813 /* Used to build a map of a section. This is required for mixed-endian
1816 typedef struct elf_elf_section_map
1821 elf_aarch64_section_map;
1824 typedef struct _aarch64_elf_section_data
1826 struct bfd_elf_section_data elf;
1827 unsigned int mapcount;
1828 unsigned int mapsize;
1829 elf_aarch64_section_map *map;
1831 _aarch64_elf_section_data;
1833 #define elf_aarch64_section_data(sec) \
1834 ((_aarch64_elf_section_data *) elf_section_data (sec))
1836 /* The size of the thread control block which is defined to be two pointers. */
1837 #define TCB_SIZE (ARCH_SIZE/8)*2
1839 struct elf_aarch64_local_symbol
1841 unsigned int got_type;
1842 bfd_signed_vma got_refcount;
1845 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1846 offset is from the end of the jump table and reserved entries
1849 The magic value (bfd_vma) -1 indicates that an offset has not be
1851 bfd_vma tlsdesc_got_jump_table_offset;
1854 struct elf_aarch64_obj_tdata
1856 struct elf_obj_tdata root;
1858 /* local symbol descriptors */
1859 struct elf_aarch64_local_symbol *locals;
1861 /* Zero to warn when linking objects with incompatible enum sizes. */
1862 int no_enum_size_warning;
1864 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1865 int no_wchar_size_warning;
1868 #define elf_aarch64_tdata(bfd) \
1869 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1871 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1873 #define is_aarch64_elf(bfd) \
1874 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1875 && elf_tdata (bfd) != NULL \
1876 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1879 elfNN_aarch64_mkobject (bfd *abfd)
1881 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1885 #define elf_aarch64_hash_entry(ent) \
1886 ((struct elf_aarch64_link_hash_entry *)(ent))
1888 #define GOT_UNKNOWN 0
1889 #define GOT_NORMAL 1
1890 #define GOT_TLS_GD 2
1891 #define GOT_TLS_IE 4
1892 #define GOT_TLSDESC_GD 8
1894 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1896 /* AArch64 ELF linker hash entry. */
1897 struct elf_aarch64_link_hash_entry
1899 struct elf_link_hash_entry root;
1901 /* Track dynamic relocs copied for this symbol. */
1902 struct elf_dyn_relocs *dyn_relocs;
1904 /* Since PLT entries have variable size, we need to record the
1905 index into .got.plt instead of recomputing it from the PLT
1907 bfd_signed_vma plt_got_offset;
1909 /* Bit mask representing the type of GOT entry(s) if any required by
1911 unsigned int got_type;
1913 /* A pointer to the most recently used stub hash entry against this
1915 struct elf_aarch64_stub_hash_entry *stub_cache;
1917 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1918 is from the end of the jump table and reserved entries within the PLTGOT.
1920 The magic value (bfd_vma) -1 indicates that an offset has not
1922 bfd_vma tlsdesc_got_jump_table_offset;
1926 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1928 unsigned long r_symndx)
1931 return elf_aarch64_hash_entry (h)->got_type;
1933 if (! elf_aarch64_locals (abfd))
1936 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1939 /* Get the AArch64 elf linker hash table from a link_info structure. */
1940 #define elf_aarch64_hash_table(info) \
1941 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1943 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1944 ((struct elf_aarch64_stub_hash_entry *) \
1945 bfd_hash_lookup ((table), (string), (create), (copy)))
1947 /* AArch64 ELF linker hash table. */
1948 struct elf_aarch64_link_hash_table
1950 /* The main hash table. */
1951 struct elf_link_hash_table root;
1953 /* Nonzero to force PIC branch veneers. */
1956 /* Fix erratum 835769. */
1957 int fix_erratum_835769;
1959 /* Fix erratum 843419. */
1960 int fix_erratum_843419;
1962 /* Enable ADRP->ADR rewrite for erratum 843419 workaround. */
1963 int fix_erratum_843419_adr;
1965 /* The number of bytes in the initial entry in the PLT. */
1966 bfd_size_type plt_header_size;
1968 /* The number of bytes in the subsequent PLT etries. */
1969 bfd_size_type plt_entry_size;
1971 /* Short-cuts to get to dynamic linker sections. */
1975 /* Small local sym cache. */
1976 struct sym_cache sym_cache;
1978 /* For convenience in allocate_dynrelocs. */
1981 /* The amount of space used by the reserved portion of the sgotplt
1982 section, plus whatever space is used by the jump slots. */
1983 bfd_vma sgotplt_jump_table_size;
1985 /* The stub hash table. */
1986 struct bfd_hash_table stub_hash_table;
1988 /* Linker stub bfd. */
1991 /* Linker call-backs. */
1992 asection *(*add_stub_section) (const char *, asection *);
1993 void (*layout_sections_again) (void);
1995 /* Array to keep track of which stub sections have been created, and
1996 information on stub grouping. */
1999 /* This is the section to which stubs in the group will be
2002 /* The stub section. */
2006 /* Assorted information used by elfNN_aarch64_size_stubs. */
2007 unsigned int bfd_count;
2009 asection **input_list;
2011 /* The offset into splt of the PLT entry for the TLS descriptor
2012 resolver. Special values are 0, if not necessary (or not found
2013 to be necessary yet), and -1 if needed but not determined
2015 bfd_vma tlsdesc_plt;
2017 /* The GOT offset for the lazy trampoline. Communicated to the
2018 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
2019 indicates an offset is not allocated. */
2020 bfd_vma dt_tlsdesc_got;
2022 /* Used by local STT_GNU_IFUNC symbols. */
2023 htab_t loc_hash_table;
2024 void * loc_hash_memory;
2027 /* Create an entry in an AArch64 ELF linker hash table. */
2029 static struct bfd_hash_entry *
2030 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
2031 struct bfd_hash_table *table,
2034 struct elf_aarch64_link_hash_entry *ret =
2035 (struct elf_aarch64_link_hash_entry *) entry;
2037 /* Allocate the structure if it has not already been allocated by a
2040 ret = bfd_hash_allocate (table,
2041 sizeof (struct elf_aarch64_link_hash_entry));
2043 return (struct bfd_hash_entry *) ret;
2045 /* Call the allocation method of the superclass. */
2046 ret = ((struct elf_aarch64_link_hash_entry *)
2047 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2051 ret->dyn_relocs = NULL;
2052 ret->got_type = GOT_UNKNOWN;
2053 ret->plt_got_offset = (bfd_vma) - 1;
2054 ret->stub_cache = NULL;
2055 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2058 return (struct bfd_hash_entry *) ret;
2061 /* Initialize an entry in the stub hash table. */
2063 static struct bfd_hash_entry *
2064 stub_hash_newfunc (struct bfd_hash_entry *entry,
2065 struct bfd_hash_table *table, const char *string)
2067 /* Allocate the structure if it has not already been allocated by a
2071 entry = bfd_hash_allocate (table,
2073 elf_aarch64_stub_hash_entry));
2078 /* Call the allocation method of the superclass. */
2079 entry = bfd_hash_newfunc (entry, table, string);
2082 struct elf_aarch64_stub_hash_entry *eh;
2084 /* Initialize the local fields. */
2085 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2086 eh->adrp_offset = 0;
2087 eh->stub_sec = NULL;
2088 eh->stub_offset = 0;
2089 eh->target_value = 0;
2090 eh->target_section = NULL;
2091 eh->stub_type = aarch64_stub_none;
2099 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2100 for local symbol so that we can handle local STT_GNU_IFUNC symbols
2101 as global symbol. We reuse indx and dynstr_index for local symbol
2102 hash since they aren't used by global symbols in this backend. */
2105 elfNN_aarch64_local_htab_hash (const void *ptr)
2107 struct elf_link_hash_entry *h
2108 = (struct elf_link_hash_entry *) ptr;
2109 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2112 /* Compare local hash entries. */
2115 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2117 struct elf_link_hash_entry *h1
2118 = (struct elf_link_hash_entry *) ptr1;
2119 struct elf_link_hash_entry *h2
2120 = (struct elf_link_hash_entry *) ptr2;
2122 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2125 /* Find and/or create a hash entry for local symbol. */
2127 static struct elf_link_hash_entry *
2128 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2129 bfd *abfd, const Elf_Internal_Rela *rel,
2132 struct elf_aarch64_link_hash_entry e, *ret;
2133 asection *sec = abfd->sections;
2134 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2135 ELFNN_R_SYM (rel->r_info));
2138 e.root.indx = sec->id;
2139 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2140 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2141 create ? INSERT : NO_INSERT);
2148 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2152 ret = (struct elf_aarch64_link_hash_entry *)
2153 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2154 sizeof (struct elf_aarch64_link_hash_entry));
2157 memset (ret, 0, sizeof (*ret));
2158 ret->root.indx = sec->id;
2159 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2160 ret->root.dynindx = -1;
2166 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2169 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2170 struct elf_link_hash_entry *dir,
2171 struct elf_link_hash_entry *ind)
2173 struct elf_aarch64_link_hash_entry *edir, *eind;
2175 edir = (struct elf_aarch64_link_hash_entry *) dir;
2176 eind = (struct elf_aarch64_link_hash_entry *) ind;
2178 if (eind->dyn_relocs != NULL)
2180 if (edir->dyn_relocs != NULL)
2182 struct elf_dyn_relocs **pp;
2183 struct elf_dyn_relocs *p;
2185 /* Add reloc counts against the indirect sym to the direct sym
2186 list. Merge any entries against the same section. */
2187 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2189 struct elf_dyn_relocs *q;
2191 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2192 if (q->sec == p->sec)
2194 q->pc_count += p->pc_count;
2195 q->count += p->count;
2202 *pp = edir->dyn_relocs;
2205 edir->dyn_relocs = eind->dyn_relocs;
2206 eind->dyn_relocs = NULL;
2209 if (ind->root.type == bfd_link_hash_indirect)
2211 /* Copy over PLT info. */
2212 if (dir->got.refcount <= 0)
2214 edir->got_type = eind->got_type;
2215 eind->got_type = GOT_UNKNOWN;
2219 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2222 /* Destroy an AArch64 elf linker hash table. */
2225 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2227 struct elf_aarch64_link_hash_table *ret
2228 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2230 if (ret->loc_hash_table)
2231 htab_delete (ret->loc_hash_table);
2232 if (ret->loc_hash_memory)
2233 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2235 bfd_hash_table_free (&ret->stub_hash_table);
2236 _bfd_elf_link_hash_table_free (obfd);
2239 /* Create an AArch64 elf linker hash table. */
2241 static struct bfd_link_hash_table *
2242 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2244 struct elf_aarch64_link_hash_table *ret;
2245 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2247 ret = bfd_zmalloc (amt);
2251 if (!_bfd_elf_link_hash_table_init
2252 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2253 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2259 ret->plt_header_size = PLT_ENTRY_SIZE;
2260 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2262 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2264 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2265 sizeof (struct elf_aarch64_stub_hash_entry)))
2267 _bfd_elf_link_hash_table_free (abfd);
2271 ret->loc_hash_table = htab_try_create (1024,
2272 elfNN_aarch64_local_htab_hash,
2273 elfNN_aarch64_local_htab_eq,
2275 ret->loc_hash_memory = objalloc_create ();
2276 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2278 elfNN_aarch64_link_hash_table_free (abfd);
2281 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2283 return &ret->root.root;
2287 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2288 bfd_vma offset, bfd_vma value)
2290 reloc_howto_type *howto;
2293 howto = elfNN_aarch64_howto_from_type (r_type);
2294 place = (input_section->output_section->vma + input_section->output_offset
2297 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2298 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2299 return _bfd_aarch64_elf_put_addend (input_bfd,
2300 input_section->contents + offset, r_type,
2304 static enum elf_aarch64_stub_type
2305 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2307 if (aarch64_valid_for_adrp_p (value, place))
2308 return aarch64_stub_adrp_branch;
2309 return aarch64_stub_long_branch;
2312 /* Determine the type of stub needed, if any, for a call. */
2314 static enum elf_aarch64_stub_type
2315 aarch64_type_of_stub (struct bfd_link_info *info,
2316 asection *input_sec,
2317 const Elf_Internal_Rela *rel,
2318 unsigned char st_type,
2319 struct elf_aarch64_link_hash_entry *hash,
2320 bfd_vma destination)
2323 bfd_signed_vma branch_offset;
2324 unsigned int r_type;
2325 struct elf_aarch64_link_hash_table *globals;
2326 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2327 bfd_boolean via_plt_p;
2329 if (st_type != STT_FUNC)
2332 globals = elf_aarch64_hash_table (info);
2333 via_plt_p = (globals->root.splt != NULL && hash != NULL
2334 && hash->root.plt.offset != (bfd_vma) - 1);
2339 /* Determine where the call point is. */
2340 location = (input_sec->output_offset
2341 + input_sec->output_section->vma + rel->r_offset);
2343 branch_offset = (bfd_signed_vma) (destination - location);
2345 r_type = ELFNN_R_TYPE (rel->r_info);
2347 /* We don't want to redirect any old unconditional jump in this way,
2348 only one which is being used for a sibcall, where it is
2349 acceptable for the IP0 and IP1 registers to be clobbered. */
2350 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2351 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2352 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2354 stub_type = aarch64_stub_long_branch;
2360 /* Build a name for an entry in the stub hash table. */
2363 elfNN_aarch64_stub_name (const asection *input_section,
2364 const asection *sym_sec,
2365 const struct elf_aarch64_link_hash_entry *hash,
2366 const Elf_Internal_Rela *rel)
2373 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2374 stub_name = bfd_malloc (len);
2375 if (stub_name != NULL)
2376 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2377 (unsigned int) input_section->id,
2378 hash->root.root.root.string,
2383 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2384 stub_name = bfd_malloc (len);
2385 if (stub_name != NULL)
2386 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2387 (unsigned int) input_section->id,
2388 (unsigned int) sym_sec->id,
2389 (unsigned int) ELFNN_R_SYM (rel->r_info),
2396 /* Look up an entry in the stub hash. Stub entries are cached because
2397 creating the stub name takes a bit of time. */
2399 static struct elf_aarch64_stub_hash_entry *
2400 elfNN_aarch64_get_stub_entry (const asection *input_section,
2401 const asection *sym_sec,
2402 struct elf_link_hash_entry *hash,
2403 const Elf_Internal_Rela *rel,
2404 struct elf_aarch64_link_hash_table *htab)
2406 struct elf_aarch64_stub_hash_entry *stub_entry;
2407 struct elf_aarch64_link_hash_entry *h =
2408 (struct elf_aarch64_link_hash_entry *) hash;
2409 const asection *id_sec;
2411 if ((input_section->flags & SEC_CODE) == 0)
2414 /* If this input section is part of a group of sections sharing one
2415 stub section, then use the id of the first section in the group.
2416 Stub names need to include a section id, as there may well be
2417 more than one stub used to reach say, printf, and we need to
2418 distinguish between them. */
2419 id_sec = htab->stub_group[input_section->id].link_sec;
2421 if (h != NULL && h->stub_cache != NULL
2422 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2424 stub_entry = h->stub_cache;
2430 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2431 if (stub_name == NULL)
2434 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2435 stub_name, FALSE, FALSE);
2437 h->stub_cache = stub_entry;
2446 /* Create a stub section. */
2449 _bfd_aarch64_create_stub_section (asection *section,
2450 struct elf_aarch64_link_hash_table *htab)
2456 namelen = strlen (section->name);
2457 len = namelen + sizeof (STUB_SUFFIX);
2458 s_name = bfd_alloc (htab->stub_bfd, len);
2462 memcpy (s_name, section->name, namelen);
2463 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2464 return (*htab->add_stub_section) (s_name, section);
2468 /* Find or create a stub section for a link section.
2470 Fix or create the stub section used to collect stubs attached to
2471 the specified link section. */
2474 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2475 struct elf_aarch64_link_hash_table *htab)
2477 if (htab->stub_group[link_section->id].stub_sec == NULL)
2478 htab->stub_group[link_section->id].stub_sec
2479 = _bfd_aarch64_create_stub_section (link_section, htab);
2480 return htab->stub_group[link_section->id].stub_sec;
2484 /* Find or create a stub section in the stub group for an input
2488 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2489 struct elf_aarch64_link_hash_table *htab)
2491 asection *link_sec = htab->stub_group[section->id].link_sec;
2492 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2496 /* Add a new stub entry in the stub group associated with an input
2497 section to the stub hash. Not all fields of the new stub entry are
2500 static struct elf_aarch64_stub_hash_entry *
2501 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2503 struct elf_aarch64_link_hash_table *htab)
2507 struct elf_aarch64_stub_hash_entry *stub_entry;
2509 link_sec = htab->stub_group[section->id].link_sec;
2510 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2512 /* Enter this entry into the linker stub hash table. */
2513 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2515 if (stub_entry == NULL)
2517 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2518 section->owner, stub_name);
2522 stub_entry->stub_sec = stub_sec;
2523 stub_entry->stub_offset = 0;
2524 stub_entry->id_sec = link_sec;
2529 /* Add a new stub entry in the final stub section to the stub hash.
2530 Not all fields of the new stub entry are initialised. */
2532 static struct elf_aarch64_stub_hash_entry *
2533 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
2534 asection *link_section,
2535 struct elf_aarch64_link_hash_table *htab)
2538 struct elf_aarch64_stub_hash_entry *stub_entry;
2540 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
2541 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2543 if (stub_entry == NULL)
2545 (*_bfd_error_handler) (_("cannot create stub entry %s"), stub_name);
2549 stub_entry->stub_sec = stub_sec;
2550 stub_entry->stub_offset = 0;
2551 stub_entry->id_sec = link_section;
2558 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2559 void *in_arg ATTRIBUTE_UNUSED)
2561 struct elf_aarch64_stub_hash_entry *stub_entry;
2566 bfd_vma veneered_insn_loc;
2567 bfd_vma veneer_entry_loc;
2568 bfd_signed_vma branch_offset = 0;
2569 unsigned int template_size;
2570 const uint32_t *template;
2573 /* Massage our args to the form they really have. */
2574 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2576 stub_sec = stub_entry->stub_sec;
2578 /* Make a note of the offset within the stubs for this entry. */
2579 stub_entry->stub_offset = stub_sec->size;
2580 loc = stub_sec->contents + stub_entry->stub_offset;
2582 stub_bfd = stub_sec->owner;
2584 /* This is the address of the stub destination. */
2585 sym_value = (stub_entry->target_value
2586 + stub_entry->target_section->output_offset
2587 + stub_entry->target_section->output_section->vma);
2589 if (stub_entry->stub_type == aarch64_stub_long_branch)
2591 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2592 + stub_sec->output_offset);
2594 /* See if we can relax the stub. */
2595 if (aarch64_valid_for_adrp_p (sym_value, place))
2596 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2599 switch (stub_entry->stub_type)
2601 case aarch64_stub_adrp_branch:
2602 template = aarch64_adrp_branch_stub;
2603 template_size = sizeof (aarch64_adrp_branch_stub);
2605 case aarch64_stub_long_branch:
2606 template = aarch64_long_branch_stub;
2607 template_size = sizeof (aarch64_long_branch_stub);
2609 case aarch64_stub_erratum_835769_veneer:
2610 template = aarch64_erratum_835769_stub;
2611 template_size = sizeof (aarch64_erratum_835769_stub);
2613 case aarch64_stub_erratum_843419_veneer:
2614 template = aarch64_erratum_843419_stub;
2615 template_size = sizeof (aarch64_erratum_843419_stub);
2621 for (i = 0; i < (template_size / sizeof template[0]); i++)
2623 bfd_putl32 (template[i], loc);
2627 template_size = (template_size + 7) & ~7;
2628 stub_sec->size += template_size;
2630 switch (stub_entry->stub_type)
2632 case aarch64_stub_adrp_branch:
2633 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2634 stub_entry->stub_offset, sym_value))
2635 /* The stub would not have been relaxed if the offset was out
2639 if (aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
2640 stub_entry->stub_offset + 4, sym_value))
2644 case aarch64_stub_long_branch:
2645 /* We want the value relative to the address 12 bytes back from the
2647 if (aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
2648 stub_entry->stub_offset + 16, sym_value + 12))
2652 case aarch64_stub_erratum_835769_veneer:
2653 veneered_insn_loc = stub_entry->target_section->output_section->vma
2654 + stub_entry->target_section->output_offset
2655 + stub_entry->target_value;
2656 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2657 + stub_entry->stub_sec->output_offset
2658 + stub_entry->stub_offset;
2659 branch_offset = veneered_insn_loc - veneer_entry_loc;
2660 branch_offset >>= 2;
2661 branch_offset &= 0x3ffffff;
2662 bfd_putl32 (stub_entry->veneered_insn,
2663 stub_sec->contents + stub_entry->stub_offset);
2664 bfd_putl32 (template[1] | branch_offset,
2665 stub_sec->contents + stub_entry->stub_offset + 4);
2668 case aarch64_stub_erratum_843419_veneer:
2669 if (aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
2670 stub_entry->stub_offset + 4, sym_value + 4))
2681 /* As above, but don't actually build the stub. Just bump offset so
2682 we know stub section sizes. */
2685 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2686 void *in_arg ATTRIBUTE_UNUSED)
2688 struct elf_aarch64_stub_hash_entry *stub_entry;
2691 /* Massage our args to the form they really have. */
2692 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2694 switch (stub_entry->stub_type)
2696 case aarch64_stub_adrp_branch:
2697 size = sizeof (aarch64_adrp_branch_stub);
2699 case aarch64_stub_long_branch:
2700 size = sizeof (aarch64_long_branch_stub);
2702 case aarch64_stub_erratum_835769_veneer:
2703 size = sizeof (aarch64_erratum_835769_stub);
2705 case aarch64_stub_erratum_843419_veneer:
2706 size = sizeof (aarch64_erratum_843419_stub);
2712 size = (size + 7) & ~7;
2713 stub_entry->stub_sec->size += size;
2717 /* External entry points for sizing and building linker stubs. */
2719 /* Set up various things so that we can make a list of input sections
2720 for each output section included in the link. Returns -1 on error,
2721 0 when no stubs will be needed, and 1 on success. */
2724 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2725 struct bfd_link_info *info)
2728 unsigned int bfd_count;
2729 int top_id, top_index;
2731 asection **input_list, **list;
2733 struct elf_aarch64_link_hash_table *htab =
2734 elf_aarch64_hash_table (info);
2736 if (!is_elf_hash_table (htab))
2739 /* Count the number of input BFDs and find the top input section id. */
2740 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2741 input_bfd != NULL; input_bfd = input_bfd->link.next)
2744 for (section = input_bfd->sections;
2745 section != NULL; section = section->next)
2747 if (top_id < section->id)
2748 top_id = section->id;
2751 htab->bfd_count = bfd_count;
2753 amt = sizeof (struct map_stub) * (top_id + 1);
2754 htab->stub_group = bfd_zmalloc (amt);
2755 if (htab->stub_group == NULL)
2758 /* We can't use output_bfd->section_count here to find the top output
2759 section index as some sections may have been removed, and
2760 _bfd_strip_section_from_output doesn't renumber the indices. */
2761 for (section = output_bfd->sections, top_index = 0;
2762 section != NULL; section = section->next)
2764 if (top_index < section->index)
2765 top_index = section->index;
2768 htab->top_index = top_index;
2769 amt = sizeof (asection *) * (top_index + 1);
2770 input_list = bfd_malloc (amt);
2771 htab->input_list = input_list;
2772 if (input_list == NULL)
2775 /* For sections we aren't interested in, mark their entries with a
2776 value we can check later. */
2777 list = input_list + top_index;
2779 *list = bfd_abs_section_ptr;
2780 while (list-- != input_list);
2782 for (section = output_bfd->sections;
2783 section != NULL; section = section->next)
2785 if ((section->flags & SEC_CODE) != 0)
2786 input_list[section->index] = NULL;
2792 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2793 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2795 /* The linker repeatedly calls this function for each input section,
2796 in the order that input sections are linked into output sections.
2797 Build lists of input sections to determine groupings between which
2798 we may insert linker stubs. */
2801 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2803 struct elf_aarch64_link_hash_table *htab =
2804 elf_aarch64_hash_table (info);
2806 if (isec->output_section->index <= htab->top_index)
2808 asection **list = htab->input_list + isec->output_section->index;
2810 if (*list != bfd_abs_section_ptr)
2812 /* Steal the link_sec pointer for our list. */
2813 /* This happens to make the list in reverse order,
2814 which is what we want. */
2815 PREV_SEC (isec) = *list;
2821 /* See whether we can group stub sections together. Grouping stub
2822 sections may result in fewer stubs. More importantly, we need to
2823 put all .init* and .fini* stubs at the beginning of the .init or
2824 .fini output sections respectively, because glibc splits the
2825 _init and _fini functions into multiple parts. Putting a stub in
2826 the middle of a function is not a good idea. */
2829 group_sections (struct elf_aarch64_link_hash_table *htab,
2830 bfd_size_type stub_group_size,
2831 bfd_boolean stubs_always_before_branch)
2833 asection **list = htab->input_list + htab->top_index;
2837 asection *tail = *list;
2839 if (tail == bfd_abs_section_ptr)
2842 while (tail != NULL)
2846 bfd_size_type total;
2850 while ((prev = PREV_SEC (curr)) != NULL
2851 && ((total += curr->output_offset - prev->output_offset)
2855 /* OK, the size from the start of CURR to the end is less
2856 than stub_group_size and thus can be handled by one stub
2857 section. (Or the tail section is itself larger than
2858 stub_group_size, in which case we may be toast.)
2859 We should really be keeping track of the total size of
2860 stubs added here, as stubs contribute to the final output
2864 prev = PREV_SEC (tail);
2865 /* Set up this stub group. */
2866 htab->stub_group[tail->id].link_sec = curr;
2868 while (tail != curr && (tail = prev) != NULL);
2870 /* But wait, there's more! Input sections up to stub_group_size
2871 bytes before the stub section can be handled by it too. */
2872 if (!stubs_always_before_branch)
2876 && ((total += tail->output_offset - prev->output_offset)
2880 prev = PREV_SEC (tail);
2881 htab->stub_group[tail->id].link_sec = curr;
2887 while (list-- != htab->input_list);
2889 free (htab->input_list);
2894 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2896 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2897 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2898 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2899 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2900 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2901 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2903 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2904 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2905 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2906 #define AARCH64_ZR 0x1f
2908 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2909 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. */
2911 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2912 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2913 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2914 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2915 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2916 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2917 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2918 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2919 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2920 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2921 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2922 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2923 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2924 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2925 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2926 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2927 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2928 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
2930 /* Classify an INSN if it is indeed a load/store.
2932 Return TRUE if INSN is a LD/ST instruction otherwise return FALSE.
2934 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2937 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.
2942 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2943 bfd_boolean *pair, bfd_boolean *load)
2951 /* Bail out quickly if INSN doesn't fall into the the load-store
2953 if (!AARCH64_LDST (insn))
2958 if (AARCH64_LDST_EX (insn))
2960 *rt = AARCH64_RT (insn);
2962 if (AARCH64_BIT (insn, 21) == 1)
2965 *rt2 = AARCH64_RT2 (insn);
2967 *load = AARCH64_LD (insn);
2970 else if (AARCH64_LDST_NAP (insn)
2971 || AARCH64_LDSTP_PI (insn)
2972 || AARCH64_LDSTP_O (insn)
2973 || AARCH64_LDSTP_PRE (insn))
2976 *rt = AARCH64_RT (insn);
2977 *rt2 = AARCH64_RT2 (insn);
2978 *load = AARCH64_LD (insn);
2981 else if (AARCH64_LDST_PCREL (insn)
2982 || AARCH64_LDST_UI (insn)
2983 || AARCH64_LDST_PIIMM (insn)
2984 || AARCH64_LDST_U (insn)
2985 || AARCH64_LDST_PREIMM (insn)
2986 || AARCH64_LDST_RO (insn)
2987 || AARCH64_LDST_UIMM (insn))
2989 *rt = AARCH64_RT (insn);
2991 if (AARCH64_LDST_PCREL (insn))
2993 opc = AARCH64_BITS (insn, 22, 2);
2994 v = AARCH64_BIT (insn, 26);
2995 opc_v = opc | (v << 2);
2996 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
2997 || opc_v == 5 || opc_v == 7);
3000 else if (AARCH64_LDST_SIMD_M (insn)
3001 || AARCH64_LDST_SIMD_M_PI (insn))
3003 *rt = AARCH64_RT (insn);
3004 *load = AARCH64_BIT (insn, 22);
3005 opcode = (insn >> 12) & 0xf;
3032 else if (AARCH64_LDST_SIMD_S (insn)
3033 || AARCH64_LDST_SIMD_S_PI (insn))
3035 *rt = AARCH64_RT (insn);
3036 r = (insn >> 21) & 1;
3037 *load = AARCH64_BIT (insn, 22);
3038 opcode = (insn >> 13) & 0x7;
3050 *rt2 = *rt + (r == 0 ? 2 : 3);
3058 *rt2 = *rt + (r == 0 ? 2 : 3);
3070 /* Return TRUE if INSN is multiply-accumulate. */
3073 aarch64_mlxl_p (uint32_t insn)
3075 uint32_t op31 = AARCH64_OP31 (insn);
3077 if (AARCH64_MAC (insn)
3078 && (op31 == 0 || op31 == 1 || op31 == 5)
3079 /* Exclude MUL instructions which are encoded as a multiple accumulate
3081 && AARCH64_RA (insn) != AARCH64_ZR)
3087 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3088 it is possible for a 64-bit multiply-accumulate instruction to generate an
3089 incorrect result. The details are quite complex and hard to
3090 determine statically, since branches in the code may exist in some
3091 circumstances, but all cases end with a memory (load, store, or
3092 prefetch) instruction followed immediately by the multiply-accumulate
3093 operation. We employ a linker patching technique, by moving the potentially
3094 affected multiply-accumulate instruction into a patch region and replacing
3095 the original instruction with a branch to the patch. This function checks
3096 if INSN_1 is the memory operation followed by a multiply-accumulate
3097 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3098 if INSN_1 and INSN_2 are safe. */
3101 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3111 if (aarch64_mlxl_p (insn_2)
3112 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3114 /* Any SIMD memory op is independent of the subsequent MLA
3115 by definition of the erratum. */
3116 if (AARCH64_BIT (insn_1, 26))
3119 /* If not SIMD, check for integer memory ops and MLA relationship. */
3120 rn = AARCH64_RN (insn_2);
3121 ra = AARCH64_RA (insn_2);
3122 rm = AARCH64_RM (insn_2);
3124 /* If this is a load and there's a true(RAW) dependency, we are safe
3125 and this is not an erratum sequence. */
3127 (rt == rn || rt == rm || rt == ra
3128 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3131 /* We conservatively put out stubs for all other cases (including
3139 /* Used to order a list of mapping symbols by address. */
3142 elf_aarch64_compare_mapping (const void *a, const void *b)
3144 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3145 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3147 if (amap->vma > bmap->vma)
3149 else if (amap->vma < bmap->vma)
3151 else if (amap->type > bmap->type)
3152 /* Ensure results do not depend on the host qsort for objects with
3153 multiple mapping symbols at the same address by sorting on type
3156 else if (amap->type < bmap->type)
3164 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3166 char *stub_name = (char *) bfd_malloc
3167 (strlen ("__erratum_835769_veneer_") + 16);
3168 sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes);
3172 /* Scan for Cortex-A53 erratum 835769 sequence.
3174 Return TRUE else FALSE on abnormal termination. */
3177 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3178 struct bfd_link_info *info,
3179 unsigned int *num_fixes_p)
3182 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3183 unsigned int num_fixes = *num_fixes_p;
3188 for (section = input_bfd->sections;
3190 section = section->next)
3192 bfd_byte *contents = NULL;
3193 struct _aarch64_elf_section_data *sec_data;
3196 if (elf_section_type (section) != SHT_PROGBITS
3197 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3198 || (section->flags & SEC_EXCLUDE) != 0
3199 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3200 || (section->output_section == bfd_abs_section_ptr))
3203 if (elf_section_data (section)->this_hdr.contents != NULL)
3204 contents = elf_section_data (section)->this_hdr.contents;
3205 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3208 sec_data = elf_aarch64_section_data (section);
3210 qsort (sec_data->map, sec_data->mapcount,
3211 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3213 for (span = 0; span < sec_data->mapcount; span++)
3215 unsigned int span_start = sec_data->map[span].vma;
3216 unsigned int span_end = ((span == sec_data->mapcount - 1)
3217 ? sec_data->map[0].vma + section->size
3218 : sec_data->map[span + 1].vma);
3220 char span_type = sec_data->map[span].type;
3222 if (span_type == 'd')
3225 for (i = span_start; i + 4 < span_end; i += 4)
3227 uint32_t insn_1 = bfd_getl32 (contents + i);
3228 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3230 if (aarch64_erratum_sequence (insn_1, insn_2))
3232 struct elf_aarch64_stub_hash_entry *stub_entry;
3233 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3237 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3243 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3244 stub_entry->target_section = section;
3245 stub_entry->target_value = i + 4;
3246 stub_entry->veneered_insn = insn_2;
3247 stub_entry->output_name = stub_name;
3252 if (elf_section_data (section)->this_hdr.contents == NULL)
3256 *num_fixes_p = num_fixes;
3262 /* Test if instruction INSN is ADRP. */
3265 _bfd_aarch64_adrp_p (uint32_t insn)
3267 return ((insn & 0x9f000000) == 0x90000000);
3271 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
3274 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
3282 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
3285 && AARCH64_LDST_UIMM (insn_3)
3286 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
3290 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
3292 Return TRUE if section CONTENTS at offset I contains one of the
3293 erratum 843419 sequences, otherwise return FALSE. If a sequence is
3294 seen set P_VENEER_I to the offset of the final LOAD/STORE
3295 instruction in the sequence.
3299 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
3300 bfd_vma i, bfd_vma span_end,
3301 bfd_vma *p_veneer_i)
3303 uint32_t insn_1 = bfd_getl32 (contents + i);
3305 if (!_bfd_aarch64_adrp_p (insn_1))
3308 if (span_end < i + 12)
3311 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3312 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
3314 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
3317 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
3319 *p_veneer_i = i + 8;
3323 if (span_end < i + 16)
3326 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
3328 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
3330 *p_veneer_i = i + 12;
3338 /* Resize all stub sections. */
3341 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3345 /* OK, we've added some stubs. Find out the new size of the
3347 for (section = htab->stub_bfd->sections;
3348 section != NULL; section = section->next)
3350 /* Ignore non-stub sections. */
3351 if (!strstr (section->name, STUB_SUFFIX))
3356 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3358 for (section = htab->stub_bfd->sections;
3359 section != NULL; section = section->next)
3361 if (!strstr (section->name, STUB_SUFFIX))
3367 /* Ensure all stub sections have a size which is a multiple of
3368 4096. This is important in order to ensure that the insertion
3369 of stub sections does not in itself move existing code around
3370 in such a way that new errata sequences are created. */
3371 if (htab->fix_erratum_843419)
3373 section->size = BFD_ALIGN (section->size, 0x1000);
3378 /* Construct an erratum 843419 workaround stub name.
3382 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
3385 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
3386 char *stub_name = bfd_malloc (len);
3388 if (stub_name != NULL)
3389 snprintf (stub_name, len, "e843419@%04x_%08x_%" BFD_VMA_FMT "x",
3390 input_section->owner->id,
3396 /* Build a stub_entry structure describing an 843419 fixup.
3398 The stub_entry constructed is populated with the bit pattern INSN
3399 of the instruction located at OFFSET within input SECTION.
3401 Returns TRUE on success. */
3404 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
3405 bfd_vma adrp_offset,
3406 bfd_vma ldst_offset,
3408 struct bfd_link_info *info)
3410 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3412 struct elf_aarch64_stub_hash_entry *stub_entry;
3414 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
3415 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3423 /* We always place an 843419 workaround veneer in the stub section
3424 attached to the input section in which an erratum sequence has
3425 been found. This ensures that later in the link process (in
3426 elfNN_aarch64_write_section) when we copy the veneered
3427 instruction from the input section into the stub section the
3428 copied instruction will have had any relocations applied to it.
3429 If we placed workaround veneers in any other stub section then we
3430 could not assume that all relocations have been processed on the
3431 corresponding input section at the point we output the stub
3435 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
3436 if (stub_entry == NULL)
3442 stub_entry->adrp_offset = adrp_offset;
3443 stub_entry->target_value = ldst_offset;
3444 stub_entry->target_section = section;
3445 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
3446 stub_entry->veneered_insn = insn;
3447 stub_entry->output_name = stub_name;
3453 /* Scan an input section looking for the signature of erratum 843419.
3455 Scans input SECTION in INPUT_BFD looking for erratum 843419
3456 signatures, for each signature found a stub_entry is created
3457 describing the location of the erratum for subsequent fixup.
3459 Return TRUE on successful scan, FALSE on failure to scan.
3463 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
3464 struct bfd_link_info *info)
3466 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3471 if (elf_section_type (section) != SHT_PROGBITS
3472 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3473 || (section->flags & SEC_EXCLUDE) != 0
3474 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3475 || (section->output_section == bfd_abs_section_ptr))
3480 bfd_byte *contents = NULL;
3481 struct _aarch64_elf_section_data *sec_data;
3484 if (elf_section_data (section)->this_hdr.contents != NULL)
3485 contents = elf_section_data (section)->this_hdr.contents;
3486 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3489 sec_data = elf_aarch64_section_data (section);
3491 qsort (sec_data->map, sec_data->mapcount,
3492 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3494 for (span = 0; span < sec_data->mapcount; span++)
3496 unsigned int span_start = sec_data->map[span].vma;
3497 unsigned int span_end = ((span == sec_data->mapcount - 1)
3498 ? sec_data->map[0].vma + section->size
3499 : sec_data->map[span + 1].vma);
3501 char span_type = sec_data->map[span].type;
3503 if (span_type == 'd')
3506 for (i = span_start; i + 8 < span_end; i += 4)
3508 bfd_vma vma = (section->output_section->vma
3509 + section->output_offset
3513 if (_bfd_aarch64_erratum_843419_p
3514 (contents, vma, i, span_end, &veneer_i))
3516 uint32_t insn = bfd_getl32 (contents + veneer_i);
3518 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
3525 if (elf_section_data (section)->this_hdr.contents == NULL)
3534 /* Determine and set the size of the stub section for a final link.
3536 The basic idea here is to examine all the relocations looking for
3537 PC-relative calls to a target that is unreachable with a "bl"
3541 elfNN_aarch64_size_stubs (bfd *output_bfd,
3543 struct bfd_link_info *info,
3544 bfd_signed_vma group_size,
3545 asection * (*add_stub_section) (const char *,
3547 void (*layout_sections_again) (void))
3549 bfd_size_type stub_group_size;
3550 bfd_boolean stubs_always_before_branch;
3551 bfd_boolean stub_changed = FALSE;
3552 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3553 unsigned int num_erratum_835769_fixes = 0;
3555 /* Propagate mach to stub bfd, because it may not have been
3556 finalized when we created stub_bfd. */
3557 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3558 bfd_get_mach (output_bfd));
3560 /* Stash our params away. */
3561 htab->stub_bfd = stub_bfd;
3562 htab->add_stub_section = add_stub_section;
3563 htab->layout_sections_again = layout_sections_again;
3564 stubs_always_before_branch = group_size < 0;
3566 stub_group_size = -group_size;
3568 stub_group_size = group_size;
3570 if (stub_group_size == 1)
3572 /* Default values. */
3573 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3574 stub_group_size = 127 * 1024 * 1024;
3577 group_sections (htab, stub_group_size, stubs_always_before_branch);
3579 (*htab->layout_sections_again) ();
3581 if (htab->fix_erratum_835769)
3585 for (input_bfd = info->input_bfds;
3586 input_bfd != NULL; input_bfd = input_bfd->link.next)
3587 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3588 &num_erratum_835769_fixes))
3591 _bfd_aarch64_resize_stubs (htab);
3592 (*htab->layout_sections_again) ();
3595 if (htab->fix_erratum_843419)
3599 for (input_bfd = info->input_bfds;
3601 input_bfd = input_bfd->link.next)
3605 for (section = input_bfd->sections;
3607 section = section->next)
3608 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
3612 _bfd_aarch64_resize_stubs (htab);
3613 (*htab->layout_sections_again) ();
3620 for (input_bfd = info->input_bfds;
3621 input_bfd != NULL; input_bfd = input_bfd->link.next)
3623 Elf_Internal_Shdr *symtab_hdr;
3625 Elf_Internal_Sym *local_syms = NULL;
3627 /* We'll need the symbol table in a second. */
3628 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3629 if (symtab_hdr->sh_info == 0)
3632 /* Walk over each section attached to the input bfd. */
3633 for (section = input_bfd->sections;
3634 section != NULL; section = section->next)
3636 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3638 /* If there aren't any relocs, then there's nothing more
3640 if ((section->flags & SEC_RELOC) == 0
3641 || section->reloc_count == 0
3642 || (section->flags & SEC_CODE) == 0)
3645 /* If this section is a link-once section that will be
3646 discarded, then don't create any stubs. */
3647 if (section->output_section == NULL
3648 || section->output_section->owner != output_bfd)
3651 /* Get the relocs. */
3653 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3654 NULL, info->keep_memory);
3655 if (internal_relocs == NULL)
3656 goto error_ret_free_local;
3658 /* Now examine each relocation. */
3659 irela = internal_relocs;
3660 irelaend = irela + section->reloc_count;
3661 for (; irela < irelaend; irela++)
3663 unsigned int r_type, r_indx;
3664 enum elf_aarch64_stub_type stub_type;
3665 struct elf_aarch64_stub_hash_entry *stub_entry;
3668 bfd_vma destination;
3669 struct elf_aarch64_link_hash_entry *hash;
3670 const char *sym_name;
3672 const asection *id_sec;
3673 unsigned char st_type;
3676 r_type = ELFNN_R_TYPE (irela->r_info);
3677 r_indx = ELFNN_R_SYM (irela->r_info);
3679 if (r_type >= (unsigned int) R_AARCH64_end)
3681 bfd_set_error (bfd_error_bad_value);
3682 error_ret_free_internal:
3683 if (elf_section_data (section)->relocs == NULL)
3684 free (internal_relocs);
3685 goto error_ret_free_local;
3688 /* Only look for stubs on unconditional branch and
3689 branch and link instructions. */
3690 if (r_type != (unsigned int) AARCH64_R (CALL26)
3691 && r_type != (unsigned int) AARCH64_R (JUMP26))
3694 /* Now determine the call target, its name, value,
3701 if (r_indx < symtab_hdr->sh_info)
3703 /* It's a local symbol. */
3704 Elf_Internal_Sym *sym;
3705 Elf_Internal_Shdr *hdr;
3707 if (local_syms == NULL)
3710 = (Elf_Internal_Sym *) symtab_hdr->contents;
3711 if (local_syms == NULL)
3713 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3714 symtab_hdr->sh_info, 0,
3716 if (local_syms == NULL)
3717 goto error_ret_free_internal;
3720 sym = local_syms + r_indx;
3721 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3722 sym_sec = hdr->bfd_section;
3724 /* This is an undefined symbol. It can never
3728 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3729 sym_value = sym->st_value;
3730 destination = (sym_value + irela->r_addend
3731 + sym_sec->output_offset
3732 + sym_sec->output_section->vma);
3733 st_type = ELF_ST_TYPE (sym->st_info);
3735 = bfd_elf_string_from_elf_section (input_bfd,
3736 symtab_hdr->sh_link,
3743 e_indx = r_indx - symtab_hdr->sh_info;
3744 hash = ((struct elf_aarch64_link_hash_entry *)
3745 elf_sym_hashes (input_bfd)[e_indx]);
3747 while (hash->root.root.type == bfd_link_hash_indirect
3748 || hash->root.root.type == bfd_link_hash_warning)
3749 hash = ((struct elf_aarch64_link_hash_entry *)
3750 hash->root.root.u.i.link);
3752 if (hash->root.root.type == bfd_link_hash_defined
3753 || hash->root.root.type == bfd_link_hash_defweak)
3755 struct elf_aarch64_link_hash_table *globals =
3756 elf_aarch64_hash_table (info);
3757 sym_sec = hash->root.root.u.def.section;
3758 sym_value = hash->root.root.u.def.value;
3759 /* For a destination in a shared library,
3760 use the PLT stub as target address to
3761 decide whether a branch stub is
3763 if (globals->root.splt != NULL && hash != NULL
3764 && hash->root.plt.offset != (bfd_vma) - 1)
3766 sym_sec = globals->root.splt;
3767 sym_value = hash->root.plt.offset;
3768 if (sym_sec->output_section != NULL)
3769 destination = (sym_value
3770 + sym_sec->output_offset
3772 sym_sec->output_section->vma);
3774 else if (sym_sec->output_section != NULL)
3775 destination = (sym_value + irela->r_addend
3776 + sym_sec->output_offset
3777 + sym_sec->output_section->vma);
3779 else if (hash->root.root.type == bfd_link_hash_undefined
3780 || (hash->root.root.type
3781 == bfd_link_hash_undefweak))
3783 /* For a shared library, use the PLT stub as
3784 target address to decide whether a long
3785 branch stub is needed.
3786 For absolute code, they cannot be handled. */
3787 struct elf_aarch64_link_hash_table *globals =
3788 elf_aarch64_hash_table (info);
3790 if (globals->root.splt != NULL && hash != NULL
3791 && hash->root.plt.offset != (bfd_vma) - 1)
3793 sym_sec = globals->root.splt;
3794 sym_value = hash->root.plt.offset;
3795 if (sym_sec->output_section != NULL)
3796 destination = (sym_value
3797 + sym_sec->output_offset
3799 sym_sec->output_section->vma);
3806 bfd_set_error (bfd_error_bad_value);
3807 goto error_ret_free_internal;
3809 st_type = ELF_ST_TYPE (hash->root.type);
3810 sym_name = hash->root.root.root.string;
3813 /* Determine what (if any) linker stub is needed. */
3814 stub_type = aarch64_type_of_stub
3815 (info, section, irela, st_type, hash, destination);
3816 if (stub_type == aarch64_stub_none)
3819 /* Support for grouping stub sections. */
3820 id_sec = htab->stub_group[section->id].link_sec;
3822 /* Get the name of this stub. */
3823 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3826 goto error_ret_free_internal;
3829 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3830 stub_name, FALSE, FALSE);
3831 if (stub_entry != NULL)
3833 /* The proper stub has already been created. */
3838 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3839 (stub_name, section, htab);
3840 if (stub_entry == NULL)
3843 goto error_ret_free_internal;
3846 stub_entry->target_value = sym_value;
3847 stub_entry->target_section = sym_sec;
3848 stub_entry->stub_type = stub_type;
3849 stub_entry->h = hash;
3850 stub_entry->st_type = st_type;
3852 if (sym_name == NULL)
3853 sym_name = "unnamed";
3854 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3855 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3856 if (stub_entry->output_name == NULL)
3859 goto error_ret_free_internal;
3862 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3865 stub_changed = TRUE;
3868 /* We're done with the internal relocs, free them. */
3869 if (elf_section_data (section)->relocs == NULL)
3870 free (internal_relocs);
3877 _bfd_aarch64_resize_stubs (htab);
3879 /* Ask the linker to do its stuff. */
3880 (*htab->layout_sections_again) ();
3881 stub_changed = FALSE;
3886 error_ret_free_local:
3890 /* Build all the stubs associated with the current output file. The
3891 stubs are kept in a hash table attached to the main linker hash
3892 table. We also set up the .plt entries for statically linked PIC
3893 functions here. This function is called via aarch64_elf_finish in the
3897 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3900 struct bfd_hash_table *table;
3901 struct elf_aarch64_link_hash_table *htab;
3903 htab = elf_aarch64_hash_table (info);
3905 for (stub_sec = htab->stub_bfd->sections;
3906 stub_sec != NULL; stub_sec = stub_sec->next)
3910 /* Ignore non-stub sections. */
3911 if (!strstr (stub_sec->name, STUB_SUFFIX))
3914 /* Allocate memory to hold the linker stubs. */
3915 size = stub_sec->size;
3916 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3917 if (stub_sec->contents == NULL && size != 0)
3921 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
3922 stub_sec->size += 4;
3925 /* Build the stubs as directed by the stub hash table. */
3926 table = &htab->stub_hash_table;
3927 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3933 /* Add an entry to the code/data map for section SEC. */
3936 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3938 struct _aarch64_elf_section_data *sec_data =
3939 elf_aarch64_section_data (sec);
3940 unsigned int newidx;
3942 if (sec_data->map == NULL)
3944 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3945 sec_data->mapcount = 0;
3946 sec_data->mapsize = 1;
3949 newidx = sec_data->mapcount++;
3951 if (sec_data->mapcount > sec_data->mapsize)
3953 sec_data->mapsize *= 2;
3954 sec_data->map = bfd_realloc_or_free
3955 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3960 sec_data->map[newidx].vma = vma;
3961 sec_data->map[newidx].type = type;
3966 /* Initialise maps of insn/data for input BFDs. */
3968 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3970 Elf_Internal_Sym *isymbuf;
3971 Elf_Internal_Shdr *hdr;
3972 unsigned int i, localsyms;
3974 /* Make sure that we are dealing with an AArch64 elf binary. */
3975 if (!is_aarch64_elf (abfd))
3978 if ((abfd->flags & DYNAMIC) != 0)
3981 hdr = &elf_symtab_hdr (abfd);
3982 localsyms = hdr->sh_info;
3984 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3985 should contain the number of local symbols, which should come before any
3986 global symbols. Mapping symbols are always local. */
3987 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3989 /* No internal symbols read? Skip this BFD. */
3990 if (isymbuf == NULL)
3993 for (i = 0; i < localsyms; i++)
3995 Elf_Internal_Sym *isym = &isymbuf[i];
3996 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3999 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
4001 name = bfd_elf_string_from_elf_section (abfd,
4005 if (bfd_is_aarch64_special_symbol_name
4006 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
4007 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
4012 /* Set option values needed during linking. */
4014 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
4015 struct bfd_link_info *link_info,
4017 int no_wchar_warn, int pic_veneer,
4018 int fix_erratum_835769,
4019 int fix_erratum_843419)
4021 struct elf_aarch64_link_hash_table *globals;
4023 globals = elf_aarch64_hash_table (link_info);
4024 globals->pic_veneer = pic_veneer;
4025 globals->fix_erratum_835769 = fix_erratum_835769;
4026 globals->fix_erratum_843419 = fix_erratum_843419;
4027 globals->fix_erratum_843419_adr = TRUE;
4029 BFD_ASSERT (is_aarch64_elf (output_bfd));
4030 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
4031 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
4035 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
4036 struct elf_aarch64_link_hash_table
4037 *globals, struct bfd_link_info *info,
4038 bfd_vma value, bfd *output_bfd,
4039 bfd_boolean *unresolved_reloc_p)
4041 bfd_vma off = (bfd_vma) - 1;
4042 asection *basegot = globals->root.sgot;
4043 bfd_boolean dyn = globals->root.dynamic_sections_created;
4047 BFD_ASSERT (basegot != NULL);
4048 off = h->got.offset;
4049 BFD_ASSERT (off != (bfd_vma) - 1);
4050 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
4052 && SYMBOL_REFERENCES_LOCAL (info, h))
4053 || (ELF_ST_VISIBILITY (h->other)
4054 && h->root.type == bfd_link_hash_undefweak))
4056 /* This is actually a static link, or it is a -Bsymbolic link
4057 and the symbol is defined locally. We must initialize this
4058 entry in the global offset table. Since the offset must
4059 always be a multiple of 8 (4 in the case of ILP32), we use
4060 the least significant bit to record whether we have
4061 initialized it already.
4062 When doing a dynamic link, we create a .rel(a).got relocation
4063 entry to initialize the value. This is done in the
4064 finish_dynamic_symbol routine. */
4069 bfd_put_NN (output_bfd, value, basegot->contents + off);
4074 *unresolved_reloc_p = FALSE;
4076 off = off + basegot->output_section->vma + basegot->output_offset;
4082 /* Change R_TYPE to a more efficient access model where possible,
4083 return the new reloc type. */
4085 static bfd_reloc_code_real_type
4086 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
4087 struct elf_link_hash_entry *h)
4089 bfd_boolean is_local = h == NULL;
4093 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4094 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4096 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4097 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
4099 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4101 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4104 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4106 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4107 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4109 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4110 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4112 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4113 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
4115 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4116 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
4118 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4119 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
4121 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4124 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4126 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
4127 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4129 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4130 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4131 /* Instructions with these relocations will become NOPs. */
4132 return BFD_RELOC_AARCH64_NONE;
4142 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
4146 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4147 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4148 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4149 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4150 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4151 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4154 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4155 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4156 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4157 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
4158 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
4159 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4162 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4163 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4164 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4165 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4166 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4167 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4168 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4169 return GOT_TLSDESC_GD;
4171 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4172 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4173 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4174 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4177 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4178 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4179 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4180 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4181 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4182 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4183 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4184 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4194 aarch64_can_relax_tls (bfd *input_bfd,
4195 struct bfd_link_info *info,
4196 bfd_reloc_code_real_type r_type,
4197 struct elf_link_hash_entry *h,
4198 unsigned long r_symndx)
4200 unsigned int symbol_got_type;
4201 unsigned int reloc_got_type;
4203 if (! IS_AARCH64_TLS_RELOC (r_type))
4206 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
4207 reloc_got_type = aarch64_reloc_got_type (r_type);
4209 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
4215 if (h && h->root.type == bfd_link_hash_undefweak)
4221 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
4224 static bfd_reloc_code_real_type
4225 aarch64_tls_transition (bfd *input_bfd,
4226 struct bfd_link_info *info,
4227 unsigned int r_type,
4228 struct elf_link_hash_entry *h,
4229 unsigned long r_symndx)
4231 bfd_reloc_code_real_type bfd_r_type
4232 = elfNN_aarch64_bfd_reloc_from_type (r_type);
4234 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
4237 return aarch64_tls_transition_without_check (bfd_r_type, h);
4240 /* Return the base VMA address which should be subtracted from real addresses
4241 when resolving R_AARCH64_TLS_DTPREL relocation. */
4244 dtpoff_base (struct bfd_link_info *info)
4246 /* If tls_sec is NULL, we should have signalled an error already. */
4247 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
4248 return elf_hash_table (info)->tls_sec->vma;
4251 /* Return the base VMA address which should be subtracted from real addresses
4252 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
4255 tpoff_base (struct bfd_link_info *info)
4257 struct elf_link_hash_table *htab = elf_hash_table (info);
4259 /* If tls_sec is NULL, we should have signalled an error already. */
4260 BFD_ASSERT (htab->tls_sec != NULL);
4262 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
4263 htab->tls_sec->alignment_power);
4264 return htab->tls_sec->vma - base;
4268 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4269 unsigned long r_symndx)
4271 /* Calculate the address of the GOT entry for symbol
4272 referred to in h. */
4274 return &h->got.offset;
4278 struct elf_aarch64_local_symbol *l;
4280 l = elf_aarch64_locals (input_bfd);
4281 return &l[r_symndx].got_offset;
4286 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4287 unsigned long r_symndx)
4290 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
4295 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
4296 unsigned long r_symndx)
4299 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4304 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4305 unsigned long r_symndx)
4308 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4314 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4315 unsigned long r_symndx)
4317 /* Calculate the address of the GOT entry for symbol
4318 referred to in h. */
4321 struct elf_aarch64_link_hash_entry *eh;
4322 eh = (struct elf_aarch64_link_hash_entry *) h;
4323 return &eh->tlsdesc_got_jump_table_offset;
4328 struct elf_aarch64_local_symbol *l;
4330 l = elf_aarch64_locals (input_bfd);
4331 return &l[r_symndx].tlsdesc_got_jump_table_offset;
4336 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4337 unsigned long r_symndx)
4340 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4345 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
4346 struct elf_link_hash_entry *h,
4347 unsigned long r_symndx)
4350 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4355 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4356 unsigned long r_symndx)
4359 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4364 /* Data for make_branch_to_erratum_835769_stub(). */
4366 struct erratum_835769_branch_to_stub_data
4368 struct bfd_link_info *info;
4369 asection *output_section;
4373 /* Helper to insert branches to erratum 835769 stubs in the right
4374 places for a particular section. */
4377 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4380 struct elf_aarch64_stub_hash_entry *stub_entry;
4381 struct erratum_835769_branch_to_stub_data *data;
4383 unsigned long branch_insn = 0;
4384 bfd_vma veneered_insn_loc, veneer_entry_loc;
4385 bfd_signed_vma branch_offset;
4386 unsigned int target;
4389 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4390 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4392 if (stub_entry->target_section != data->output_section
4393 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4396 contents = data->contents;
4397 veneered_insn_loc = stub_entry->target_section->output_section->vma
4398 + stub_entry->target_section->output_offset
4399 + stub_entry->target_value;
4400 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4401 + stub_entry->stub_sec->output_offset
4402 + stub_entry->stub_offset;
4403 branch_offset = veneer_entry_loc - veneered_insn_loc;
4405 abfd = stub_entry->target_section->owner;
4406 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4407 (*_bfd_error_handler)
4408 (_("%B: error: Erratum 835769 stub out "
4409 "of range (input file too large)"), abfd);
4411 target = stub_entry->target_value;
4412 branch_insn = 0x14000000;
4413 branch_offset >>= 2;
4414 branch_offset &= 0x3ffffff;
4415 branch_insn |= branch_offset;
4416 bfd_putl32 (branch_insn, &contents[target]);
4423 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
4426 struct elf_aarch64_stub_hash_entry *stub_entry
4427 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4428 struct erratum_835769_branch_to_stub_data *data
4429 = (struct erratum_835769_branch_to_stub_data *) in_arg;
4430 struct bfd_link_info *info;
4431 struct elf_aarch64_link_hash_table *htab;
4439 contents = data->contents;
4440 section = data->output_section;
4442 htab = elf_aarch64_hash_table (info);
4444 if (stub_entry->target_section != section
4445 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
4448 insn = bfd_getl32 (contents + stub_entry->target_value);
4450 stub_entry->stub_sec->contents + stub_entry->stub_offset);
4452 place = (section->output_section->vma + section->output_offset
4453 + stub_entry->adrp_offset);
4454 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
4456 if ((insn & AARCH64_ADRP_OP_MASK) != AARCH64_ADRP_OP)
4459 bfd_signed_vma imm =
4460 (_bfd_aarch64_sign_extend
4461 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
4464 if (htab->fix_erratum_843419_adr
4465 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
4467 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
4468 | AARCH64_RT (insn));
4469 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
4473 bfd_vma veneered_insn_loc;
4474 bfd_vma veneer_entry_loc;
4475 bfd_signed_vma branch_offset;
4476 uint32_t branch_insn;
4478 veneered_insn_loc = stub_entry->target_section->output_section->vma
4479 + stub_entry->target_section->output_offset
4480 + stub_entry->target_value;
4481 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4482 + stub_entry->stub_sec->output_offset
4483 + stub_entry->stub_offset;
4484 branch_offset = veneer_entry_loc - veneered_insn_loc;
4486 abfd = stub_entry->target_section->owner;
4487 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4488 (*_bfd_error_handler)
4489 (_("%B: error: Erratum 843419 stub out "
4490 "of range (input file too large)"), abfd);
4492 branch_insn = 0x14000000;
4493 branch_offset >>= 2;
4494 branch_offset &= 0x3ffffff;
4495 branch_insn |= branch_offset;
4496 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
4503 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4504 struct bfd_link_info *link_info,
4509 struct elf_aarch64_link_hash_table *globals =
4510 elf_aarch64_hash_table (link_info);
4512 if (globals == NULL)
4515 /* Fix code to point to erratum 835769 stubs. */
4516 if (globals->fix_erratum_835769)
4518 struct erratum_835769_branch_to_stub_data data;
4520 data.info = link_info;
4521 data.output_section = sec;
4522 data.contents = contents;
4523 bfd_hash_traverse (&globals->stub_hash_table,
4524 make_branch_to_erratum_835769_stub, &data);
4527 if (globals->fix_erratum_843419)
4529 struct erratum_835769_branch_to_stub_data data;
4531 data.info = link_info;
4532 data.output_section = sec;
4533 data.contents = contents;
4534 bfd_hash_traverse (&globals->stub_hash_table,
4535 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
4541 /* Perform a relocation as part of a final link. */
4542 static bfd_reloc_status_type
4543 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4546 asection *input_section,
4548 Elf_Internal_Rela *rel,
4550 struct bfd_link_info *info,
4552 struct elf_link_hash_entry *h,
4553 bfd_boolean *unresolved_reloc_p,
4554 bfd_boolean save_addend,
4555 bfd_vma *saved_addend,
4556 Elf_Internal_Sym *sym)
4558 Elf_Internal_Shdr *symtab_hdr;
4559 unsigned int r_type = howto->type;
4560 bfd_reloc_code_real_type bfd_r_type
4561 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4562 bfd_reloc_code_real_type new_bfd_r_type;
4563 unsigned long r_symndx;
4564 bfd_byte *hit_data = contents + rel->r_offset;
4566 bfd_signed_vma signed_addend;
4567 struct elf_aarch64_link_hash_table *globals;
4568 bfd_boolean weak_undef_p;
4571 globals = elf_aarch64_hash_table (info);
4573 symtab_hdr = &elf_symtab_hdr (input_bfd);
4575 BFD_ASSERT (is_aarch64_elf (input_bfd));
4577 r_symndx = ELFNN_R_SYM (rel->r_info);
4579 /* It is possible to have linker relaxations on some TLS access
4580 models. Update our information here. */
4581 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4582 if (new_bfd_r_type != bfd_r_type)
4584 bfd_r_type = new_bfd_r_type;
4585 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4586 BFD_ASSERT (howto != NULL);
4587 r_type = howto->type;
4590 place = input_section->output_section->vma
4591 + input_section->output_offset + rel->r_offset;
4593 /* Get addend, accumulating the addend for consecutive relocs
4594 which refer to the same offset. */
4595 signed_addend = saved_addend ? *saved_addend : 0;
4596 signed_addend += rel->r_addend;
4598 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4599 : bfd_is_und_section (sym_sec));
4601 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4602 it here if it is defined in a non-shared object. */
4604 && h->type == STT_GNU_IFUNC
4611 if ((input_section->flags & SEC_ALLOC) == 0
4612 || h->plt.offset == (bfd_vma) -1)
4615 /* STT_GNU_IFUNC symbol must go through PLT. */
4616 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4617 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4622 if (h->root.root.string)
4623 name = h->root.root.string;
4625 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4627 (*_bfd_error_handler)
4628 (_("%B: relocation %s against STT_GNU_IFUNC "
4629 "symbol `%s' isn't handled by %s"), input_bfd,
4630 howto->name, name, __FUNCTION__);
4631 bfd_set_error (bfd_error_bad_value);
4634 case BFD_RELOC_AARCH64_NN:
4635 if (rel->r_addend != 0)
4637 if (h->root.root.string)
4638 name = h->root.root.string;
4640 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4642 (*_bfd_error_handler)
4643 (_("%B: relocation %s against STT_GNU_IFUNC "
4644 "symbol `%s' has non-zero addend: %d"),
4645 input_bfd, howto->name, name, rel->r_addend);
4646 bfd_set_error (bfd_error_bad_value);
4650 /* Generate dynamic relocation only when there is a
4651 non-GOT reference in a shared object. */
4652 if (info->shared && h->non_got_ref)
4654 Elf_Internal_Rela outrel;
4657 /* Need a dynamic relocation to get the real function
4659 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4663 if (outrel.r_offset == (bfd_vma) -1
4664 || outrel.r_offset == (bfd_vma) -2)
4667 outrel.r_offset += (input_section->output_section->vma
4668 + input_section->output_offset);
4670 if (h->dynindx == -1
4672 || info->executable)
4674 /* This symbol is resolved locally. */
4675 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4676 outrel.r_addend = (h->root.u.def.value
4677 + h->root.u.def.section->output_section->vma
4678 + h->root.u.def.section->output_offset);
4682 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4683 outrel.r_addend = 0;
4686 sreloc = globals->root.irelifunc;
4687 elf_append_rela (output_bfd, sreloc, &outrel);
4689 /* If this reloc is against an external symbol, we
4690 do not want to fiddle with the addend. Otherwise,
4691 we need to include the symbol value so that it
4692 becomes an addend for the dynamic reloc. For an
4693 internal symbol, we have updated addend. */
4694 return bfd_reloc_ok;
4697 case BFD_RELOC_AARCH64_CALL26:
4698 case BFD_RELOC_AARCH64_JUMP26:
4699 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4702 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4704 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4705 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4706 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4707 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4708 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4709 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4710 base_got = globals->root.sgot;
4711 off = h->got.offset;
4713 if (base_got == NULL)
4716 if (off == (bfd_vma) -1)
4720 /* We can't use h->got.offset here to save state, or
4721 even just remember the offset, as finish_dynamic_symbol
4722 would use that as offset into .got. */
4724 if (globals->root.splt != NULL)
4726 plt_index = ((h->plt.offset - globals->plt_header_size) /
4727 globals->plt_entry_size);
4728 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4729 base_got = globals->root.sgotplt;
4733 plt_index = h->plt.offset / globals->plt_entry_size;
4734 off = plt_index * GOT_ENTRY_SIZE;
4735 base_got = globals->root.igotplt;
4738 if (h->dynindx == -1
4742 /* This references the local definition. We must
4743 initialize this entry in the global offset table.
4744 Since the offset must always be a multiple of 8,
4745 we use the least significant bit to record
4746 whether we have initialized it already.
4748 When doing a dynamic link, we create a .rela.got
4749 relocation entry to initialize the value. This
4750 is done in the finish_dynamic_symbol routine. */
4755 bfd_put_NN (output_bfd, value,
4756 base_got->contents + off);
4757 /* Note that this is harmless as -1 | 1 still is -1. */
4761 value = (base_got->output_section->vma
4762 + base_got->output_offset + off);
4765 value = aarch64_calculate_got_entry_vma (h, globals, info,
4767 unresolved_reloc_p);
4768 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4769 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4770 addend = (globals->root.sgot->output_section->vma
4771 + globals->root.sgot->output_offset);
4772 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4773 addend, weak_undef_p);
4774 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4775 case BFD_RELOC_AARCH64_ADD_LO12:
4776 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4783 case BFD_RELOC_AARCH64_NONE:
4784 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4785 *unresolved_reloc_p = FALSE;
4786 return bfd_reloc_ok;
4788 case BFD_RELOC_AARCH64_NN:
4790 /* When generating a shared object or relocatable executable, these
4791 relocations are copied into the output file to be resolved at
4793 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
4794 && (input_section->flags & SEC_ALLOC)
4796 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4797 || h->root.type != bfd_link_hash_undefweak))
4799 Elf_Internal_Rela outrel;
4801 bfd_boolean skip, relocate;
4804 *unresolved_reloc_p = FALSE;
4809 outrel.r_addend = signed_addend;
4811 _bfd_elf_section_offset (output_bfd, info, input_section,
4813 if (outrel.r_offset == (bfd_vma) - 1)
4815 else if (outrel.r_offset == (bfd_vma) - 2)
4821 outrel.r_offset += (input_section->output_section->vma
4822 + input_section->output_offset);
4825 memset (&outrel, 0, sizeof outrel);
4828 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4829 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4834 /* On SVR4-ish systems, the dynamic loader cannot
4835 relocate the text and data segments independently,
4836 so the symbol does not matter. */
4838 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4839 outrel.r_addend += value;
4842 sreloc = elf_section_data (input_section)->sreloc;
4843 if (sreloc == NULL || sreloc->contents == NULL)
4844 return bfd_reloc_notsupported;
4846 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4847 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4849 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4851 /* Sanity to check that we have previously allocated
4852 sufficient space in the relocation section for the
4853 number of relocations we actually want to emit. */
4857 /* If this reloc is against an external symbol, we do not want to
4858 fiddle with the addend. Otherwise, we need to include the symbol
4859 value so that it becomes an addend for the dynamic reloc. */
4861 return bfd_reloc_ok;
4863 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4864 contents, rel->r_offset, value,
4868 value += signed_addend;
4871 case BFD_RELOC_AARCH64_CALL26:
4872 case BFD_RELOC_AARCH64_JUMP26:
4874 asection *splt = globals->root.splt;
4875 bfd_boolean via_plt_p =
4876 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4878 /* A call to an undefined weak symbol is converted to a jump to
4879 the next instruction unless a PLT entry will be created.
4880 The jump to the next instruction is optimized as a NOP.
4881 Do the same for local undefined symbols. */
4882 if (weak_undef_p && ! via_plt_p)
4884 bfd_putl32 (INSN_NOP, hit_data);
4885 return bfd_reloc_ok;
4888 /* If the call goes through a PLT entry, make sure to
4889 check distance to the right destination address. */
4892 value = (splt->output_section->vma
4893 + splt->output_offset + h->plt.offset);
4894 *unresolved_reloc_p = FALSE;
4897 /* If the target symbol is global and marked as a function the
4898 relocation applies a function call or a tail call. In this
4899 situation we can veneer out of range branches. The veneers
4900 use IP0 and IP1 hence cannot be used arbitrary out of range
4901 branches that occur within the body of a function. */
4902 if (h && h->type == STT_FUNC)
4904 /* Check if a stub has to be inserted because the destination
4906 if (! aarch64_valid_branch_p (value, place))
4908 /* The target is out of reach, so redirect the branch to
4909 the local stub for this function. */
4910 struct elf_aarch64_stub_hash_entry *stub_entry;
4911 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
4914 if (stub_entry != NULL)
4915 value = (stub_entry->stub_offset
4916 + stub_entry->stub_sec->output_offset
4917 + stub_entry->stub_sec->output_section->vma);
4921 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4922 signed_addend, weak_undef_p);
4925 case BFD_RELOC_AARCH64_16_PCREL:
4926 case BFD_RELOC_AARCH64_32_PCREL:
4927 case BFD_RELOC_AARCH64_64_PCREL:
4928 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4929 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4930 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4931 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4933 && (input_section->flags & SEC_ALLOC) != 0
4934 && (input_section->flags & SEC_READONLY) != 0
4938 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4940 (*_bfd_error_handler)
4941 (_("%B: relocation %s against external symbol `%s' can not be used"
4942 " when making a shared object; recompile with -fPIC"),
4943 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
4944 h->root.root.string);
4945 bfd_set_error (bfd_error_bad_value);
4949 case BFD_RELOC_AARCH64_16:
4951 case BFD_RELOC_AARCH64_32:
4953 case BFD_RELOC_AARCH64_ADD_LO12:
4954 case BFD_RELOC_AARCH64_BRANCH19:
4955 case BFD_RELOC_AARCH64_LDST128_LO12:
4956 case BFD_RELOC_AARCH64_LDST16_LO12:
4957 case BFD_RELOC_AARCH64_LDST32_LO12:
4958 case BFD_RELOC_AARCH64_LDST64_LO12:
4959 case BFD_RELOC_AARCH64_LDST8_LO12:
4960 case BFD_RELOC_AARCH64_MOVW_G0:
4961 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4962 case BFD_RELOC_AARCH64_MOVW_G0_S:
4963 case BFD_RELOC_AARCH64_MOVW_G1:
4964 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4965 case BFD_RELOC_AARCH64_MOVW_G1_S:
4966 case BFD_RELOC_AARCH64_MOVW_G2:
4967 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4968 case BFD_RELOC_AARCH64_MOVW_G2_S:
4969 case BFD_RELOC_AARCH64_MOVW_G3:
4970 case BFD_RELOC_AARCH64_TSTBR14:
4971 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4972 signed_addend, weak_undef_p);
4975 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4976 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4977 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4978 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4979 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4980 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4981 if (globals->root.sgot == NULL)
4982 BFD_ASSERT (h != NULL);
4987 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4989 unresolved_reloc_p);
4990 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4991 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4992 addend = (globals->root.sgot->output_section->vma
4993 + globals->root.sgot->output_offset);
4994 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4995 addend, weak_undef_p);
5000 struct elf_aarch64_local_symbol *locals
5001 = elf_aarch64_locals (input_bfd);
5005 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
5006 (*_bfd_error_handler)
5007 (_("%B: Local symbol descriptor table be NULL when applying "
5008 "relocation %s against local symbol"),
5009 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
5013 off = symbol_got_offset (input_bfd, h, r_symndx);
5014 base_got = globals->root.sgot;
5015 bfd_vma got_entry_addr = (base_got->output_section->vma
5016 + base_got->output_offset + off);
5018 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5020 bfd_put_64 (output_bfd, value, base_got->contents + off);
5025 Elf_Internal_Rela outrel;
5027 /* For local symbol, we have done absolute relocation in static
5028 linking stageh. While for share library, we need to update
5029 the content of GOT entry according to the share objects
5030 loading base address. So we need to generate a
5031 R_AARCH64_RELATIVE reloc for dynamic linker. */
5032 s = globals->root.srelgot;
5036 outrel.r_offset = got_entry_addr;
5037 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
5038 outrel.r_addend = value;
5039 elf_append_rela (output_bfd, s, &outrel);
5042 symbol_got_offset_mark (input_bfd, h, r_symndx);
5045 /* Update the relocation value to GOT entry addr as we have transformed
5046 the direct data access into indirect data access through GOT. */
5047 value = got_entry_addr;
5049 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5050 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5051 addend = base_got->output_section->vma + base_got->output_offset;
5053 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5054 addend, weak_undef_p);
5059 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5060 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5061 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5062 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5063 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5064 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5065 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5066 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5067 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5068 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5069 if (globals->root.sgot == NULL)
5070 return bfd_reloc_notsupported;
5072 value = (symbol_got_offset (input_bfd, h, r_symndx)
5073 + globals->root.sgot->output_section->vma
5074 + globals->root.sgot->output_offset);
5076 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5078 *unresolved_reloc_p = FALSE;
5081 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5082 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5083 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5084 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5085 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5086 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5087 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5088 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5089 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5090 signed_addend - tpoff_base (info),
5092 *unresolved_reloc_p = FALSE;
5095 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5096 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5097 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5098 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5099 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5100 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5101 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5102 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5103 if (globals->root.sgot == NULL)
5104 return bfd_reloc_notsupported;
5105 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
5106 + globals->root.sgotplt->output_section->vma
5107 + globals->root.sgotplt->output_offset
5108 + globals->sgotplt_jump_table_size);
5110 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5112 *unresolved_reloc_p = FALSE;
5116 return bfd_reloc_notsupported;
5120 *saved_addend = value;
5122 /* Only apply the final relocation in a sequence. */
5124 return bfd_reloc_continue;
5126 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5130 /* Handle TLS relaxations. Relaxing is possible for symbols that use
5131 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
5134 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
5135 is to then call final_link_relocate. Return other values in the
5138 static bfd_reloc_status_type
5139 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
5140 bfd *input_bfd, bfd_byte *contents,
5141 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
5143 bfd_boolean is_local = h == NULL;
5144 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
5147 BFD_ASSERT (globals && input_bfd && contents && rel);
5149 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5151 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5152 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5155 /* GD->LE relaxation:
5156 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
5158 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
5160 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5161 return bfd_reloc_continue;
5165 /* GD->IE relaxation:
5166 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
5168 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
5170 return bfd_reloc_continue;
5173 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5177 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5180 /* Tiny TLSDESC->LE relaxation:
5181 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
5182 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
5186 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5187 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5189 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5190 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
5191 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5193 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5194 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
5195 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5196 return bfd_reloc_continue;
5200 /* Tiny TLSDESC->IE relaxation:
5201 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
5202 adr x0, :tlsdesc:var => nop
5206 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5207 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5209 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5210 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5212 bfd_putl32 (0x58000000, contents + rel->r_offset);
5213 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
5214 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5215 return bfd_reloc_continue;
5218 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5221 /* Tiny GD->LE relaxation:
5222 adr x0, :tlsgd:var => mrs x1, tpidr_el0
5223 bl __tls_get_addr => add x0, x1, #:tprel_hi12:x, lsl #12
5224 nop => add x0, x0, #:tprel_lo12_nc:x
5227 /* First kill the tls_get_addr reloc on the bl instruction. */
5228 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5230 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
5231 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
5232 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
5234 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5235 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
5236 rel[1].r_offset = rel->r_offset + 8;
5238 /* Move the current relocation to the second instruction in
5241 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5242 AARCH64_R (TLSLE_ADD_TPREL_HI12));
5243 return bfd_reloc_continue;
5247 /* Tiny GD->IE relaxation:
5248 adr x0, :tlsgd:var => ldr x0, :gottprel:var
5249 bl __tls_get_addr => mrs x1, tpidr_el0
5250 nop => add x0, x0, x1
5253 /* First kill the tls_get_addr reloc on the bl instruction. */
5254 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5255 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5257 bfd_putl32 (0x58000000, contents + rel->r_offset);
5258 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5259 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5260 return bfd_reloc_continue;
5263 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5264 return bfd_reloc_continue;
5266 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5269 /* GD->LE relaxation:
5270 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
5272 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5273 return bfd_reloc_continue;
5277 /* GD->IE relaxation:
5278 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
5280 insn = bfd_getl32 (contents + rel->r_offset);
5282 bfd_putl32 (insn, contents + rel->r_offset);
5283 return bfd_reloc_continue;
5286 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5289 /* GD->LE relaxation
5290 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
5291 bl __tls_get_addr => mrs x1, tpidr_el0
5292 nop => add x0, x1, x0
5295 /* First kill the tls_get_addr reloc on the bl instruction. */
5296 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5297 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5299 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5300 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5301 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5302 return bfd_reloc_continue;
5306 /* GD->IE relaxation
5307 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
5308 BL __tls_get_addr => mrs x1, tpidr_el0
5310 NOP => add x0, x1, x0
5313 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
5315 /* Remove the relocation on the BL instruction. */
5316 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5318 bfd_putl32 (0xf9400000, contents + rel->r_offset);
5320 /* We choose to fixup the BL and NOP instructions using the
5321 offset from the second relocation to allow flexibility in
5322 scheduling instructions between the ADD and BL. */
5323 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
5324 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
5325 return bfd_reloc_continue;
5328 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5329 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5330 /* GD->IE/LE relaxation:
5331 add x0, x0, #:tlsdesc_lo12:var => nop
5334 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
5335 return bfd_reloc_ok;
5337 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5338 /* IE->LE relaxation:
5339 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
5343 insn = bfd_getl32 (contents + rel->r_offset);
5344 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
5346 return bfd_reloc_continue;
5348 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5349 /* IE->LE relaxation:
5350 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
5354 insn = bfd_getl32 (contents + rel->r_offset);
5355 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
5357 return bfd_reloc_continue;
5360 return bfd_reloc_continue;
5363 return bfd_reloc_ok;
5366 /* Relocate an AArch64 ELF section. */
5369 elfNN_aarch64_relocate_section (bfd *output_bfd,
5370 struct bfd_link_info *info,
5372 asection *input_section,
5374 Elf_Internal_Rela *relocs,
5375 Elf_Internal_Sym *local_syms,
5376 asection **local_sections)
5378 Elf_Internal_Shdr *symtab_hdr;
5379 struct elf_link_hash_entry **sym_hashes;
5380 Elf_Internal_Rela *rel;
5381 Elf_Internal_Rela *relend;
5383 struct elf_aarch64_link_hash_table *globals;
5384 bfd_boolean save_addend = FALSE;
5387 globals = elf_aarch64_hash_table (info);
5389 symtab_hdr = &elf_symtab_hdr (input_bfd);
5390 sym_hashes = elf_sym_hashes (input_bfd);
5393 relend = relocs + input_section->reloc_count;
5394 for (; rel < relend; rel++)
5396 unsigned int r_type;
5397 bfd_reloc_code_real_type bfd_r_type;
5398 bfd_reloc_code_real_type relaxed_bfd_r_type;
5399 reloc_howto_type *howto;
5400 unsigned long r_symndx;
5401 Elf_Internal_Sym *sym;
5403 struct elf_link_hash_entry *h;
5405 bfd_reloc_status_type r;
5408 bfd_boolean unresolved_reloc = FALSE;
5409 char *error_message = NULL;
5411 r_symndx = ELFNN_R_SYM (rel->r_info);
5412 r_type = ELFNN_R_TYPE (rel->r_info);
5414 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
5415 howto = bfd_reloc.howto;
5419 (*_bfd_error_handler)
5420 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
5421 input_bfd, input_section, r_type);
5424 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
5430 if (r_symndx < symtab_hdr->sh_info)
5432 sym = local_syms + r_symndx;
5433 sym_type = ELFNN_ST_TYPE (sym->st_info);
5434 sec = local_sections[r_symndx];
5436 /* An object file might have a reference to a local
5437 undefined symbol. This is a daft object file, but we
5438 should at least do something about it. */
5439 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
5440 && bfd_is_und_section (sec)
5441 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
5443 if (!info->callbacks->undefined_symbol
5444 (info, bfd_elf_string_from_elf_section
5445 (input_bfd, symtab_hdr->sh_link, sym->st_name),
5446 input_bfd, input_section, rel->r_offset, TRUE))
5450 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
5452 /* Relocate against local STT_GNU_IFUNC symbol. */
5453 if (!info->relocatable
5454 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
5456 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
5461 /* Set STT_GNU_IFUNC symbol value. */
5462 h->root.u.def.value = sym->st_value;
5463 h->root.u.def.section = sec;
5468 bfd_boolean warned, ignored;
5470 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
5471 r_symndx, symtab_hdr, sym_hashes,
5473 unresolved_reloc, warned, ignored);
5478 if (sec != NULL && discarded_section (sec))
5479 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
5480 rel, 1, relend, howto, 0, contents);
5482 if (info->relocatable)
5486 name = h->root.root.string;
5489 name = (bfd_elf_string_from_elf_section
5490 (input_bfd, symtab_hdr->sh_link, sym->st_name));
5491 if (name == NULL || *name == '\0')
5492 name = bfd_section_name (input_bfd, sec);
5496 && r_type != R_AARCH64_NONE
5497 && r_type != R_AARCH64_NULL
5499 || h->root.type == bfd_link_hash_defined
5500 || h->root.type == bfd_link_hash_defweak)
5501 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
5503 (*_bfd_error_handler)
5504 ((sym_type == STT_TLS
5505 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
5506 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
5508 input_section, (long) rel->r_offset, howto->name, name);
5511 /* We relax only if we can see that there can be a valid transition
5512 from a reloc type to another.
5513 We call elfNN_aarch64_final_link_relocate unless we're completely
5514 done, i.e., the relaxation produced the final output we want. */
5516 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
5518 if (relaxed_bfd_r_type != bfd_r_type)
5520 bfd_r_type = relaxed_bfd_r_type;
5521 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
5522 BFD_ASSERT (howto != NULL);
5523 r_type = howto->type;
5524 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
5525 unresolved_reloc = 0;
5528 r = bfd_reloc_continue;
5530 /* There may be multiple consecutive relocations for the
5531 same offset. In that case we are supposed to treat the
5532 output of each relocation as the addend for the next. */
5533 if (rel + 1 < relend
5534 && rel->r_offset == rel[1].r_offset
5535 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
5536 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
5539 save_addend = FALSE;
5541 if (r == bfd_reloc_continue)
5542 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
5543 input_section, contents, rel,
5544 relocation, info, sec,
5545 h, &unresolved_reloc,
5546 save_addend, &addend, sym);
5548 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5550 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5551 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5552 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5553 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5554 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5555 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5556 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5558 bfd_boolean need_relocs = FALSE;
5563 off = symbol_got_offset (input_bfd, h, r_symndx);
5564 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5567 (info->shared || indx != 0) &&
5569 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5570 || h->root.type != bfd_link_hash_undefweak);
5572 BFD_ASSERT (globals->root.srelgot != NULL);
5576 Elf_Internal_Rela rela;
5577 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
5579 rela.r_offset = globals->root.sgot->output_section->vma +
5580 globals->root.sgot->output_offset + off;
5583 loc = globals->root.srelgot->contents;
5584 loc += globals->root.srelgot->reloc_count++
5585 * RELOC_SIZE (htab);
5586 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5588 bfd_reloc_code_real_type real_type =
5589 elfNN_aarch64_bfd_reloc_from_type (r_type);
5591 if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
5592 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21
5593 || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC)
5595 /* For local dynamic, don't generate DTPREL in any case.
5596 Initialize the DTPREL slot into zero, so we get module
5597 base address when invoke runtime TLS resolver. */
5598 bfd_put_NN (output_bfd, 0,
5599 globals->root.sgot->contents + off
5604 bfd_put_NN (output_bfd,
5605 relocation - dtpoff_base (info),
5606 globals->root.sgot->contents + off
5611 /* This TLS symbol is global. We emit a
5612 relocation to fixup the tls offset at load
5615 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
5618 (globals->root.sgot->output_section->vma
5619 + globals->root.sgot->output_offset + off
5622 loc = globals->root.srelgot->contents;
5623 loc += globals->root.srelgot->reloc_count++
5624 * RELOC_SIZE (globals);
5625 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5626 bfd_put_NN (output_bfd, (bfd_vma) 0,
5627 globals->root.sgot->contents + off
5633 bfd_put_NN (output_bfd, (bfd_vma) 1,
5634 globals->root.sgot->contents + off);
5635 bfd_put_NN (output_bfd,
5636 relocation - dtpoff_base (info),
5637 globals->root.sgot->contents + off
5641 symbol_got_offset_mark (input_bfd, h, r_symndx);
5645 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5646 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5647 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5648 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5650 bfd_boolean need_relocs = FALSE;
5655 off = symbol_got_offset (input_bfd, h, r_symndx);
5657 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5660 (info->shared || indx != 0) &&
5662 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5663 || h->root.type != bfd_link_hash_undefweak);
5665 BFD_ASSERT (globals->root.srelgot != NULL);
5669 Elf_Internal_Rela rela;
5672 rela.r_addend = relocation - dtpoff_base (info);
5676 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5677 rela.r_offset = globals->root.sgot->output_section->vma +
5678 globals->root.sgot->output_offset + off;
5680 loc = globals->root.srelgot->contents;
5681 loc += globals->root.srelgot->reloc_count++
5682 * RELOC_SIZE (htab);
5684 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5686 bfd_put_NN (output_bfd, rela.r_addend,
5687 globals->root.sgot->contents + off);
5690 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5691 globals->root.sgot->contents + off);
5693 symbol_got_offset_mark (input_bfd, h, r_symndx);
5697 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5698 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5699 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5700 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5701 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5702 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5703 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5704 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5707 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5708 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5709 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5710 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5711 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5712 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5714 bfd_boolean need_relocs = FALSE;
5715 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5716 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5718 need_relocs = (h == NULL
5719 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5720 || h->root.type != bfd_link_hash_undefweak);
5722 BFD_ASSERT (globals->root.srelgot != NULL);
5723 BFD_ASSERT (globals->root.sgot != NULL);
5728 Elf_Internal_Rela rela;
5729 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5732 rela.r_offset = (globals->root.sgotplt->output_section->vma
5733 + globals->root.sgotplt->output_offset
5734 + off + globals->sgotplt_jump_table_size);
5737 rela.r_addend = relocation - dtpoff_base (info);
5739 /* Allocate the next available slot in the PLT reloc
5740 section to hold our R_AARCH64_TLSDESC, the next
5741 available slot is determined from reloc_count,
5742 which we step. But note, reloc_count was
5743 artifically moved down while allocating slots for
5744 real PLT relocs such that all of the PLT relocs
5745 will fit above the initial reloc_count and the
5746 extra stuff will fit below. */
5747 loc = globals->root.srelplt->contents;
5748 loc += globals->root.srelplt->reloc_count++
5749 * RELOC_SIZE (globals);
5751 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5753 bfd_put_NN (output_bfd, (bfd_vma) 0,
5754 globals->root.sgotplt->contents + off +
5755 globals->sgotplt_jump_table_size);
5756 bfd_put_NN (output_bfd, (bfd_vma) 0,
5757 globals->root.sgotplt->contents + off +
5758 globals->sgotplt_jump_table_size +
5762 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5773 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5774 because such sections are not SEC_ALLOC and thus ld.so will
5775 not process them. */
5776 if (unresolved_reloc
5777 && !((input_section->flags & SEC_DEBUGGING) != 0
5779 && _bfd_elf_section_offset (output_bfd, info, input_section,
5780 +rel->r_offset) != (bfd_vma) - 1)
5782 (*_bfd_error_handler)
5784 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5785 input_bfd, input_section, (long) rel->r_offset, howto->name,
5786 h->root.root.string);
5790 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5794 case bfd_reloc_overflow:
5795 if (!(*info->callbacks->reloc_overflow)
5796 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
5797 input_bfd, input_section, rel->r_offset))
5801 case bfd_reloc_undefined:
5802 if (!((*info->callbacks->undefined_symbol)
5803 (info, name, input_bfd, input_section,
5804 rel->r_offset, TRUE)))
5808 case bfd_reloc_outofrange:
5809 error_message = _("out of range");
5812 case bfd_reloc_notsupported:
5813 error_message = _("unsupported relocation");
5816 case bfd_reloc_dangerous:
5817 /* error_message should already be set. */
5821 error_message = _("unknown error");
5825 BFD_ASSERT (error_message != NULL);
5826 if (!((*info->callbacks->reloc_dangerous)
5827 (info, error_message, input_bfd, input_section,
5838 /* Set the right machine number. */
5841 elfNN_aarch64_object_p (bfd *abfd)
5844 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5846 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5851 /* Function to keep AArch64 specific flags in the ELF header. */
5854 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5856 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5861 elf_elfheader (abfd)->e_flags = flags;
5862 elf_flags_init (abfd) = TRUE;
5868 /* Merge backend specific data from an object file to the output
5869 object file when linking. */
5872 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5876 bfd_boolean flags_compatible = TRUE;
5879 /* Check if we have the same endianess. */
5880 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5883 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5886 /* The input BFD must have had its flags initialised. */
5887 /* The following seems bogus to me -- The flags are initialized in
5888 the assembler but I don't think an elf_flags_init field is
5889 written into the object. */
5890 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5892 in_flags = elf_elfheader (ibfd)->e_flags;
5893 out_flags = elf_elfheader (obfd)->e_flags;
5895 if (!elf_flags_init (obfd))
5897 /* If the input is the default architecture and had the default
5898 flags then do not bother setting the flags for the output
5899 architecture, instead allow future merges to do this. If no
5900 future merges ever set these flags then they will retain their
5901 uninitialised values, which surprise surprise, correspond
5902 to the default values. */
5903 if (bfd_get_arch_info (ibfd)->the_default
5904 && elf_elfheader (ibfd)->e_flags == 0)
5907 elf_flags_init (obfd) = TRUE;
5908 elf_elfheader (obfd)->e_flags = in_flags;
5910 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5911 && bfd_get_arch_info (obfd)->the_default)
5912 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5913 bfd_get_mach (ibfd));
5918 /* Identical flags must be compatible. */
5919 if (in_flags == out_flags)
5922 /* Check to see if the input BFD actually contains any sections. If
5923 not, its flags may not have been initialised either, but it
5924 cannot actually cause any incompatiblity. Do not short-circuit
5925 dynamic objects; their section list may be emptied by
5926 elf_link_add_object_symbols.
5928 Also check to see if there are no code sections in the input.
5929 In this case there is no need to check for code specific flags.
5930 XXX - do we need to worry about floating-point format compatability
5931 in data sections ? */
5932 if (!(ibfd->flags & DYNAMIC))
5934 bfd_boolean null_input_bfd = TRUE;
5935 bfd_boolean only_data_sections = TRUE;
5937 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5939 if ((bfd_get_section_flags (ibfd, sec)
5940 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5941 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5942 only_data_sections = FALSE;
5944 null_input_bfd = FALSE;
5948 if (null_input_bfd || only_data_sections)
5952 return flags_compatible;
5955 /* Display the flags field. */
5958 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5960 FILE *file = (FILE *) ptr;
5961 unsigned long flags;
5963 BFD_ASSERT (abfd != NULL && ptr != NULL);
5965 /* Print normal ELF private data. */
5966 _bfd_elf_print_private_bfd_data (abfd, ptr);
5968 flags = elf_elfheader (abfd)->e_flags;
5969 /* Ignore init flag - it may not be set, despite the flags field
5970 containing valid data. */
5972 /* xgettext:c-format */
5973 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5976 fprintf (file, _("<Unrecognised flag bits set>"));
5983 /* Update the got entry reference counts for the section being removed. */
5986 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5987 struct bfd_link_info *info,
5989 const Elf_Internal_Rela * relocs)
5991 struct elf_aarch64_link_hash_table *htab;
5992 Elf_Internal_Shdr *symtab_hdr;
5993 struct elf_link_hash_entry **sym_hashes;
5994 struct elf_aarch64_local_symbol *locals;
5995 const Elf_Internal_Rela *rel, *relend;
5997 if (info->relocatable)
6000 htab = elf_aarch64_hash_table (info);
6005 elf_section_data (sec)->local_dynrel = NULL;
6007 symtab_hdr = &elf_symtab_hdr (abfd);
6008 sym_hashes = elf_sym_hashes (abfd);
6010 locals = elf_aarch64_locals (abfd);
6012 relend = relocs + sec->reloc_count;
6013 for (rel = relocs; rel < relend; rel++)
6015 unsigned long r_symndx;
6016 unsigned int r_type;
6017 struct elf_link_hash_entry *h = NULL;
6019 r_symndx = ELFNN_R_SYM (rel->r_info);
6021 if (r_symndx >= symtab_hdr->sh_info)
6024 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6025 while (h->root.type == bfd_link_hash_indirect
6026 || h->root.type == bfd_link_hash_warning)
6027 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6031 Elf_Internal_Sym *isym;
6033 /* A local symbol. */
6034 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6037 /* Check relocation against local STT_GNU_IFUNC symbol. */
6039 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6041 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
6049 struct elf_aarch64_link_hash_entry *eh;
6050 struct elf_dyn_relocs **pp;
6051 struct elf_dyn_relocs *p;
6053 eh = (struct elf_aarch64_link_hash_entry *) h;
6055 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
6058 /* Everything must go for SEC. */
6064 r_type = ELFNN_R_TYPE (rel->r_info);
6065 switch (aarch64_tls_transition (abfd,info, r_type, h ,r_symndx))
6067 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6068 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6069 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6070 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6071 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6072 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6073 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6074 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6075 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6076 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6077 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6078 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6079 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6080 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6081 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6082 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6083 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6084 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6085 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6086 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6087 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6088 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6089 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6090 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6091 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6092 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6093 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6094 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6095 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6096 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6099 if (h->got.refcount > 0)
6100 h->got.refcount -= 1;
6102 if (h->type == STT_GNU_IFUNC)
6104 if (h->plt.refcount > 0)
6105 h->plt.refcount -= 1;
6108 else if (locals != NULL)
6110 if (locals[r_symndx].got_refcount > 0)
6111 locals[r_symndx].got_refcount -= 1;
6115 case BFD_RELOC_AARCH64_CALL26:
6116 case BFD_RELOC_AARCH64_JUMP26:
6117 /* If this is a local symbol then we resolve it
6118 directly without creating a PLT entry. */
6122 if (h->plt.refcount > 0)
6123 h->plt.refcount -= 1;
6126 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6127 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6128 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6129 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6130 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6131 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6132 case BFD_RELOC_AARCH64_MOVW_G3:
6133 case BFD_RELOC_AARCH64_NN:
6134 if (h != NULL && info->executable)
6136 if (h->plt.refcount > 0)
6137 h->plt.refcount -= 1;
6149 /* Adjust a symbol defined by a dynamic object and referenced by a
6150 regular object. The current definition is in some section of the
6151 dynamic object, but we're not including those sections. We have to
6152 change the definition to something the rest of the link can
6156 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
6157 struct elf_link_hash_entry *h)
6159 struct elf_aarch64_link_hash_table *htab;
6162 /* If this is a function, put it in the procedure linkage table. We
6163 will fill in the contents of the procedure linkage table later,
6164 when we know the address of the .got section. */
6165 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
6167 if (h->plt.refcount <= 0
6168 || (h->type != STT_GNU_IFUNC
6169 && (SYMBOL_CALLS_LOCAL (info, h)
6170 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
6171 && h->root.type == bfd_link_hash_undefweak))))
6173 /* This case can occur if we saw a CALL26 reloc in
6174 an input file, but the symbol wasn't referred to
6175 by a dynamic object or all references were
6176 garbage collected. In which case we can end up
6178 h->plt.offset = (bfd_vma) - 1;
6185 /* Otherwise, reset to -1. */
6186 h->plt.offset = (bfd_vma) - 1;
6189 /* If this is a weak symbol, and there is a real definition, the
6190 processor independent code will have arranged for us to see the
6191 real definition first, and we can just use the same value. */
6192 if (h->u.weakdef != NULL)
6194 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
6195 || h->u.weakdef->root.type == bfd_link_hash_defweak);
6196 h->root.u.def.section = h->u.weakdef->root.u.def.section;
6197 h->root.u.def.value = h->u.weakdef->root.u.def.value;
6198 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
6199 h->non_got_ref = h->u.weakdef->non_got_ref;
6203 /* If we are creating a shared library, we must presume that the
6204 only references to the symbol are via the global offset table.
6205 For such cases we need not do anything here; the relocations will
6206 be handled correctly by relocate_section. */
6210 /* If there are no references to this symbol that do not use the
6211 GOT, we don't need to generate a copy reloc. */
6212 if (!h->non_got_ref)
6215 /* If -z nocopyreloc was given, we won't generate them either. */
6216 if (info->nocopyreloc)
6222 /* We must allocate the symbol in our .dynbss section, which will
6223 become part of the .bss section of the executable. There will be
6224 an entry for this symbol in the .dynsym section. The dynamic
6225 object will contain position independent code, so all references
6226 from the dynamic object to this symbol will go through the global
6227 offset table. The dynamic linker will use the .dynsym entry to
6228 determine the address it must put in the global offset table, so
6229 both the dynamic object and the regular object will refer to the
6230 same memory location for the variable. */
6232 htab = elf_aarch64_hash_table (info);
6234 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
6235 to copy the initial value out of the dynamic object and into the
6236 runtime process image. */
6237 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
6239 htab->srelbss->size += RELOC_SIZE (htab);
6245 return _bfd_elf_adjust_dynamic_copy (info, h, s);
6250 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
6252 struct elf_aarch64_local_symbol *locals;
6253 locals = elf_aarch64_locals (abfd);
6256 locals = (struct elf_aarch64_local_symbol *)
6257 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
6260 elf_aarch64_locals (abfd) = locals;
6265 /* Create the .got section to hold the global offset table. */
6268 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
6270 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6273 struct elf_link_hash_entry *h;
6274 struct elf_link_hash_table *htab = elf_hash_table (info);
6276 /* This function may be called more than once. */
6277 s = bfd_get_linker_section (abfd, ".got");
6281 flags = bed->dynamic_sec_flags;
6283 s = bfd_make_section_anyway_with_flags (abfd,
6284 (bed->rela_plts_and_copies_p
6285 ? ".rela.got" : ".rel.got"),
6286 (bed->dynamic_sec_flags
6289 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6293 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
6295 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6298 htab->sgot->size += GOT_ENTRY_SIZE;
6300 if (bed->want_got_sym)
6302 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
6303 (or .got.plt) section. We don't do this in the linker script
6304 because we don't want to define the symbol if we are not creating
6305 a global offset table. */
6306 h = _bfd_elf_define_linkage_sym (abfd, info, s,
6307 "_GLOBAL_OFFSET_TABLE_");
6308 elf_hash_table (info)->hgot = h;
6313 if (bed->want_got_plt)
6315 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
6317 || !bfd_set_section_alignment (abfd, s,
6318 bed->s->log_file_align))
6323 /* The first bit of the global offset table is the header. */
6324 s->size += bed->got_header_size;
6329 /* Look through the relocs for a section during the first phase. */
6332 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
6333 asection *sec, const Elf_Internal_Rela *relocs)
6335 Elf_Internal_Shdr *symtab_hdr;
6336 struct elf_link_hash_entry **sym_hashes;
6337 const Elf_Internal_Rela *rel;
6338 const Elf_Internal_Rela *rel_end;
6341 struct elf_aarch64_link_hash_table *htab;
6343 if (info->relocatable)
6346 BFD_ASSERT (is_aarch64_elf (abfd));
6348 htab = elf_aarch64_hash_table (info);
6351 symtab_hdr = &elf_symtab_hdr (abfd);
6352 sym_hashes = elf_sym_hashes (abfd);
6354 rel_end = relocs + sec->reloc_count;
6355 for (rel = relocs; rel < rel_end; rel++)
6357 struct elf_link_hash_entry *h;
6358 unsigned long r_symndx;
6359 unsigned int r_type;
6360 bfd_reloc_code_real_type bfd_r_type;
6361 Elf_Internal_Sym *isym;
6363 r_symndx = ELFNN_R_SYM (rel->r_info);
6364 r_type = ELFNN_R_TYPE (rel->r_info);
6366 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
6368 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
6373 if (r_symndx < symtab_hdr->sh_info)
6375 /* A local symbol. */
6376 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6381 /* Check relocation against local STT_GNU_IFUNC symbol. */
6382 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6384 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
6389 /* Fake a STT_GNU_IFUNC symbol. */
6390 h->type = STT_GNU_IFUNC;
6393 h->forced_local = 1;
6394 h->root.type = bfd_link_hash_defined;
6401 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6402 while (h->root.type == bfd_link_hash_indirect
6403 || h->root.type == bfd_link_hash_warning)
6404 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6406 /* PR15323, ref flags aren't set for references in the same
6408 h->root.non_ir_ref = 1;
6411 /* Could be done earlier, if h were already available. */
6412 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
6416 /* Create the ifunc sections for static executables. If we
6417 never see an indirect function symbol nor we are building
6418 a static executable, those sections will be empty and
6419 won't appear in output. */
6425 case BFD_RELOC_AARCH64_ADD_LO12:
6426 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6427 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6428 case BFD_RELOC_AARCH64_CALL26:
6429 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6430 case BFD_RELOC_AARCH64_JUMP26:
6431 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6432 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6433 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6434 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6435 case BFD_RELOC_AARCH64_NN:
6436 if (htab->root.dynobj == NULL)
6437 htab->root.dynobj = abfd;
6438 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
6443 /* It is referenced by a non-shared object. */
6445 h->root.non_ir_ref = 1;
6450 case BFD_RELOC_AARCH64_NN:
6452 /* We don't need to handle relocs into sections not going into
6453 the "real" output. */
6454 if ((sec->flags & SEC_ALLOC) == 0)
6462 h->plt.refcount += 1;
6463 h->pointer_equality_needed = 1;
6466 /* No need to do anything if we're not creating a shared
6472 struct elf_dyn_relocs *p;
6473 struct elf_dyn_relocs **head;
6475 /* We must copy these reloc types into the output file.
6476 Create a reloc section in dynobj and make room for
6480 if (htab->root.dynobj == NULL)
6481 htab->root.dynobj = abfd;
6483 sreloc = _bfd_elf_make_dynamic_reloc_section
6484 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
6490 /* If this is a global symbol, we count the number of
6491 relocations we need for this symbol. */
6494 struct elf_aarch64_link_hash_entry *eh;
6495 eh = (struct elf_aarch64_link_hash_entry *) h;
6496 head = &eh->dyn_relocs;
6500 /* Track dynamic relocs needed for local syms too.
6501 We really need local syms available to do this
6507 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6512 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
6516 /* Beware of type punned pointers vs strict aliasing
6518 vpp = &(elf_section_data (s)->local_dynrel);
6519 head = (struct elf_dyn_relocs **) vpp;
6523 if (p == NULL || p->sec != sec)
6525 bfd_size_type amt = sizeof *p;
6526 p = ((struct elf_dyn_relocs *)
6527 bfd_zalloc (htab->root.dynobj, amt));
6540 /* RR: We probably want to keep a consistency check that
6541 there are no dangling GOT_PAGE relocs. */
6542 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6543 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6544 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6545 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6546 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6547 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6548 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6549 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6550 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6551 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6552 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6553 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6554 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6555 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6556 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6557 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6558 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6559 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6560 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6561 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6562 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6563 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6564 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6565 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6566 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6567 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6568 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6569 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6570 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6571 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6574 unsigned old_got_type;
6576 got_type = aarch64_reloc_got_type (bfd_r_type);
6580 h->got.refcount += 1;
6581 old_got_type = elf_aarch64_hash_entry (h)->got_type;
6585 struct elf_aarch64_local_symbol *locals;
6587 if (!elfNN_aarch64_allocate_local_symbols
6588 (abfd, symtab_hdr->sh_info))
6591 locals = elf_aarch64_locals (abfd);
6592 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6593 locals[r_symndx].got_refcount += 1;
6594 old_got_type = locals[r_symndx].got_type;
6597 /* If a variable is accessed with both general dynamic TLS
6598 methods, two slots may be created. */
6599 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
6600 got_type |= old_got_type;
6602 /* We will already have issued an error message if there
6603 is a TLS/non-TLS mismatch, based on the symbol type.
6604 So just combine any TLS types needed. */
6605 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
6606 && got_type != GOT_NORMAL)
6607 got_type |= old_got_type;
6609 /* If the symbol is accessed by both IE and GD methods, we
6610 are able to relax. Turn off the GD flag, without
6611 messing up with any other kind of TLS types that may be
6613 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
6614 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
6616 if (old_got_type != got_type)
6619 elf_aarch64_hash_entry (h)->got_type = got_type;
6622 struct elf_aarch64_local_symbol *locals;
6623 locals = elf_aarch64_locals (abfd);
6624 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6625 locals[r_symndx].got_type = got_type;
6629 if (htab->root.dynobj == NULL)
6630 htab->root.dynobj = abfd;
6631 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
6636 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6637 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6638 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6639 case BFD_RELOC_AARCH64_MOVW_G3:
6642 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6643 (*_bfd_error_handler)
6644 (_("%B: relocation %s against `%s' can not be used when making "
6645 "a shared object; recompile with -fPIC"),
6646 abfd, elfNN_aarch64_howto_table[howto_index].name,
6647 (h) ? h->root.root.string : "a local symbol");
6648 bfd_set_error (bfd_error_bad_value);
6652 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6653 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6654 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6655 if (h != NULL && info->executable)
6657 /* If this reloc is in a read-only section, we might
6658 need a copy reloc. We can't check reliably at this
6659 stage whether the section is read-only, as input
6660 sections have not yet been mapped to output sections.
6661 Tentatively set the flag for now, and correct in
6662 adjust_dynamic_symbol. */
6664 h->plt.refcount += 1;
6665 h->pointer_equality_needed = 1;
6667 /* FIXME:: RR need to handle these in shared libraries
6668 and essentially bomb out as these being non-PIC
6669 relocations in shared libraries. */
6672 case BFD_RELOC_AARCH64_CALL26:
6673 case BFD_RELOC_AARCH64_JUMP26:
6674 /* If this is a local symbol then we resolve it
6675 directly without creating a PLT entry. */
6680 if (h->plt.refcount <= 0)
6681 h->plt.refcount = 1;
6683 h->plt.refcount += 1;
6694 /* Treat mapping symbols as special target symbols. */
6697 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6700 return bfd_is_aarch64_special_symbol_name (sym->name,
6701 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6704 /* This is a copy of elf_find_function () from elf.c except that
6705 AArch64 mapping symbols are ignored when looking for function names. */
6708 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6712 const char **filename_ptr,
6713 const char **functionname_ptr)
6715 const char *filename = NULL;
6716 asymbol *func = NULL;
6717 bfd_vma low_func = 0;
6720 for (p = symbols; *p != NULL; p++)
6724 q = (elf_symbol_type *) * p;
6726 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6731 filename = bfd_asymbol_name (&q->symbol);
6735 /* Skip mapping symbols. */
6736 if ((q->symbol.flags & BSF_LOCAL)
6737 && (bfd_is_aarch64_special_symbol_name
6738 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6741 if (bfd_get_section (&q->symbol) == section
6742 && q->symbol.value >= low_func && q->symbol.value <= offset)
6744 func = (asymbol *) q;
6745 low_func = q->symbol.value;
6755 *filename_ptr = filename;
6756 if (functionname_ptr)
6757 *functionname_ptr = bfd_asymbol_name (func);
6763 /* Find the nearest line to a particular section and offset, for error
6764 reporting. This code is a duplicate of the code in elf.c, except
6765 that it uses aarch64_elf_find_function. */
6768 elfNN_aarch64_find_nearest_line (bfd *abfd,
6772 const char **filename_ptr,
6773 const char **functionname_ptr,
6774 unsigned int *line_ptr,
6775 unsigned int *discriminator_ptr)
6777 bfd_boolean found = FALSE;
6779 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6780 filename_ptr, functionname_ptr,
6781 line_ptr, discriminator_ptr,
6782 dwarf_debug_sections, 0,
6783 &elf_tdata (abfd)->dwarf2_find_line_info))
6785 if (!*functionname_ptr)
6786 aarch64_elf_find_function (abfd, symbols, section, offset,
6787 *filename_ptr ? NULL : filename_ptr,
6793 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6794 toolchain uses DWARF1. */
6796 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6797 &found, filename_ptr,
6798 functionname_ptr, line_ptr,
6799 &elf_tdata (abfd)->line_info))
6802 if (found && (*functionname_ptr || *line_ptr))
6805 if (symbols == NULL)
6808 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6809 filename_ptr, functionname_ptr))
6817 elfNN_aarch64_find_inliner_info (bfd *abfd,
6818 const char **filename_ptr,
6819 const char **functionname_ptr,
6820 unsigned int *line_ptr)
6823 found = _bfd_dwarf2_find_inliner_info
6824 (abfd, filename_ptr,
6825 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6831 elfNN_aarch64_post_process_headers (bfd *abfd,
6832 struct bfd_link_info *link_info)
6834 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6836 i_ehdrp = elf_elfheader (abfd);
6837 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6839 _bfd_elf_post_process_headers (abfd, link_info);
6842 static enum elf_reloc_type_class
6843 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6844 const asection *rel_sec ATTRIBUTE_UNUSED,
6845 const Elf_Internal_Rela *rela)
6847 switch ((int) ELFNN_R_TYPE (rela->r_info))
6849 case AARCH64_R (RELATIVE):
6850 return reloc_class_relative;
6851 case AARCH64_R (JUMP_SLOT):
6852 return reloc_class_plt;
6853 case AARCH64_R (COPY):
6854 return reloc_class_copy;
6856 return reloc_class_normal;
6860 /* Handle an AArch64 specific section when reading an object file. This is
6861 called when bfd_section_from_shdr finds a section with an unknown
6865 elfNN_aarch64_section_from_shdr (bfd *abfd,
6866 Elf_Internal_Shdr *hdr,
6867 const char *name, int shindex)
6869 /* There ought to be a place to keep ELF backend specific flags, but
6870 at the moment there isn't one. We just keep track of the
6871 sections by their name, instead. Fortunately, the ABI gives
6872 names for all the AArch64 specific sections, so we will probably get
6874 switch (hdr->sh_type)
6876 case SHT_AARCH64_ATTRIBUTES:
6883 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6889 /* A structure used to record a list of sections, independently
6890 of the next and prev fields in the asection structure. */
6891 typedef struct section_list
6894 struct section_list *next;
6895 struct section_list *prev;
6899 /* Unfortunately we need to keep a list of sections for which
6900 an _aarch64_elf_section_data structure has been allocated. This
6901 is because it is possible for functions like elfNN_aarch64_write_section
6902 to be called on a section which has had an elf_data_structure
6903 allocated for it (and so the used_by_bfd field is valid) but
6904 for which the AArch64 extended version of this structure - the
6905 _aarch64_elf_section_data structure - has not been allocated. */
6906 static section_list *sections_with_aarch64_elf_section_data = NULL;
6909 record_section_with_aarch64_elf_section_data (asection *sec)
6911 struct section_list *entry;
6913 entry = bfd_malloc (sizeof (*entry));
6917 entry->next = sections_with_aarch64_elf_section_data;
6919 if (entry->next != NULL)
6920 entry->next->prev = entry;
6921 sections_with_aarch64_elf_section_data = entry;
6924 static struct section_list *
6925 find_aarch64_elf_section_entry (asection *sec)
6927 struct section_list *entry;
6928 static struct section_list *last_entry = NULL;
6930 /* This is a short cut for the typical case where the sections are added
6931 to the sections_with_aarch64_elf_section_data list in forward order and
6932 then looked up here in backwards order. This makes a real difference
6933 to the ld-srec/sec64k.exp linker test. */
6934 entry = sections_with_aarch64_elf_section_data;
6935 if (last_entry != NULL)
6937 if (last_entry->sec == sec)
6939 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6940 entry = last_entry->next;
6943 for (; entry; entry = entry->next)
6944 if (entry->sec == sec)
6948 /* Record the entry prior to this one - it is the entry we are
6949 most likely to want to locate next time. Also this way if we
6950 have been called from
6951 unrecord_section_with_aarch64_elf_section_data () we will not
6952 be caching a pointer that is about to be freed. */
6953 last_entry = entry->prev;
6959 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6961 struct section_list *entry;
6963 entry = find_aarch64_elf_section_entry (sec);
6967 if (entry->prev != NULL)
6968 entry->prev->next = entry->next;
6969 if (entry->next != NULL)
6970 entry->next->prev = entry->prev;
6971 if (entry == sections_with_aarch64_elf_section_data)
6972 sections_with_aarch64_elf_section_data = entry->next;
6981 struct bfd_link_info *info;
6984 int (*func) (void *, const char *, Elf_Internal_Sym *,
6985 asection *, struct elf_link_hash_entry *);
6986 } output_arch_syminfo;
6988 enum map_symbol_type
6995 /* Output a single mapping symbol. */
6998 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6999 enum map_symbol_type type, bfd_vma offset)
7001 static const char *names[2] = { "$x", "$d" };
7002 Elf_Internal_Sym sym;
7004 sym.st_value = (osi->sec->output_section->vma
7005 + osi->sec->output_offset + offset);
7008 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7009 sym.st_shndx = osi->sec_shndx;
7010 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
7015 /* Output mapping symbols for PLT entries associated with H. */
7018 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
7020 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
7023 if (h->root.type == bfd_link_hash_indirect)
7026 if (h->root.type == bfd_link_hash_warning)
7027 /* When warning symbols are created, they **replace** the "real"
7028 entry in the hash table, thus we never get to see the real
7029 symbol in a hash traversal. So look at it now. */
7030 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7032 if (h->plt.offset == (bfd_vma) - 1)
7035 addr = h->plt.offset;
7038 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7045 /* Output a single local symbol for a generated stub. */
7048 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
7049 bfd_vma offset, bfd_vma size)
7051 Elf_Internal_Sym sym;
7053 sym.st_value = (osi->sec->output_section->vma
7054 + osi->sec->output_offset + offset);
7057 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7058 sym.st_shndx = osi->sec_shndx;
7059 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
7063 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
7065 struct elf_aarch64_stub_hash_entry *stub_entry;
7069 output_arch_syminfo *osi;
7071 /* Massage our args to the form they really have. */
7072 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
7073 osi = (output_arch_syminfo *) in_arg;
7075 stub_sec = stub_entry->stub_sec;
7077 /* Ensure this stub is attached to the current section being
7079 if (stub_sec != osi->sec)
7082 addr = (bfd_vma) stub_entry->stub_offset;
7084 stub_name = stub_entry->output_name;
7086 switch (stub_entry->stub_type)
7088 case aarch64_stub_adrp_branch:
7089 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7090 sizeof (aarch64_adrp_branch_stub)))
7092 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7095 case aarch64_stub_long_branch:
7096 if (!elfNN_aarch64_output_stub_sym
7097 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
7099 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7101 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
7104 case aarch64_stub_erratum_835769_veneer:
7105 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7106 sizeof (aarch64_erratum_835769_stub)))
7108 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7111 case aarch64_stub_erratum_843419_veneer:
7112 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7113 sizeof (aarch64_erratum_843419_stub)))
7115 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7126 /* Output mapping symbols for linker generated sections. */
7129 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
7130 struct bfd_link_info *info,
7132 int (*func) (void *, const char *,
7135 struct elf_link_hash_entry
7138 output_arch_syminfo osi;
7139 struct elf_aarch64_link_hash_table *htab;
7141 htab = elf_aarch64_hash_table (info);
7147 /* Long calls stubs. */
7148 if (htab->stub_bfd && htab->stub_bfd->sections)
7152 for (stub_sec = htab->stub_bfd->sections;
7153 stub_sec != NULL; stub_sec = stub_sec->next)
7155 /* Ignore non-stub sections. */
7156 if (!strstr (stub_sec->name, STUB_SUFFIX))
7161 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7162 (output_bfd, osi.sec->output_section);
7164 /* The first instruction in a stub is always a branch. */
7165 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
7168 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
7173 /* Finally, output mapping symbols for the PLT. */
7174 if (!htab->root.splt || htab->root.splt->size == 0)
7177 /* For now live without mapping symbols for the plt. */
7178 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7179 (output_bfd, htab->root.splt->output_section);
7180 osi.sec = htab->root.splt;
7182 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
7189 /* Allocate target specific section data. */
7192 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
7194 if (!sec->used_by_bfd)
7196 _aarch64_elf_section_data *sdata;
7197 bfd_size_type amt = sizeof (*sdata);
7199 sdata = bfd_zalloc (abfd, amt);
7202 sec->used_by_bfd = sdata;
7205 record_section_with_aarch64_elf_section_data (sec);
7207 return _bfd_elf_new_section_hook (abfd, sec);
7212 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
7214 void *ignore ATTRIBUTE_UNUSED)
7216 unrecord_section_with_aarch64_elf_section_data (sec);
7220 elfNN_aarch64_close_and_cleanup (bfd *abfd)
7223 bfd_map_over_sections (abfd,
7224 unrecord_section_via_map_over_sections, NULL);
7226 return _bfd_elf_close_and_cleanup (abfd);
7230 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
7233 bfd_map_over_sections (abfd,
7234 unrecord_section_via_map_over_sections, NULL);
7236 return _bfd_free_cached_info (abfd);
7239 /* Create dynamic sections. This is different from the ARM backend in that
7240 the got, plt, gotplt and their relocation sections are all created in the
7241 standard part of the bfd elf backend. */
7244 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
7245 struct bfd_link_info *info)
7247 struct elf_aarch64_link_hash_table *htab;
7249 /* We need to create .got section. */
7250 if (!aarch64_elf_create_got_section (dynobj, info))
7253 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
7256 htab = elf_aarch64_hash_table (info);
7257 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
7259 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
7261 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
7268 /* Allocate space in .plt, .got and associated reloc sections for
7272 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
7274 struct bfd_link_info *info;
7275 struct elf_aarch64_link_hash_table *htab;
7276 struct elf_aarch64_link_hash_entry *eh;
7277 struct elf_dyn_relocs *p;
7279 /* An example of a bfd_link_hash_indirect symbol is versioned
7280 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7281 -> __gxx_personality_v0(bfd_link_hash_defined)
7283 There is no need to process bfd_link_hash_indirect symbols here
7284 because we will also be presented with the concrete instance of
7285 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7286 called to copy all relevant data from the generic to the concrete
7289 if (h->root.type == bfd_link_hash_indirect)
7292 if (h->root.type == bfd_link_hash_warning)
7293 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7295 info = (struct bfd_link_info *) inf;
7296 htab = elf_aarch64_hash_table (info);
7298 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
7299 here if it is defined and referenced in a non-shared object. */
7300 if (h->type == STT_GNU_IFUNC
7303 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
7305 /* Make sure this symbol is output as a dynamic symbol.
7306 Undefined weak syms won't yet be marked as dynamic. */
7307 if (h->dynindx == -1 && !h->forced_local)
7309 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7313 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
7315 asection *s = htab->root.splt;
7317 /* If this is the first .plt entry, make room for the special
7320 s->size += htab->plt_header_size;
7322 h->plt.offset = s->size;
7324 /* If this symbol is not defined in a regular file, and we are
7325 not generating a shared library, then set the symbol to this
7326 location in the .plt. This is required to make function
7327 pointers compare as equal between the normal executable and
7328 the shared library. */
7329 if (!info->shared && !h->def_regular)
7331 h->root.u.def.section = s;
7332 h->root.u.def.value = h->plt.offset;
7335 /* Make room for this entry. For now we only create the
7336 small model PLT entries. We later need to find a way
7337 of relaxing into these from the large model PLT entries. */
7338 s->size += PLT_SMALL_ENTRY_SIZE;
7340 /* We also need to make an entry in the .got.plt section, which
7341 will be placed in the .got section by the linker script. */
7342 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
7344 /* We also need to make an entry in the .rela.plt section. */
7345 htab->root.srelplt->size += RELOC_SIZE (htab);
7347 /* We need to ensure that all GOT entries that serve the PLT
7348 are consecutive with the special GOT slots [0] [1] and
7349 [2]. Any addtional relocations, such as
7350 R_AARCH64_TLSDESC, must be placed after the PLT related
7351 entries. We abuse the reloc_count such that during
7352 sizing we adjust reloc_count to indicate the number of
7353 PLT related reserved entries. In subsequent phases when
7354 filling in the contents of the reloc entries, PLT related
7355 entries are placed by computing their PLT index (0
7356 .. reloc_count). While other none PLT relocs are placed
7357 at the slot indicated by reloc_count and reloc_count is
7360 htab->root.srelplt->reloc_count++;
7364 h->plt.offset = (bfd_vma) - 1;
7370 h->plt.offset = (bfd_vma) - 1;
7374 eh = (struct elf_aarch64_link_hash_entry *) h;
7375 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7377 if (h->got.refcount > 0)
7380 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
7382 h->got.offset = (bfd_vma) - 1;
7384 dyn = htab->root.dynamic_sections_created;
7386 /* Make sure this symbol is output as a dynamic symbol.
7387 Undefined weak syms won't yet be marked as dynamic. */
7388 if (dyn && h->dynindx == -1 && !h->forced_local)
7390 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7394 if (got_type == GOT_UNKNOWN)
7397 else if (got_type == GOT_NORMAL)
7399 h->got.offset = htab->root.sgot->size;
7400 htab->root.sgot->size += GOT_ENTRY_SIZE;
7401 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7402 || h->root.type != bfd_link_hash_undefweak)
7404 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7406 htab->root.srelgot->size += RELOC_SIZE (htab);
7412 if (got_type & GOT_TLSDESC_GD)
7414 eh->tlsdesc_got_jump_table_offset =
7415 (htab->root.sgotplt->size
7416 - aarch64_compute_jump_table_size (htab));
7417 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7418 h->got.offset = (bfd_vma) - 2;
7421 if (got_type & GOT_TLS_GD)
7423 h->got.offset = htab->root.sgot->size;
7424 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7427 if (got_type & GOT_TLS_IE)
7429 h->got.offset = htab->root.sgot->size;
7430 htab->root.sgot->size += GOT_ENTRY_SIZE;
7433 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7434 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7435 || h->root.type != bfd_link_hash_undefweak)
7438 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7440 if (got_type & GOT_TLSDESC_GD)
7442 htab->root.srelplt->size += RELOC_SIZE (htab);
7443 /* Note reloc_count not incremented here! We have
7444 already adjusted reloc_count for this relocation
7447 /* TLSDESC PLT is now needed, but not yet determined. */
7448 htab->tlsdesc_plt = (bfd_vma) - 1;
7451 if (got_type & GOT_TLS_GD)
7452 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7454 if (got_type & GOT_TLS_IE)
7455 htab->root.srelgot->size += RELOC_SIZE (htab);
7461 h->got.offset = (bfd_vma) - 1;
7464 if (eh->dyn_relocs == NULL)
7467 /* In the shared -Bsymbolic case, discard space allocated for
7468 dynamic pc-relative relocs against symbols which turn out to be
7469 defined in regular objects. For the normal shared case, discard
7470 space for pc-relative relocs that have become local due to symbol
7471 visibility changes. */
7475 /* Relocs that use pc_count are those that appear on a call
7476 insn, or certain REL relocs that can generated via assembly.
7477 We want calls to protected symbols to resolve directly to the
7478 function rather than going via the plt. If people want
7479 function pointer comparisons to work as expected then they
7480 should avoid writing weird assembly. */
7481 if (SYMBOL_CALLS_LOCAL (info, h))
7483 struct elf_dyn_relocs **pp;
7485 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
7487 p->count -= p->pc_count;
7496 /* Also discard relocs on undefined weak syms with non-default
7498 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
7500 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7501 eh->dyn_relocs = NULL;
7503 /* Make sure undefined weak symbols are output as a dynamic
7505 else if (h->dynindx == -1
7507 && !bfd_elf_link_record_dynamic_symbol (info, h))
7512 else if (ELIMINATE_COPY_RELOCS)
7514 /* For the non-shared case, discard space for relocs against
7515 symbols which turn out to need copy relocs or are not
7521 || (htab->root.dynamic_sections_created
7522 && (h->root.type == bfd_link_hash_undefweak
7523 || h->root.type == bfd_link_hash_undefined))))
7525 /* Make sure this symbol is output as a dynamic symbol.
7526 Undefined weak syms won't yet be marked as dynamic. */
7527 if (h->dynindx == -1
7529 && !bfd_elf_link_record_dynamic_symbol (info, h))
7532 /* If that succeeded, we know we'll be keeping all the
7534 if (h->dynindx != -1)
7538 eh->dyn_relocs = NULL;
7543 /* Finally, allocate space. */
7544 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7548 sreloc = elf_section_data (p->sec)->sreloc;
7550 BFD_ASSERT (sreloc != NULL);
7552 sreloc->size += p->count * RELOC_SIZE (htab);
7558 /* Allocate space in .plt, .got and associated reloc sections for
7559 ifunc dynamic relocs. */
7562 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
7565 struct bfd_link_info *info;
7566 struct elf_aarch64_link_hash_table *htab;
7567 struct elf_aarch64_link_hash_entry *eh;
7569 /* An example of a bfd_link_hash_indirect symbol is versioned
7570 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7571 -> __gxx_personality_v0(bfd_link_hash_defined)
7573 There is no need to process bfd_link_hash_indirect symbols here
7574 because we will also be presented with the concrete instance of
7575 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7576 called to copy all relevant data from the generic to the concrete
7579 if (h->root.type == bfd_link_hash_indirect)
7582 if (h->root.type == bfd_link_hash_warning)
7583 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7585 info = (struct bfd_link_info *) inf;
7586 htab = elf_aarch64_hash_table (info);
7588 eh = (struct elf_aarch64_link_hash_entry *) h;
7590 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
7591 here if it is defined and referenced in a non-shared object. */
7592 if (h->type == STT_GNU_IFUNC
7594 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
7596 htab->plt_entry_size,
7597 htab->plt_header_size,
7602 /* Allocate space in .plt, .got and associated reloc sections for
7603 local dynamic relocs. */
7606 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
7608 struct elf_link_hash_entry *h
7609 = (struct elf_link_hash_entry *) *slot;
7611 if (h->type != STT_GNU_IFUNC
7615 || h->root.type != bfd_link_hash_defined)
7618 return elfNN_aarch64_allocate_dynrelocs (h, inf);
7621 /* Allocate space in .plt, .got and associated reloc sections for
7622 local ifunc dynamic relocs. */
7625 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
7627 struct elf_link_hash_entry *h
7628 = (struct elf_link_hash_entry *) *slot;
7630 if (h->type != STT_GNU_IFUNC
7634 || h->root.type != bfd_link_hash_defined)
7637 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
7640 /* Find any dynamic relocs that apply to read-only sections. */
7643 aarch64_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
7645 struct elf_aarch64_link_hash_entry * eh;
7646 struct elf_dyn_relocs * p;
7648 eh = (struct elf_aarch64_link_hash_entry *) h;
7649 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7651 asection *s = p->sec;
7653 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7655 struct bfd_link_info *info = (struct bfd_link_info *) inf;
7657 info->flags |= DF_TEXTREL;
7659 /* Not an error, just cut short the traversal. */
7666 /* This is the most important function of all . Innocuosly named
7669 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
7670 struct bfd_link_info *info)
7672 struct elf_aarch64_link_hash_table *htab;
7678 htab = elf_aarch64_hash_table ((info));
7679 dynobj = htab->root.dynobj;
7681 BFD_ASSERT (dynobj != NULL);
7683 if (htab->root.dynamic_sections_created)
7685 if (info->executable)
7687 s = bfd_get_linker_section (dynobj, ".interp");
7690 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7691 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7695 /* Set up .got offsets for local syms, and space for local dynamic
7697 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7699 struct elf_aarch64_local_symbol *locals = NULL;
7700 Elf_Internal_Shdr *symtab_hdr;
7704 if (!is_aarch64_elf (ibfd))
7707 for (s = ibfd->sections; s != NULL; s = s->next)
7709 struct elf_dyn_relocs *p;
7711 for (p = (struct elf_dyn_relocs *)
7712 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7714 if (!bfd_is_abs_section (p->sec)
7715 && bfd_is_abs_section (p->sec->output_section))
7717 /* Input section has been discarded, either because
7718 it is a copy of a linkonce section or due to
7719 linker script /DISCARD/, so we'll be discarding
7722 else if (p->count != 0)
7724 srel = elf_section_data (p->sec)->sreloc;
7725 srel->size += p->count * RELOC_SIZE (htab);
7726 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7727 info->flags |= DF_TEXTREL;
7732 locals = elf_aarch64_locals (ibfd);
7736 symtab_hdr = &elf_symtab_hdr (ibfd);
7737 srel = htab->root.srelgot;
7738 for (i = 0; i < symtab_hdr->sh_info; i++)
7740 locals[i].got_offset = (bfd_vma) - 1;
7741 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7742 if (locals[i].got_refcount > 0)
7744 unsigned got_type = locals[i].got_type;
7745 if (got_type & GOT_TLSDESC_GD)
7747 locals[i].tlsdesc_got_jump_table_offset =
7748 (htab->root.sgotplt->size
7749 - aarch64_compute_jump_table_size (htab));
7750 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7751 locals[i].got_offset = (bfd_vma) - 2;
7754 if (got_type & GOT_TLS_GD)
7756 locals[i].got_offset = htab->root.sgot->size;
7757 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7760 if (got_type & GOT_TLS_IE
7761 || got_type & GOT_NORMAL)
7763 locals[i].got_offset = htab->root.sgot->size;
7764 htab->root.sgot->size += GOT_ENTRY_SIZE;
7767 if (got_type == GOT_UNKNOWN)
7773 if (got_type & GOT_TLSDESC_GD)
7775 htab->root.srelplt->size += RELOC_SIZE (htab);
7776 /* Note RELOC_COUNT not incremented here! */
7777 htab->tlsdesc_plt = (bfd_vma) - 1;
7780 if (got_type & GOT_TLS_GD)
7781 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7783 if (got_type & GOT_TLS_IE
7784 || got_type & GOT_NORMAL)
7785 htab->root.srelgot->size += RELOC_SIZE (htab);
7790 locals[i].got_refcount = (bfd_vma) - 1;
7796 /* Allocate global sym .plt and .got entries, and space for global
7797 sym dynamic relocs. */
7798 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7801 /* Allocate global ifunc sym .plt and .got entries, and space for global
7802 ifunc sym dynamic relocs. */
7803 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7806 /* Allocate .plt and .got entries, and space for local symbols. */
7807 htab_traverse (htab->loc_hash_table,
7808 elfNN_aarch64_allocate_local_dynrelocs,
7811 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7812 htab_traverse (htab->loc_hash_table,
7813 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7816 /* For every jump slot reserved in the sgotplt, reloc_count is
7817 incremented. However, when we reserve space for TLS descriptors,
7818 it's not incremented, so in order to compute the space reserved
7819 for them, it suffices to multiply the reloc count by the jump
7822 if (htab->root.srelplt)
7823 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
7825 if (htab->tlsdesc_plt)
7827 if (htab->root.splt->size == 0)
7828 htab->root.splt->size += PLT_ENTRY_SIZE;
7830 htab->tlsdesc_plt = htab->root.splt->size;
7831 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7833 /* If we're not using lazy TLS relocations, don't generate the
7834 GOT entry required. */
7835 if (!(info->flags & DF_BIND_NOW))
7837 htab->dt_tlsdesc_got = htab->root.sgot->size;
7838 htab->root.sgot->size += GOT_ENTRY_SIZE;
7842 /* Init mapping symbols information to use later to distingush between
7843 code and data while scanning for errata. */
7844 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
7845 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7847 if (!is_aarch64_elf (ibfd))
7849 bfd_elfNN_aarch64_init_maps (ibfd);
7852 /* We now have determined the sizes of the various dynamic sections.
7853 Allocate memory for them. */
7855 for (s = dynobj->sections; s != NULL; s = s->next)
7857 if ((s->flags & SEC_LINKER_CREATED) == 0)
7860 if (s == htab->root.splt
7861 || s == htab->root.sgot
7862 || s == htab->root.sgotplt
7863 || s == htab->root.iplt
7864 || s == htab->root.igotplt || s == htab->sdynbss)
7866 /* Strip this section if we don't need it; see the
7869 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7871 if (s->size != 0 && s != htab->root.srelplt)
7874 /* We use the reloc_count field as a counter if we need
7875 to copy relocs into the output file. */
7876 if (s != htab->root.srelplt)
7881 /* It's not one of our sections, so don't allocate space. */
7887 /* If we don't need this section, strip it from the
7888 output file. This is mostly to handle .rela.bss and
7889 .rela.plt. We must create both sections in
7890 create_dynamic_sections, because they must be created
7891 before the linker maps input sections to output
7892 sections. The linker does that before
7893 adjust_dynamic_symbol is called, and it is that
7894 function which decides whether anything needs to go
7895 into these sections. */
7897 s->flags |= SEC_EXCLUDE;
7901 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7904 /* Allocate memory for the section contents. We use bfd_zalloc
7905 here in case unused entries are not reclaimed before the
7906 section's contents are written out. This should not happen,
7907 but this way if it does, we get a R_AARCH64_NONE reloc instead
7909 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7910 if (s->contents == NULL)
7914 if (htab->root.dynamic_sections_created)
7916 /* Add some entries to the .dynamic section. We fill in the
7917 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7918 must add the entries now so that we get the correct size for
7919 the .dynamic section. The DT_DEBUG entry is filled in by the
7920 dynamic linker and used by the debugger. */
7921 #define add_dynamic_entry(TAG, VAL) \
7922 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7924 if (info->executable)
7926 if (!add_dynamic_entry (DT_DEBUG, 0))
7930 if (htab->root.splt->size != 0)
7932 if (!add_dynamic_entry (DT_PLTGOT, 0)
7933 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7934 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7935 || !add_dynamic_entry (DT_JMPREL, 0))
7938 if (htab->tlsdesc_plt
7939 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7940 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7946 if (!add_dynamic_entry (DT_RELA, 0)
7947 || !add_dynamic_entry (DT_RELASZ, 0)
7948 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7951 /* If any dynamic relocs apply to a read-only section,
7952 then we need a DT_TEXTREL entry. */
7953 if ((info->flags & DF_TEXTREL) == 0)
7954 elf_link_hash_traverse (& htab->root, aarch64_readonly_dynrelocs,
7957 if ((info->flags & DF_TEXTREL) != 0)
7959 if (!add_dynamic_entry (DT_TEXTREL, 0))
7964 #undef add_dynamic_entry
7970 elf_aarch64_update_plt_entry (bfd *output_bfd,
7971 bfd_reloc_code_real_type r_type,
7972 bfd_byte *plt_entry, bfd_vma value)
7974 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7976 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
7980 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7981 struct elf_aarch64_link_hash_table
7982 *htab, bfd *output_bfd,
7983 struct bfd_link_info *info)
7985 bfd_byte *plt_entry;
7988 bfd_vma gotplt_entry_address;
7989 bfd_vma plt_entry_address;
7990 Elf_Internal_Rela rela;
7992 asection *plt, *gotplt, *relplt;
7994 /* When building a static executable, use .iplt, .igot.plt and
7995 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7996 if (htab->root.splt != NULL)
7998 plt = htab->root.splt;
7999 gotplt = htab->root.sgotplt;
8000 relplt = htab->root.srelplt;
8004 plt = htab->root.iplt;
8005 gotplt = htab->root.igotplt;
8006 relplt = htab->root.irelplt;
8009 /* Get the index in the procedure linkage table which
8010 corresponds to this symbol. This is the index of this symbol
8011 in all the symbols for which we are making plt entries. The
8012 first entry in the procedure linkage table is reserved.
8014 Get the offset into the .got table of the entry that
8015 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
8016 bytes. The first three are reserved for the dynamic linker.
8018 For static executables, we don't reserve anything. */
8020 if (plt == htab->root.splt)
8022 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
8023 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
8027 plt_index = h->plt.offset / htab->plt_entry_size;
8028 got_offset = plt_index * GOT_ENTRY_SIZE;
8031 plt_entry = plt->contents + h->plt.offset;
8032 plt_entry_address = plt->output_section->vma
8033 + plt->output_offset + h->plt.offset;
8034 gotplt_entry_address = gotplt->output_section->vma +
8035 gotplt->output_offset + got_offset;
8037 /* Copy in the boiler-plate for the PLTn entry. */
8038 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
8040 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8041 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8042 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8044 PG (gotplt_entry_address) -
8045 PG (plt_entry_address));
8047 /* Fill in the lo12 bits for the load from the pltgot. */
8048 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8050 PG_OFFSET (gotplt_entry_address));
8052 /* Fill in the lo12 bits for the add from the pltgot entry. */
8053 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8055 PG_OFFSET (gotplt_entry_address));
8057 /* All the GOTPLT Entries are essentially initialized to PLT0. */
8058 bfd_put_NN (output_bfd,
8059 plt->output_section->vma + plt->output_offset,
8060 gotplt->contents + got_offset);
8062 rela.r_offset = gotplt_entry_address;
8064 if (h->dynindx == -1
8065 || ((info->executable
8066 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
8068 && h->type == STT_GNU_IFUNC))
8070 /* If an STT_GNU_IFUNC symbol is locally defined, generate
8071 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
8072 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
8073 rela.r_addend = (h->root.u.def.value
8074 + h->root.u.def.section->output_section->vma
8075 + h->root.u.def.section->output_offset);
8079 /* Fill in the entry in the .rela.plt section. */
8080 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
8084 /* Compute the relocation entry to used based on PLT index and do
8085 not adjust reloc_count. The reloc_count has already been adjusted
8086 to account for this entry. */
8087 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
8088 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8091 /* Size sections even though they're not dynamic. We use it to setup
8092 _TLS_MODULE_BASE_, if needed. */
8095 elfNN_aarch64_always_size_sections (bfd *output_bfd,
8096 struct bfd_link_info *info)
8100 if (info->relocatable)
8103 tls_sec = elf_hash_table (info)->tls_sec;
8107 struct elf_link_hash_entry *tlsbase;
8109 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
8110 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
8114 struct bfd_link_hash_entry *h = NULL;
8115 const struct elf_backend_data *bed =
8116 get_elf_backend_data (output_bfd);
8118 if (!(_bfd_generic_link_add_one_symbol
8119 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
8120 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
8123 tlsbase->type = STT_TLS;
8124 tlsbase = (struct elf_link_hash_entry *) h;
8125 tlsbase->def_regular = 1;
8126 tlsbase->other = STV_HIDDEN;
8127 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
8134 /* Finish up dynamic symbol handling. We set the contents of various
8135 dynamic sections here. */
8137 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
8138 struct bfd_link_info *info,
8139 struct elf_link_hash_entry *h,
8140 Elf_Internal_Sym *sym)
8142 struct elf_aarch64_link_hash_table *htab;
8143 htab = elf_aarch64_hash_table (info);
8145 if (h->plt.offset != (bfd_vma) - 1)
8147 asection *plt, *gotplt, *relplt;
8149 /* This symbol has an entry in the procedure linkage table. Set
8152 /* When building a static executable, use .iplt, .igot.plt and
8153 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8154 if (htab->root.splt != NULL)
8156 plt = htab->root.splt;
8157 gotplt = htab->root.sgotplt;
8158 relplt = htab->root.srelplt;
8162 plt = htab->root.iplt;
8163 gotplt = htab->root.igotplt;
8164 relplt = htab->root.irelplt;
8167 /* This symbol has an entry in the procedure linkage table. Set
8169 if ((h->dynindx == -1
8170 && !((h->forced_local || info->executable)
8172 && h->type == STT_GNU_IFUNC))
8178 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
8179 if (!h->def_regular)
8181 /* Mark the symbol as undefined, rather than as defined in
8182 the .plt section. */
8183 sym->st_shndx = SHN_UNDEF;
8184 /* If the symbol is weak we need to clear the value.
8185 Otherwise, the PLT entry would provide a definition for
8186 the symbol even if the symbol wasn't defined anywhere,
8187 and so the symbol would never be NULL. Leave the value if
8188 there were any relocations where pointer equality matters
8189 (this is a clue for the dynamic linker, to make function
8190 pointer comparisons work between an application and shared
8192 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
8197 if (h->got.offset != (bfd_vma) - 1
8198 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
8200 Elf_Internal_Rela rela;
8203 /* This symbol has an entry in the global offset table. Set it
8205 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
8208 rela.r_offset = (htab->root.sgot->output_section->vma
8209 + htab->root.sgot->output_offset
8210 + (h->got.offset & ~(bfd_vma) 1));
8213 && h->type == STT_GNU_IFUNC)
8217 /* Generate R_AARCH64_GLOB_DAT. */
8224 if (!h->pointer_equality_needed)
8227 /* For non-shared object, we can't use .got.plt, which
8228 contains the real function address if we need pointer
8229 equality. We load the GOT entry with the PLT entry. */
8230 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
8231 bfd_put_NN (output_bfd, (plt->output_section->vma
8232 + plt->output_offset
8234 htab->root.sgot->contents
8235 + (h->got.offset & ~(bfd_vma) 1));
8239 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
8241 if (!h->def_regular)
8244 BFD_ASSERT ((h->got.offset & 1) != 0);
8245 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
8246 rela.r_addend = (h->root.u.def.value
8247 + h->root.u.def.section->output_section->vma
8248 + h->root.u.def.section->output_offset);
8253 BFD_ASSERT ((h->got.offset & 1) == 0);
8254 bfd_put_NN (output_bfd, (bfd_vma) 0,
8255 htab->root.sgot->contents + h->got.offset);
8256 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
8260 loc = htab->root.srelgot->contents;
8261 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
8262 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8267 Elf_Internal_Rela rela;
8270 /* This symbol needs a copy reloc. Set it up. */
8272 if (h->dynindx == -1
8273 || (h->root.type != bfd_link_hash_defined
8274 && h->root.type != bfd_link_hash_defweak)
8275 || htab->srelbss == NULL)
8278 rela.r_offset = (h->root.u.def.value
8279 + h->root.u.def.section->output_section->vma
8280 + h->root.u.def.section->output_offset);
8281 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
8283 loc = htab->srelbss->contents;
8284 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
8285 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8288 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
8289 be NULL for local symbols. */
8291 && (h == elf_hash_table (info)->hdynamic
8292 || h == elf_hash_table (info)->hgot))
8293 sym->st_shndx = SHN_ABS;
8298 /* Finish up local dynamic symbol handling. We set the contents of
8299 various dynamic sections here. */
8302 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
8304 struct elf_link_hash_entry *h
8305 = (struct elf_link_hash_entry *) *slot;
8306 struct bfd_link_info *info
8307 = (struct bfd_link_info *) inf;
8309 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
8314 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
8315 struct elf_aarch64_link_hash_table
8318 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
8319 small and large plts and at the minute just generates
8322 /* PLT0 of the small PLT looks like this in ELF64 -
8323 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
8324 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
8325 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
8327 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
8328 // GOTPLT entry for this.
8330 PLT0 will be slightly different in ELF32 due to different got entry
8333 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
8337 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
8339 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
8342 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
8343 + htab->root.sgotplt->output_offset
8344 + GOT_ENTRY_SIZE * 2);
8346 plt_base = htab->root.splt->output_section->vma +
8347 htab->root.splt->output_offset;
8349 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8350 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8351 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8352 htab->root.splt->contents + 4,
8353 PG (plt_got_2nd_ent) - PG (plt_base + 4));
8355 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8356 htab->root.splt->contents + 8,
8357 PG_OFFSET (plt_got_2nd_ent));
8359 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8360 htab->root.splt->contents + 12,
8361 PG_OFFSET (plt_got_2nd_ent));
8365 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
8366 struct bfd_link_info *info)
8368 struct elf_aarch64_link_hash_table *htab;
8372 htab = elf_aarch64_hash_table (info);
8373 dynobj = htab->root.dynobj;
8374 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
8376 if (htab->root.dynamic_sections_created)
8378 ElfNN_External_Dyn *dyncon, *dynconend;
8380 if (sdyn == NULL || htab->root.sgot == NULL)
8383 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
8384 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
8385 for (; dyncon < dynconend; dyncon++)
8387 Elf_Internal_Dyn dyn;
8390 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
8398 s = htab->root.sgotplt;
8399 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
8403 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
8407 s = htab->root.srelplt;
8408 dyn.d_un.d_val = s->size;
8412 /* The procedure linkage table relocs (DT_JMPREL) should
8413 not be included in the overall relocs (DT_RELA).
8414 Therefore, we override the DT_RELASZ entry here to
8415 make it not include the JMPREL relocs. Since the
8416 linker script arranges for .rela.plt to follow all
8417 other relocation sections, we don't have to worry
8418 about changing the DT_RELA entry. */
8419 if (htab->root.srelplt != NULL)
8421 s = htab->root.srelplt;
8422 dyn.d_un.d_val -= s->size;
8426 case DT_TLSDESC_PLT:
8427 s = htab->root.splt;
8428 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8429 + htab->tlsdesc_plt;
8432 case DT_TLSDESC_GOT:
8433 s = htab->root.sgot;
8434 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8435 + htab->dt_tlsdesc_got;
8439 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
8444 /* Fill in the special first entry in the procedure linkage table. */
8445 if (htab->root.splt && htab->root.splt->size > 0)
8447 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
8449 elf_section_data (htab->root.splt->output_section)->
8450 this_hdr.sh_entsize = htab->plt_entry_size;
8453 if (htab->tlsdesc_plt)
8455 bfd_put_NN (output_bfd, (bfd_vma) 0,
8456 htab->root.sgot->contents + htab->dt_tlsdesc_got);
8458 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
8459 elfNN_aarch64_tlsdesc_small_plt_entry,
8460 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
8463 bfd_vma adrp1_addr =
8464 htab->root.splt->output_section->vma
8465 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
8467 bfd_vma adrp2_addr = adrp1_addr + 4;
8470 htab->root.sgot->output_section->vma
8471 + htab->root.sgot->output_offset;
8473 bfd_vma pltgot_addr =
8474 htab->root.sgotplt->output_section->vma
8475 + htab->root.sgotplt->output_offset;
8477 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
8479 bfd_byte *plt_entry =
8480 htab->root.splt->contents + htab->tlsdesc_plt;
8482 /* adrp x2, DT_TLSDESC_GOT */
8483 elf_aarch64_update_plt_entry (output_bfd,
8484 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8486 (PG (dt_tlsdesc_got)
8487 - PG (adrp1_addr)));
8490 elf_aarch64_update_plt_entry (output_bfd,
8491 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8494 - PG (adrp2_addr)));
8496 /* ldr x2, [x2, #0] */
8497 elf_aarch64_update_plt_entry (output_bfd,
8498 BFD_RELOC_AARCH64_LDSTNN_LO12,
8500 PG_OFFSET (dt_tlsdesc_got));
8503 elf_aarch64_update_plt_entry (output_bfd,
8504 BFD_RELOC_AARCH64_ADD_LO12,
8506 PG_OFFSET (pltgot_addr));
8511 if (htab->root.sgotplt)
8513 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
8515 (*_bfd_error_handler)
8516 (_("discarded output section: `%A'"), htab->root.sgotplt);
8520 /* Fill in the first three entries in the global offset table. */
8521 if (htab->root.sgotplt->size > 0)
8523 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
8525 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
8526 bfd_put_NN (output_bfd,
8528 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
8529 bfd_put_NN (output_bfd,
8531 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
8534 if (htab->root.sgot)
8536 if (htab->root.sgot->size > 0)
8539 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
8540 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
8544 elf_section_data (htab->root.sgotplt->output_section)->
8545 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
8548 if (htab->root.sgot && htab->root.sgot->size > 0)
8549 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
8552 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
8553 htab_traverse (htab->loc_hash_table,
8554 elfNN_aarch64_finish_local_dynamic_symbol,
8560 /* Return address for Ith PLT stub in section PLT, for relocation REL
8561 or (bfd_vma) -1 if it should not be included. */
8564 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
8565 const arelent *rel ATTRIBUTE_UNUSED)
8567 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
8571 /* We use this so we can override certain functions
8572 (though currently we don't). */
8574 const struct elf_size_info elfNN_aarch64_size_info =
8576 sizeof (ElfNN_External_Ehdr),
8577 sizeof (ElfNN_External_Phdr),
8578 sizeof (ElfNN_External_Shdr),
8579 sizeof (ElfNN_External_Rel),
8580 sizeof (ElfNN_External_Rela),
8581 sizeof (ElfNN_External_Sym),
8582 sizeof (ElfNN_External_Dyn),
8583 sizeof (Elf_External_Note),
8584 4, /* Hash table entry size. */
8585 1, /* Internal relocs per external relocs. */
8586 ARCH_SIZE, /* Arch size. */
8587 LOG_FILE_ALIGN, /* Log_file_align. */
8588 ELFCLASSNN, EV_CURRENT,
8589 bfd_elfNN_write_out_phdrs,
8590 bfd_elfNN_write_shdrs_and_ehdr,
8591 bfd_elfNN_checksum_contents,
8592 bfd_elfNN_write_relocs,
8593 bfd_elfNN_swap_symbol_in,
8594 bfd_elfNN_swap_symbol_out,
8595 bfd_elfNN_slurp_reloc_table,
8596 bfd_elfNN_slurp_symbol_table,
8597 bfd_elfNN_swap_dyn_in,
8598 bfd_elfNN_swap_dyn_out,
8599 bfd_elfNN_swap_reloc_in,
8600 bfd_elfNN_swap_reloc_out,
8601 bfd_elfNN_swap_reloca_in,
8602 bfd_elfNN_swap_reloca_out
8605 #define ELF_ARCH bfd_arch_aarch64
8606 #define ELF_MACHINE_CODE EM_AARCH64
8607 #define ELF_MAXPAGESIZE 0x10000
8608 #define ELF_MINPAGESIZE 0x1000
8609 #define ELF_COMMONPAGESIZE 0x1000
8611 #define bfd_elfNN_close_and_cleanup \
8612 elfNN_aarch64_close_and_cleanup
8614 #define bfd_elfNN_bfd_free_cached_info \
8615 elfNN_aarch64_bfd_free_cached_info
8617 #define bfd_elfNN_bfd_is_target_special_symbol \
8618 elfNN_aarch64_is_target_special_symbol
8620 #define bfd_elfNN_bfd_link_hash_table_create \
8621 elfNN_aarch64_link_hash_table_create
8623 #define bfd_elfNN_bfd_merge_private_bfd_data \
8624 elfNN_aarch64_merge_private_bfd_data
8626 #define bfd_elfNN_bfd_print_private_bfd_data \
8627 elfNN_aarch64_print_private_bfd_data
8629 #define bfd_elfNN_bfd_reloc_type_lookup \
8630 elfNN_aarch64_reloc_type_lookup
8632 #define bfd_elfNN_bfd_reloc_name_lookup \
8633 elfNN_aarch64_reloc_name_lookup
8635 #define bfd_elfNN_bfd_set_private_flags \
8636 elfNN_aarch64_set_private_flags
8638 #define bfd_elfNN_find_inliner_info \
8639 elfNN_aarch64_find_inliner_info
8641 #define bfd_elfNN_find_nearest_line \
8642 elfNN_aarch64_find_nearest_line
8644 #define bfd_elfNN_mkobject \
8645 elfNN_aarch64_mkobject
8647 #define bfd_elfNN_new_section_hook \
8648 elfNN_aarch64_new_section_hook
8650 #define elf_backend_adjust_dynamic_symbol \
8651 elfNN_aarch64_adjust_dynamic_symbol
8653 #define elf_backend_always_size_sections \
8654 elfNN_aarch64_always_size_sections
8656 #define elf_backend_check_relocs \
8657 elfNN_aarch64_check_relocs
8659 #define elf_backend_copy_indirect_symbol \
8660 elfNN_aarch64_copy_indirect_symbol
8662 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
8663 to them in our hash. */
8664 #define elf_backend_create_dynamic_sections \
8665 elfNN_aarch64_create_dynamic_sections
8667 #define elf_backend_init_index_section \
8668 _bfd_elf_init_2_index_sections
8670 #define elf_backend_finish_dynamic_sections \
8671 elfNN_aarch64_finish_dynamic_sections
8673 #define elf_backend_finish_dynamic_symbol \
8674 elfNN_aarch64_finish_dynamic_symbol
8676 #define elf_backend_gc_sweep_hook \
8677 elfNN_aarch64_gc_sweep_hook
8679 #define elf_backend_object_p \
8680 elfNN_aarch64_object_p
8682 #define elf_backend_output_arch_local_syms \
8683 elfNN_aarch64_output_arch_local_syms
8685 #define elf_backend_plt_sym_val \
8686 elfNN_aarch64_plt_sym_val
8688 #define elf_backend_post_process_headers \
8689 elfNN_aarch64_post_process_headers
8691 #define elf_backend_relocate_section \
8692 elfNN_aarch64_relocate_section
8694 #define elf_backend_reloc_type_class \
8695 elfNN_aarch64_reloc_type_class
8697 #define elf_backend_section_from_shdr \
8698 elfNN_aarch64_section_from_shdr
8700 #define elf_backend_size_dynamic_sections \
8701 elfNN_aarch64_size_dynamic_sections
8703 #define elf_backend_size_info \
8704 elfNN_aarch64_size_info
8706 #define elf_backend_write_section \
8707 elfNN_aarch64_write_section
8709 #define elf_backend_can_refcount 1
8710 #define elf_backend_can_gc_sections 1
8711 #define elf_backend_plt_readonly 1
8712 #define elf_backend_want_got_plt 1
8713 #define elf_backend_want_plt_sym 0
8714 #define elf_backend_may_use_rel_p 0
8715 #define elf_backend_may_use_rela_p 1
8716 #define elf_backend_default_use_rela_p 1
8717 #define elf_backend_rela_normal 1
8718 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8719 #define elf_backend_default_execstack 0
8720 #define elf_backend_extern_protected_data 1
8722 #undef elf_backend_obj_attrs_section
8723 #define elf_backend_obj_attrs_section ".ARM.attributes"
8725 #include "elfNN-target.h"