1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 /* Notes on implementation:
   Thread Local Storage (TLS)
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
   adrp  x0, :tlsgd:foo                        R_AARCH64_TLSGD_ADR_PAGE21(foo)
   add   x0, :tlsgd_lo12:foo                   R_AARCH64_TLSGD_ADD_LO12_NC(foo)
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
   traditional TLS mechanism.
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
   global and local TLS symbols.  Note that this is different from
   non-TLS local symbols, which do not need a GOT entry.
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
   will subsequently fix up the offset.  For local TLS symbols the static
   linker fixes up the offset itself.
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
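
   As an illustration only, the double GOT entry described above can be
   pictured as two consecutive GOT slots:

     traditional TLS (tls_index):
       GOT[n]     module id entry, relocated via R_AARCH64_TLS_DTPMOD
       GOT[n+1]   offset entry, relocated via R_AARCH64_TLS_DTPREL for
                  global symbols and fixed up directly by the static
                  linker for local symbols

     TLS descriptor:
       GOT[n]     relocated via R_AARCH64_TLSDESC; the loader fixes up
                  the descriptor (conventionally a resolver function and
                  its argument spanning both slots)
       GOT[n+1]   second half of the descriptor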
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
95 elfNN_aarch64_check_relocs()
97 This function is invoked for each relocation.
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
   spotted.  Local symbol data structures are created once, when the
   first local symbol is seen.
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
107 elfNN_aarch64_allocate_dynrelocs ()
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT, for a TLS descriptor symbol we
   allocate space for one relocation on the slot.  Record the GOT offset
   for this symbol.
115 elfNN_aarch64_size_dynamic_sections ()
   Iterate over all input BFDs, looking in the local symbol data
   structures constructed earlier for local TLS symbols, and allocate
   them double GOT slots along with space for a single GOT relocation.
   Update the
120 local symbol structure to record the GOT offset allocated.
122 elfNN_aarch64_relocate_section ()
124 Calls elfNN_aarch64_final_link_relocate ()
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
   relocations are emitted only once, the first time a TLS symbol is
   encountered.  The implementation uses the LSB of the GOT offset to
   flag that the relevant GOT relocations for a symbol have already been
   emitted.  All of the TLS code that uses the GOT offset needs to take
   care to mask out this flag bit before using the offset (an
   illustrative sketch follows this comment).
134 elfNN_aarch64_final_link_relocate ()
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
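
/* Illustrative sketch only (not used by the backend; the variable names
   here are hypothetical): applying the GOT-offset flag convention
   described in the overview above.

     bfd_vma off = got_offset_for_symbol;
     bfd_boolean relocs_emitted_p = (off & 1) != 0;
     off &= ~(bfd_vma) 1;		-- the real GOT offset

   Every consumer of the offset must perform the final masking step.  */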
#include "sysdep.h"
#include "bfd.h"
#include "libiberty.h"
#include "libbfd.h"
#include "bfd_stdint.h"
#include "elf-bfd.h"
#include "bfdlink.h"
#include "objalloc.h"
#include "elf/aarch64.h"
#include "elfxx-aarch64.h"
#if ARCH_SIZE == 64
#define AARCH64_R(NAME)		R_AARCH64_ ## NAME
#define AARCH64_R_STR(NAME)	"R_AARCH64_" #NAME
#define HOWTO64(...)		HOWTO (__VA_ARGS__)
#define HOWTO32(...)		EMPTY_HOWTO (0)
#define LOG_FILE_ALIGN	3
#endif

#if ARCH_SIZE == 32
#define AARCH64_R(NAME)		R_AARCH64_P32_ ## NAME
#define AARCH64_R_STR(NAME)	"R_AARCH64_P32_" #NAME
#define HOWTO64(...)		EMPTY_HOWTO (0)
#define HOWTO32(...)		HOWTO (__VA_ARGS__)
#define LOG_FILE_ALIGN	2
#endif
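
/* For example (illustrative note), with ARCH_SIZE == 64 the expression
   AARCH64_R (ADR_PREL_PG_HI21) expands to R_AARCH64_ADR_PREL_PG_HI21,
   while with ARCH_SIZE == 32 it expands to
   R_AARCH64_P32_ADR_PREL_PG_HI21; HOWTO64 and HOWTO32 likewise select
   which howto table entries are real and which are EMPTY_HOWTO
   placeholders for the other word size.  */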
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
188 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
190 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
191 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC)
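
/* Example use of the classification macros above (illustrative only;
   BFD_R_TYPE stands for some bfd_reloc_code_real_type value):

     if (IS_AARCH64_TLS_RELOC (BFD_R_TYPE)
	 && !IS_AARCH64_TLSDESC_RELOC (BFD_R_TYPE))
       ... the relocation is a GD/IE/LE TLS relocation rather than a
	   TLS descriptor one ...  */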
204 #define ELIMINATE_COPY_RELOCS 0
206 /* Return size of a relocation entry. HTAB is the bfd's
   elf_aarch64_link_hash_table.  */
208 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
210 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
211 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
212 #define PLT_ENTRY_SIZE (32)
213 #define PLT_SMALL_ENTRY_SIZE (16)
214 #define PLT_TLSDESC_ENTRY_SIZE (32)
216 /* Encoding of the nop instruction */
217 #define INSN_NOP 0xd503201f
219 #define aarch64_compute_jump_table_size(htab) \
220 (((htab)->root.srelplt == NULL) ? 0 \
221 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
/* The first entry in a procedure linkage table looks like this.  It is
   used when the distance between the PLTGOT and the PLT is < 4GB.  Note
   that the dynamic linker gets &PLTGOT[2] in x16 and needs to work out
   PLTGOT[1] by using an address of [x16,#-GOT_ENTRY_SIZE].  */
228 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
{
  0xf0, 0x7b, 0xbf, 0xa9,	/* stp x16, x30, [sp, #-16]!  */
  0x10, 0x00, 0x00, 0x90,	/* adrp x16, (GOT+16)  */
#if ARCH_SIZE == 64
  0x11, 0x0A, 0x40, 0xf9,	/* ldr x17, [x16, #PLT_GOT+0x10]  */
  0x10, 0x42, 0x00, 0x91,	/* add x16, x16,#PLT_GOT+0x10  */
#else
  0x11, 0x0A, 0x40, 0xb9,	/* ldr w17, [x16, #PLT_GOT+0x8]  */
  0x10, 0x22, 0x00, 0x11,	/* add w16, w16,#PLT_GOT+0x8  */
#endif
  0x20, 0x02, 0x1f, 0xd6,	/* br x17  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
};
/* A per-function entry in the procedure linkage table looks like this
   when the distance between the PLTGOT and the PLT is < 4GB.  */
248 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
{
  0x10, 0x00, 0x00, 0x90,	/* adrp x16, PLTGOT + n * 8  */
#if ARCH_SIZE == 64
  0x11, 0x02, 0x40, 0xf9,	/* ldr x17, [x16, PLTGOT + n * 8]  */
  0x10, 0x02, 0x00, 0x91,	/* add x16, x16, :lo12:PLTGOT + n * 8  */
#else
  0x11, 0x02, 0x40, 0xb9,	/* ldr w17, [x16, PLTGOT + n * 4]  */
  0x10, 0x02, 0x00, 0x11,	/* add w16, w16, :lo12:PLTGOT + n * 4  */
#endif
  0x20, 0x02, 0x1f, 0xd6,	/* br x17.  */
};
261 static const bfd_byte
262 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
{
  0xe2, 0x0f, 0xbf, 0xa9,	/* stp x2, x3, [sp, #-16]!  */
  0x02, 0x00, 0x00, 0x90,	/* adrp x2, 0  */
  0x03, 0x00, 0x00, 0x90,	/* adrp x3, 0  */
#if ARCH_SIZE == 64
  0x42, 0x00, 0x40, 0xf9,	/* ldr x2, [x2, #0]  */
  0x63, 0x00, 0x00, 0x91,	/* add x3, x3, 0  */
#else
  0x42, 0x00, 0x40, 0xb9,	/* ldr w2, [x2, #0]  */
  0x63, 0x00, 0x00, 0x11,	/* add w3, w3, 0  */
#endif
  0x40, 0x00, 0x1f, 0xd6,	/* br x2  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
  0x1f, 0x20, 0x03, 0xd5,	/* nop  */
};
279 #define elf_info_to_howto elfNN_aarch64_info_to_howto
280 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
282 #define AARCH64_ELF_ABI_VERSION 0
284 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
285 #define ALL_ONES (~ (bfd_vma) 0)
/* Indexed by the bfd internal reloc enumerators.
   Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
   in reloc.c.  */
291 static reloc_howto_type elfNN_aarch64_howto_table[] =
295 /* Basic data relocations. */
298 HOWTO (R_AARCH64_NULL, /* type */
300 3, /* size (0 = byte, 1 = short, 2 = long) */
302 FALSE, /* pc_relative */
304 complain_overflow_dont, /* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_AARCH64_NULL", /* name */
307 FALSE, /* partial_inplace */
310 FALSE), /* pcrel_offset */
312 HOWTO (R_AARCH64_NONE, /* type */
314 3, /* size (0 = byte, 1 = short, 2 = long) */
316 FALSE, /* pc_relative */
318 complain_overflow_dont, /* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_AARCH64_NONE", /* name */
321 FALSE, /* partial_inplace */
324 FALSE), /* pcrel_offset */
328 HOWTO64 (AARCH64_R (ABS64), /* type */
330 4, /* size (4 = long long) */
332 FALSE, /* pc_relative */
334 complain_overflow_unsigned, /* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 AARCH64_R_STR (ABS64), /* name */
337 FALSE, /* partial_inplace */
338 ALL_ONES, /* src_mask */
339 ALL_ONES, /* dst_mask */
340 FALSE), /* pcrel_offset */
343 HOWTO (AARCH64_R (ABS32), /* type */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
347 FALSE, /* pc_relative */
349 complain_overflow_unsigned, /* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 AARCH64_R_STR (ABS32), /* name */
352 FALSE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
358 HOWTO (AARCH64_R (ABS16), /* type */
360 1, /* size (0 = byte, 1 = short, 2 = long) */
362 FALSE, /* pc_relative */
364 complain_overflow_unsigned, /* complain_on_overflow */
365 bfd_elf_generic_reloc, /* special_function */
366 AARCH64_R_STR (ABS16), /* name */
367 FALSE, /* partial_inplace */
368 0xffff, /* src_mask */
369 0xffff, /* dst_mask */
370 FALSE), /* pcrel_offset */
372 /* .xword: (S+A-P) */
373 HOWTO64 (AARCH64_R (PREL64), /* type */
375 4, /* size (4 = long long) */
377 TRUE, /* pc_relative */
379 complain_overflow_signed, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 AARCH64_R_STR (PREL64), /* name */
382 FALSE, /* partial_inplace */
383 ALL_ONES, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 TRUE), /* pcrel_offset */
388 HOWTO (AARCH64_R (PREL32), /* type */
390 2, /* size (0 = byte, 1 = short, 2 = long) */
392 TRUE, /* pc_relative */
394 complain_overflow_signed, /* complain_on_overflow */
395 bfd_elf_generic_reloc, /* special_function */
396 AARCH64_R_STR (PREL32), /* name */
397 FALSE, /* partial_inplace */
398 0xffffffff, /* src_mask */
399 0xffffffff, /* dst_mask */
400 TRUE), /* pcrel_offset */
403 HOWTO (AARCH64_R (PREL16), /* type */
405 1, /* size (0 = byte, 1 = short, 2 = long) */
407 TRUE, /* pc_relative */
409 complain_overflow_signed, /* complain_on_overflow */
410 bfd_elf_generic_reloc, /* special_function */
411 AARCH64_R_STR (PREL16), /* name */
412 FALSE, /* partial_inplace */
413 0xffff, /* src_mask */
414 0xffff, /* dst_mask */
415 TRUE), /* pcrel_offset */
417 /* Group relocations to create a 16, 32, 48 or 64 bit
418 unsigned data or abs address inline. */
420 /* MOVZ: ((S+A) >> 0) & 0xffff */
421 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
423 2, /* size (0 = byte, 1 = short, 2 = long) */
425 FALSE, /* pc_relative */
427 complain_overflow_unsigned, /* complain_on_overflow */
428 bfd_elf_generic_reloc, /* special_function */
429 AARCH64_R_STR (MOVW_UABS_G0), /* name */
430 FALSE, /* partial_inplace */
431 0xffff, /* src_mask */
432 0xffff, /* dst_mask */
433 FALSE), /* pcrel_offset */
435 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
436 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
438 2, /* size (0 = byte, 1 = short, 2 = long) */
440 FALSE, /* pc_relative */
442 complain_overflow_dont, /* complain_on_overflow */
443 bfd_elf_generic_reloc, /* special_function */
444 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
445 FALSE, /* partial_inplace */
446 0xffff, /* src_mask */
447 0xffff, /* dst_mask */
448 FALSE), /* pcrel_offset */
450 /* MOVZ: ((S+A) >> 16) & 0xffff */
451 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
453 2, /* size (0 = byte, 1 = short, 2 = long) */
455 FALSE, /* pc_relative */
457 complain_overflow_unsigned, /* complain_on_overflow */
458 bfd_elf_generic_reloc, /* special_function */
459 AARCH64_R_STR (MOVW_UABS_G1), /* name */
460 FALSE, /* partial_inplace */
461 0xffff, /* src_mask */
462 0xffff, /* dst_mask */
463 FALSE), /* pcrel_offset */
465 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
466 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
470 FALSE, /* pc_relative */
472 complain_overflow_dont, /* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
475 FALSE, /* partial_inplace */
476 0xffff, /* src_mask */
477 0xffff, /* dst_mask */
478 FALSE), /* pcrel_offset */
480 /* MOVZ: ((S+A) >> 32) & 0xffff */
481 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
483 2, /* size (0 = byte, 1 = short, 2 = long) */
485 FALSE, /* pc_relative */
487 complain_overflow_unsigned, /* complain_on_overflow */
488 bfd_elf_generic_reloc, /* special_function */
489 AARCH64_R_STR (MOVW_UABS_G2), /* name */
490 FALSE, /* partial_inplace */
491 0xffff, /* src_mask */
492 0xffff, /* dst_mask */
493 FALSE), /* pcrel_offset */
495 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
496 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
500 FALSE, /* pc_relative */
502 complain_overflow_dont, /* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
505 FALSE, /* partial_inplace */
506 0xffff, /* src_mask */
507 0xffff, /* dst_mask */
508 FALSE), /* pcrel_offset */
510 /* MOVZ: ((S+A) >> 48) & 0xffff */
511 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
513 2, /* size (0 = byte, 1 = short, 2 = long) */
515 FALSE, /* pc_relative */
517 complain_overflow_unsigned, /* complain_on_overflow */
518 bfd_elf_generic_reloc, /* special_function */
519 AARCH64_R_STR (MOVW_UABS_G3), /* name */
520 FALSE, /* partial_inplace */
521 0xffff, /* src_mask */
522 0xffff, /* dst_mask */
523 FALSE), /* pcrel_offset */
525 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
526 signed data or abs address inline. Will change instruction
527 to MOVN or MOVZ depending on sign of calculated value. */
529 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
530 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
532 2, /* size (0 = byte, 1 = short, 2 = long) */
534 FALSE, /* pc_relative */
536 complain_overflow_signed, /* complain_on_overflow */
537 bfd_elf_generic_reloc, /* special_function */
538 AARCH64_R_STR (MOVW_SABS_G0), /* name */
539 FALSE, /* partial_inplace */
540 0xffff, /* src_mask */
541 0xffff, /* dst_mask */
542 FALSE), /* pcrel_offset */
544 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
545 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
547 2, /* size (0 = byte, 1 = short, 2 = long) */
549 FALSE, /* pc_relative */
551 complain_overflow_signed, /* complain_on_overflow */
552 bfd_elf_generic_reloc, /* special_function */
553 AARCH64_R_STR (MOVW_SABS_G1), /* name */
554 FALSE, /* partial_inplace */
555 0xffff, /* src_mask */
556 0xffff, /* dst_mask */
557 FALSE), /* pcrel_offset */
559 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
560 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
562 2, /* size (0 = byte, 1 = short, 2 = long) */
564 FALSE, /* pc_relative */
566 complain_overflow_signed, /* complain_on_overflow */
567 bfd_elf_generic_reloc, /* special_function */
568 AARCH64_R_STR (MOVW_SABS_G2), /* name */
569 FALSE, /* partial_inplace */
570 0xffff, /* src_mask */
571 0xffff, /* dst_mask */
572 FALSE), /* pcrel_offset */
574 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
575 addresses: PG(x) is (x & ~0xfff). */
577 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
578 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
582 TRUE, /* pc_relative */
584 complain_overflow_signed, /* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 AARCH64_R_STR (LD_PREL_LO19), /* name */
587 FALSE, /* partial_inplace */
588 0x7ffff, /* src_mask */
589 0x7ffff, /* dst_mask */
590 TRUE), /* pcrel_offset */
592 /* ADR: (S+A-P) & 0x1fffff */
593 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
595 2, /* size (0 = byte, 1 = short, 2 = long) */
597 TRUE, /* pc_relative */
599 complain_overflow_signed, /* complain_on_overflow */
600 bfd_elf_generic_reloc, /* special_function */
601 AARCH64_R_STR (ADR_PREL_LO21), /* name */
602 FALSE, /* partial_inplace */
603 0x1fffff, /* src_mask */
604 0x1fffff, /* dst_mask */
605 TRUE), /* pcrel_offset */
607 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
608 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
612 TRUE, /* pc_relative */
614 complain_overflow_signed, /* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
617 FALSE, /* partial_inplace */
618 0x1fffff, /* src_mask */
619 0x1fffff, /* dst_mask */
620 TRUE), /* pcrel_offset */
622 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
623 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
625 2, /* size (0 = byte, 1 = short, 2 = long) */
627 TRUE, /* pc_relative */
629 complain_overflow_dont, /* complain_on_overflow */
630 bfd_elf_generic_reloc, /* special_function */
631 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
632 FALSE, /* partial_inplace */
633 0x1fffff, /* src_mask */
634 0x1fffff, /* dst_mask */
635 TRUE), /* pcrel_offset */
637 /* ADD: (S+A) & 0xfff [no overflow check] */
638 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
642 FALSE, /* pc_relative */
644 complain_overflow_dont, /* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
647 FALSE, /* partial_inplace */
648 0x3ffc00, /* src_mask */
649 0x3ffc00, /* dst_mask */
650 FALSE), /* pcrel_offset */
652 /* LD/ST8: (S+A) & 0xfff */
653 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
657 FALSE, /* pc_relative */
659 complain_overflow_dont, /* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
662 FALSE, /* partial_inplace */
663 0xfff, /* src_mask */
664 0xfff, /* dst_mask */
665 FALSE), /* pcrel_offset */
667 /* Relocations for control-flow instructions. */
669 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
670 HOWTO (AARCH64_R (TSTBR14), /* type */
672 2, /* size (0 = byte, 1 = short, 2 = long) */
674 TRUE, /* pc_relative */
676 complain_overflow_signed, /* complain_on_overflow */
677 bfd_elf_generic_reloc, /* special_function */
678 AARCH64_R_STR (TSTBR14), /* name */
679 FALSE, /* partial_inplace */
680 0x3fff, /* src_mask */
681 0x3fff, /* dst_mask */
682 TRUE), /* pcrel_offset */
684 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
685 HOWTO (AARCH64_R (CONDBR19), /* type */
687 2, /* size (0 = byte, 1 = short, 2 = long) */
689 TRUE, /* pc_relative */
691 complain_overflow_signed, /* complain_on_overflow */
692 bfd_elf_generic_reloc, /* special_function */
693 AARCH64_R_STR (CONDBR19), /* name */
694 FALSE, /* partial_inplace */
695 0x7ffff, /* src_mask */
696 0x7ffff, /* dst_mask */
697 TRUE), /* pcrel_offset */
699 /* B: ((S+A-P) >> 2) & 0x3ffffff */
700 HOWTO (AARCH64_R (JUMP26), /* type */
702 2, /* size (0 = byte, 1 = short, 2 = long) */
704 TRUE, /* pc_relative */
706 complain_overflow_signed, /* complain_on_overflow */
707 bfd_elf_generic_reloc, /* special_function */
708 AARCH64_R_STR (JUMP26), /* name */
709 FALSE, /* partial_inplace */
710 0x3ffffff, /* src_mask */
711 0x3ffffff, /* dst_mask */
712 TRUE), /* pcrel_offset */
714 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
715 HOWTO (AARCH64_R (CALL26), /* type */
717 2, /* size (0 = byte, 1 = short, 2 = long) */
719 TRUE, /* pc_relative */
721 complain_overflow_signed, /* complain_on_overflow */
722 bfd_elf_generic_reloc, /* special_function */
723 AARCH64_R_STR (CALL26), /* name */
724 FALSE, /* partial_inplace */
725 0x3ffffff, /* src_mask */
726 0x3ffffff, /* dst_mask */
727 TRUE), /* pcrel_offset */
729 /* LD/ST16: (S+A) & 0xffe */
730 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
732 2, /* size (0 = byte, 1 = short, 2 = long) */
734 FALSE, /* pc_relative */
736 complain_overflow_dont, /* complain_on_overflow */
737 bfd_elf_generic_reloc, /* special_function */
738 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
739 FALSE, /* partial_inplace */
740 0xffe, /* src_mask */
741 0xffe, /* dst_mask */
742 FALSE), /* pcrel_offset */
744 /* LD/ST32: (S+A) & 0xffc */
745 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
747 2, /* size (0 = byte, 1 = short, 2 = long) */
749 FALSE, /* pc_relative */
751 complain_overflow_dont, /* complain_on_overflow */
752 bfd_elf_generic_reloc, /* special_function */
753 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
754 FALSE, /* partial_inplace */
755 0xffc, /* src_mask */
756 0xffc, /* dst_mask */
757 FALSE), /* pcrel_offset */
759 /* LD/ST64: (S+A) & 0xff8 */
760 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
764 FALSE, /* pc_relative */
766 complain_overflow_dont, /* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
769 FALSE, /* partial_inplace */
770 0xff8, /* src_mask */
771 0xff8, /* dst_mask */
772 FALSE), /* pcrel_offset */
774 /* LD/ST128: (S+A) & 0xff0 */
775 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
777 2, /* size (0 = byte, 1 = short, 2 = long) */
779 FALSE, /* pc_relative */
781 complain_overflow_dont, /* complain_on_overflow */
782 bfd_elf_generic_reloc, /* special_function */
783 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
784 FALSE, /* partial_inplace */
785 0xff0, /* src_mask */
786 0xff0, /* dst_mask */
787 FALSE), /* pcrel_offset */
789 /* Set a load-literal immediate field to bits
790 0x1FFFFC of G(S)-P */
791 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
793 2, /* size (0 = byte,1 = short,2 = long) */
795 TRUE, /* pc_relative */
797 complain_overflow_signed, /* complain_on_overflow */
798 bfd_elf_generic_reloc, /* special_function */
799 AARCH64_R_STR (GOT_LD_PREL19), /* name */
800 FALSE, /* partial_inplace */
801 0xffffe0, /* src_mask */
802 0xffffe0, /* dst_mask */
803 TRUE), /* pcrel_offset */
805 /* Get to the page for the GOT entry for the symbol
806 (G(S) - P) using an ADRP instruction. */
807 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
811 TRUE, /* pc_relative */
813 complain_overflow_dont, /* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
816 FALSE, /* partial_inplace */
817 0x1fffff, /* src_mask */
818 0x1fffff, /* dst_mask */
819 TRUE), /* pcrel_offset */
821 /* LD64: GOT offset G(S) & 0xff8 */
822 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
824 2, /* size (0 = byte, 1 = short, 2 = long) */
826 FALSE, /* pc_relative */
828 complain_overflow_dont, /* complain_on_overflow */
829 bfd_elf_generic_reloc, /* special_function */
830 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
831 FALSE, /* partial_inplace */
832 0xff8, /* src_mask */
833 0xff8, /* dst_mask */
834 FALSE), /* pcrel_offset */
836 /* LD32: GOT offset G(S) & 0xffc */
837 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
841 FALSE, /* pc_relative */
843 complain_overflow_dont, /* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
846 FALSE, /* partial_inplace */
847 0xffc, /* src_mask */
848 0xffc, /* dst_mask */
849 FALSE), /* pcrel_offset */
851 /* Get to the page for the GOT entry for the symbol
852 (G(S) - P) using an ADRP instruction. */
853 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
855 2, /* size (0 = byte, 1 = short, 2 = long) */
857 TRUE, /* pc_relative */
859 complain_overflow_dont, /* complain_on_overflow */
860 bfd_elf_generic_reloc, /* special_function */
861 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
862 FALSE, /* partial_inplace */
863 0x1fffff, /* src_mask */
864 0x1fffff, /* dst_mask */
865 TRUE), /* pcrel_offset */
867 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
869 2, /* size (0 = byte, 1 = short, 2 = long) */
871 TRUE, /* pc_relative */
873 complain_overflow_dont, /* complain_on_overflow */
874 bfd_elf_generic_reloc, /* special_function */
875 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
876 FALSE, /* partial_inplace */
877 0x1fffff, /* src_mask */
878 0x1fffff, /* dst_mask */
879 TRUE), /* pcrel_offset */
881 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
882 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
884 2, /* size (0 = byte, 1 = short, 2 = long) */
886 FALSE, /* pc_relative */
888 complain_overflow_dont, /* complain_on_overflow */
889 bfd_elf_generic_reloc, /* special_function */
890 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
891 FALSE, /* partial_inplace */
892 0xfff, /* src_mask */
893 0xfff, /* dst_mask */
894 FALSE), /* pcrel_offset */
896 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
900 FALSE, /* pc_relative */
902 complain_overflow_dont, /* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
905 FALSE, /* partial_inplace */
906 0xffff, /* src_mask */
907 0xffff, /* dst_mask */
908 FALSE), /* pcrel_offset */
910 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
914 FALSE, /* pc_relative */
916 complain_overflow_dont, /* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
919 FALSE, /* partial_inplace */
920 0xffff, /* src_mask */
921 0xffff, /* dst_mask */
922 FALSE), /* pcrel_offset */
924 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
928 FALSE, /* pc_relative */
930 complain_overflow_dont, /* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
933 FALSE, /* partial_inplace */
934 0x1fffff, /* src_mask */
935 0x1fffff, /* dst_mask */
936 FALSE), /* pcrel_offset */
938 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
942 FALSE, /* pc_relative */
944 complain_overflow_dont, /* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
947 FALSE, /* partial_inplace */
948 0xff8, /* src_mask */
949 0xff8, /* dst_mask */
950 FALSE), /* pcrel_offset */
952 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
956 FALSE, /* pc_relative */
958 complain_overflow_dont, /* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
961 FALSE, /* partial_inplace */
962 0xffc, /* src_mask */
963 0xffc, /* dst_mask */
964 FALSE), /* pcrel_offset */
966 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
970 FALSE, /* pc_relative */
972 complain_overflow_dont, /* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
975 FALSE, /* partial_inplace */
976 0x1ffffc, /* src_mask */
977 0x1ffffc, /* dst_mask */
978 FALSE), /* pcrel_offset */
980 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
984 FALSE, /* pc_relative */
986 complain_overflow_unsigned, /* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
989 FALSE, /* partial_inplace */
990 0xffff, /* src_mask */
991 0xffff, /* dst_mask */
992 FALSE), /* pcrel_offset */
994 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
998 FALSE, /* pc_relative */
1000 complain_overflow_dont, /* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1003 FALSE, /* partial_inplace */
1004 0xffff, /* src_mask */
1005 0xffff, /* dst_mask */
1006 FALSE), /* pcrel_offset */
1008 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1009 16, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1012 FALSE, /* pc_relative */
1014 complain_overflow_dont, /* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1017 FALSE, /* partial_inplace */
1018 0xffff, /* src_mask */
1019 0xffff, /* dst_mask */
1020 FALSE), /* pcrel_offset */
1022 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1026 FALSE, /* pc_relative */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1036 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1040 FALSE, /* pc_relative */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1045 FALSE, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1050 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1054 FALSE, /* pc_relative */
1056 complain_overflow_unsigned, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1059 FALSE, /* partial_inplace */
1060 0xfff, /* src_mask */
1061 0xfff, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1064 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1068 FALSE, /* pc_relative */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1073 FALSE, /* partial_inplace */
1074 0xfff, /* src_mask */
1075 0xfff, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1078 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1082 FALSE, /* pc_relative */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1087 FALSE, /* partial_inplace */
1088 0xfff, /* src_mask */
1089 0xfff, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1092 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1096 TRUE, /* pc_relative */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1101 FALSE, /* partial_inplace */
1102 0x0ffffe0, /* src_mask */
1103 0x0ffffe0, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1106 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1110 TRUE, /* pc_relative */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1115 FALSE, /* partial_inplace */
1116 0x1fffff, /* src_mask */
1117 0x1fffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1120 /* Get to the page for the GOT entry for the symbol
1121 (G(S) - P) using an ADRP instruction. */
1122 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1123 12, /* rightshift */
1124 2, /* size (0 = byte, 1 = short, 2 = long) */
1126 TRUE, /* pc_relative */
1128 complain_overflow_dont, /* complain_on_overflow */
1129 bfd_elf_generic_reloc, /* special_function */
1130 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1131 FALSE, /* partial_inplace */
1132 0x1fffff, /* src_mask */
1133 0x1fffff, /* dst_mask */
1134 TRUE), /* pcrel_offset */
1136 /* LD64: GOT offset G(S) & 0xff8. */
1137 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1139 2, /* size (0 = byte, 1 = short, 2 = long) */
1141 FALSE, /* pc_relative */
1143 complain_overflow_dont, /* complain_on_overflow */
1144 bfd_elf_generic_reloc, /* special_function */
1145 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1146 FALSE, /* partial_inplace */
1147 0xff8, /* src_mask */
1148 0xff8, /* dst_mask */
1149 FALSE), /* pcrel_offset */
1151 /* LD32: GOT offset G(S) & 0xffc. */
1152 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1154 2, /* size (0 = byte, 1 = short, 2 = long) */
1156 FALSE, /* pc_relative */
1158 complain_overflow_dont, /* complain_on_overflow */
1159 bfd_elf_generic_reloc, /* special_function */
1160 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1161 FALSE, /* partial_inplace */
1162 0xffc, /* src_mask */
1163 0xffc, /* dst_mask */
1164 FALSE), /* pcrel_offset */
1166 /* ADD: GOT offset G(S) & 0xfff. */
1167 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1169 2, /* size (0 = byte, 1 = short, 2 = long) */
1171 FALSE, /* pc_relative */
1173 complain_overflow_dont, /* complain_on_overflow */
1174 bfd_elf_generic_reloc, /* special_function */
1175 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1176 FALSE, /* partial_inplace */
1177 0xfff, /* src_mask */
1178 0xfff, /* dst_mask */
1179 FALSE), /* pcrel_offset */
1181 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1182 16, /* rightshift */
1183 2, /* size (0 = byte, 1 = short, 2 = long) */
1185 FALSE, /* pc_relative */
1187 complain_overflow_dont, /* complain_on_overflow */
1188 bfd_elf_generic_reloc, /* special_function */
1189 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1190 FALSE, /* partial_inplace */
1191 0xffff, /* src_mask */
1192 0xffff, /* dst_mask */
1193 FALSE), /* pcrel_offset */
1195 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1197 2, /* size (0 = byte, 1 = short, 2 = long) */
1199 FALSE, /* pc_relative */
1201 complain_overflow_dont, /* complain_on_overflow */
1202 bfd_elf_generic_reloc, /* special_function */
1203 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1204 FALSE, /* partial_inplace */
1205 0xffff, /* src_mask */
1206 0xffff, /* dst_mask */
1207 FALSE), /* pcrel_offset */
1209 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1211 2, /* size (0 = byte, 1 = short, 2 = long) */
1213 FALSE, /* pc_relative */
1215 complain_overflow_dont, /* complain_on_overflow */
1216 bfd_elf_generic_reloc, /* special_function */
1217 AARCH64_R_STR (TLSDESC_LDR), /* name */
1218 FALSE, /* partial_inplace */
1221 FALSE), /* pcrel_offset */
1223 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1225 2, /* size (0 = byte, 1 = short, 2 = long) */
1227 FALSE, /* pc_relative */
1229 complain_overflow_dont, /* complain_on_overflow */
1230 bfd_elf_generic_reloc, /* special_function */
1231 AARCH64_R_STR (TLSDESC_ADD), /* name */
1232 FALSE, /* partial_inplace */
1235 FALSE), /* pcrel_offset */
1237 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1239 2, /* size (0 = byte, 1 = short, 2 = long) */
1241 FALSE, /* pc_relative */
1243 complain_overflow_dont, /* complain_on_overflow */
1244 bfd_elf_generic_reloc, /* special_function */
1245 AARCH64_R_STR (TLSDESC_CALL), /* name */
1246 FALSE, /* partial_inplace */
1249 FALSE), /* pcrel_offset */
1251 HOWTO (AARCH64_R (COPY), /* type */
1253 2, /* size (0 = byte, 1 = short, 2 = long) */
1255 FALSE, /* pc_relative */
1257 complain_overflow_bitfield, /* complain_on_overflow */
1258 bfd_elf_generic_reloc, /* special_function */
1259 AARCH64_R_STR (COPY), /* name */
1260 TRUE, /* partial_inplace */
1261 0xffffffff, /* src_mask */
1262 0xffffffff, /* dst_mask */
1263 FALSE), /* pcrel_offset */
1265 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1267 2, /* size (0 = byte, 1 = short, 2 = long) */
1269 FALSE, /* pc_relative */
1271 complain_overflow_bitfield, /* complain_on_overflow */
1272 bfd_elf_generic_reloc, /* special_function */
1273 AARCH64_R_STR (GLOB_DAT), /* name */
1274 TRUE, /* partial_inplace */
1275 0xffffffff, /* src_mask */
1276 0xffffffff, /* dst_mask */
1277 FALSE), /* pcrel_offset */
1279 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1281 2, /* size (0 = byte, 1 = short, 2 = long) */
1283 FALSE, /* pc_relative */
1285 complain_overflow_bitfield, /* complain_on_overflow */
1286 bfd_elf_generic_reloc, /* special_function */
1287 AARCH64_R_STR (JUMP_SLOT), /* name */
1288 TRUE, /* partial_inplace */
1289 0xffffffff, /* src_mask */
1290 0xffffffff, /* dst_mask */
1291 FALSE), /* pcrel_offset */
1293 HOWTO (AARCH64_R (RELATIVE), /* type */
1295 2, /* size (0 = byte, 1 = short, 2 = long) */
1297 FALSE, /* pc_relative */
1299 complain_overflow_bitfield, /* complain_on_overflow */
1300 bfd_elf_generic_reloc, /* special_function */
1301 AARCH64_R_STR (RELATIVE), /* name */
1302 TRUE, /* partial_inplace */
1303 ALL_ONES, /* src_mask */
1304 ALL_ONES, /* dst_mask */
1305 FALSE), /* pcrel_offset */
1307 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1309 2, /* size (0 = byte, 1 = short, 2 = long) */
1311 FALSE, /* pc_relative */
1313 complain_overflow_dont, /* complain_on_overflow */
1314 bfd_elf_generic_reloc, /* special_function */
#if ARCH_SIZE == 64
	  AARCH64_R_STR (TLS_DTPMOD64),	/* name */
#else
	  AARCH64_R_STR (TLS_DTPMOD),	/* name */
#endif
1320 FALSE, /* partial_inplace */
1322 ALL_ONES, /* dst_mask */
	  FALSE),		/* pcrel_offset */
1325 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1327 2, /* size (0 = byte, 1 = short, 2 = long) */
1329 FALSE, /* pc_relative */
1331 complain_overflow_dont, /* complain_on_overflow */
1332 bfd_elf_generic_reloc, /* special_function */
#if ARCH_SIZE == 64
	  AARCH64_R_STR (TLS_DTPREL64),	/* name */
#else
	  AARCH64_R_STR (TLS_DTPREL),	/* name */
#endif
1338 FALSE, /* partial_inplace */
1340 ALL_ONES, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1343 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1347 FALSE, /* pc_relative */
1349 complain_overflow_dont, /* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
#if ARCH_SIZE == 64
	  AARCH64_R_STR (TLS_TPREL64),	/* name */
#else
	  AARCH64_R_STR (TLS_TPREL),	/* name */
#endif
1356 FALSE, /* partial_inplace */
1358 ALL_ONES, /* dst_mask */
1359 FALSE), /* pcrel_offset */
1361 HOWTO (AARCH64_R (TLSDESC), /* type */
1363 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 FALSE, /* pc_relative */
1367 complain_overflow_dont, /* complain_on_overflow */
1368 bfd_elf_generic_reloc, /* special_function */
1369 AARCH64_R_STR (TLSDESC), /* name */
1370 FALSE, /* partial_inplace */
1372 ALL_ONES, /* dst_mask */
1373 FALSE), /* pcrel_offset */
1375 HOWTO (AARCH64_R (IRELATIVE), /* type */
1377 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 FALSE, /* pc_relative */
1381 complain_overflow_bitfield, /* complain_on_overflow */
1382 bfd_elf_generic_reloc, /* special_function */
1383 AARCH64_R_STR (IRELATIVE), /* name */
1384 FALSE, /* partial_inplace */
1386 ALL_ONES, /* dst_mask */
1387 FALSE), /* pcrel_offset */
1392 static reloc_howto_type elfNN_aarch64_howto_none =
1393 HOWTO (R_AARCH64_NONE, /* type */
1395 3, /* size (0 = byte, 1 = short, 2 = long) */
1397 FALSE, /* pc_relative */
1399 complain_overflow_dont,/* complain_on_overflow */
1400 bfd_elf_generic_reloc, /* special_function */
1401 "R_AARCH64_NONE", /* name */
1402 FALSE, /* partial_inplace */
1405 FALSE); /* pcrel_offset */
1407 /* Given HOWTO, return the bfd internal relocation enumerator. */
1409 static bfd_reloc_code_real_type
1410 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1413 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1414 const ptrdiff_t offset
1415 = howto - elfNN_aarch64_howto_table;
1417 if (offset > 0 && offset < size - 1)
1418 return BFD_RELOC_AARCH64_RELOC_START + offset;
1420 if (howto == &elfNN_aarch64_howto_none)
1421 return BFD_RELOC_AARCH64_NONE;
1423 return BFD_RELOC_AARCH64_RELOC_START;
1426 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1428 static bfd_reloc_code_real_type
1429 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1431 static bfd_boolean initialized_p = FALSE;
1432 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1433 static unsigned int offsets[R_AARCH64_end];
1435 if (initialized_p == FALSE)
1439 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1440 if (elfNN_aarch64_howto_table[i].type != 0)
1441 offsets[elfNN_aarch64_howto_table[i].type] = i;
1443 initialized_p = TRUE;
1446 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1447 return BFD_RELOC_AARCH64_NONE;
1449 /* PR 17512: file: b371e70a. */
1450 if (r_type >= R_AARCH64_end)
1452 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1453 bfd_set_error (bfd_error_bad_value);
1454 return BFD_RELOC_AARCH64_NONE;
1457 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1460 struct elf_aarch64_reloc_map
1462 bfd_reloc_code_real_type from;
1463 bfd_reloc_code_real_type to;
1466 /* Map bfd generic reloc to AArch64-specific reloc. */
1467 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1469 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1471 /* Basic data relocations. */
1472 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1473 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1474 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1475 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1476 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1477 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1478 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1481 /* Given the bfd internal relocation enumerator in CODE, return the
1482 corresponding howto entry. */
1484 static reloc_howto_type *
1485 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1489 /* Convert bfd generic reloc to AArch64-specific reloc. */
1490 if (code < BFD_RELOC_AARCH64_RELOC_START
1491 || code > BFD_RELOC_AARCH64_RELOC_END)
1492 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1493 if (elf_aarch64_reloc_map[i].from == code)
1495 code = elf_aarch64_reloc_map[i].to;
1499 if (code > BFD_RELOC_AARCH64_RELOC_START
1500 && code < BFD_RELOC_AARCH64_RELOC_END)
1501 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1502 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1504 if (code == BFD_RELOC_AARCH64_NONE)
1505 return &elfNN_aarch64_howto_none;
1510 static reloc_howto_type *
1511 elfNN_aarch64_howto_from_type (unsigned int r_type)
1513 bfd_reloc_code_real_type val;
1514 reloc_howto_type *howto;
1519 bfd_set_error (bfd_error_bad_value);
1524 if (r_type == R_AARCH64_NONE)
1525 return &elfNN_aarch64_howto_none;
1527 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1528 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1533 bfd_set_error (bfd_error_bad_value);
1538 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1539 Elf_Internal_Rela *elf_reloc)
1541 unsigned int r_type;
1543 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1544 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1547 static reloc_howto_type *
1548 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1549 bfd_reloc_code_real_type code)
1551 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1556 bfd_set_error (bfd_error_bad_value);
1560 static reloc_howto_type *
1561 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1566 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1567 if (elfNN_aarch64_howto_table[i].name != NULL
1568 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1569 return &elfNN_aarch64_howto_table[i];
1574 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1575 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1576 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1577 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1579 /* The linker script knows the section names for placement.
1580 The entry_names are used to do simple name mangling on the stubs.
1581 Given a function name, and its type, the stub can be found. The
   name can be changed.  The only requirement is that the %s be
   present.  */
1583 #define STUB_ENTRY_NAME "__%s_veneer"
/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
1587 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1589 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1590 (((1 << 25) - 1) << 2)
#define AARCH64_MAX_BWD_BRANCH_OFFSET \
  (-((1 << 25) << 2))
1594 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1595 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
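
/* Worked values for the limits above (illustrative note): the forward
   branch limit (((1 << 25) - 1) << 2) is 0x7fffffc, just under +128MiB,
   and the backward limit is -0x8000000, i.e. -128MiB, matching the
   26-bit word-aligned immediate of B/BL.  AARCH64_MAX_ADRP_IMM is
   0xfffff pages, so ADRP can reach roughly +/-4GiB from the current
   4KiB page.  */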
1598 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1600 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1601 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1605 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1607 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1608 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1609 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1612 static const uint32_t aarch64_adrp_branch_stub [] =
{
  0x90000010,			/*	adrp	ip0, X  */
				/*		R_AARCH64_ADR_HI21_PCREL(X)  */
  0x91000210,			/*	add	ip0, ip0, :lo12:X  */
				/*		R_AARCH64_ADD_ABS_LO12_NC(X)  */
  0xd61f0200,			/*	br	ip0  */
};
1621 static const uint32_t aarch64_long_branch_stub[] =
{
#if ARCH_SIZE == 64
  0x58000090,			/*	ldr   ip0, 1f  */
#else
  0x18000090,			/*	ldr   wip0, 1f  */
#endif
  0x10000011,			/*	adr   ip1, #0  */
  0x8b110210,			/*	add   ip0, ip0, ip1  */
  0xd61f0200,			/*	br	ip0  */
  0x00000000,			/* 1:	.xword or .word
				   R_AARCH64_PRELNN(X) + 12
				 */
  0x00000000,
};
1637 static const uint32_t aarch64_erratum_835769_stub[] =
{
  0x00000000,			/* Placeholder for multiply accumulate.  */
  0x14000000,			/* b <label>  */
};
/* Section name for stubs is the associated section name plus this
   string.  */
1645 #define STUB_SUFFIX ".stub"
enum elf_aarch64_stub_type
{
  aarch64_stub_none,
  aarch64_stub_adrp_branch,
  aarch64_stub_long_branch,
  aarch64_stub_erratum_835769_veneer,
};
1655 struct elf_aarch64_stub_hash_entry
1657 /* Base hash table entry structure. */
1658 struct bfd_hash_entry root;
1660 /* The stub section. */
1663 /* Offset within stub_sec of the beginning of this stub. */
1664 bfd_vma stub_offset;
1666 /* Given the symbol's value and its section we can determine its final
1667 value when building the stubs (so the stub knows where to jump). */
1668 bfd_vma target_value;
1669 asection *target_section;
1671 enum elf_aarch64_stub_type stub_type;
1673 /* The symbol table entry, if any, that this was derived from. */
1674 struct elf_aarch64_link_hash_entry *h;
1676 /* Destination symbol type */
1677 unsigned char st_type;
1679 /* Where this stub is being called from, or, in the case of combined
1680 stub sections, the first input section in the group. */
1683 /* The name for the local symbol at the start of this stub. The
1684 stub name in the hash table has to be unique; this does not, so
1685 it can be friendlier. */
1688 /* The instruction which caused this stub to be generated (only valid for
1689 erratum 835769 workaround stubs at present). */
1690 uint32_t veneered_insn;
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */
1696 typedef struct elf_elf_section_map
1701 elf_aarch64_section_map;
1704 typedef struct _aarch64_elf_section_data
{
  struct bfd_elf_section_data elf;
  unsigned int mapcount;
  unsigned int mapsize;
  elf_aarch64_section_map *map;
}
_aarch64_elf_section_data;
1713 #define elf_aarch64_section_data(sec) \
1714 ((_aarch64_elf_section_data *) elf_section_data (sec))
1716 /* The size of the thread control block which is defined to be two pointers. */
1717 #define TCB_SIZE (ARCH_SIZE/8)*2
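
/* That is, TCB_SIZE evaluates to 16 bytes for ELF64 and 8 bytes for
   ELF32.  */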
1719 struct elf_aarch64_local_symbol
1721 unsigned int got_type;
1722 bfd_signed_vma got_refcount;
  /* Offset of the GOTPLT entry reserved for the TLS descriptor.  The
     offset is from the end of the jump table and reserved entries
     within the PLTGOT.

     The magic value (bfd_vma) -1 indicates that an offset has not
     been allocated.  */
1731 bfd_vma tlsdesc_got_jump_table_offset;
1734 struct elf_aarch64_obj_tdata
1736 struct elf_obj_tdata root;
1738 /* local symbol descriptors */
1739 struct elf_aarch64_local_symbol *locals;
1741 /* Zero to warn when linking objects with incompatible enum sizes. */
1742 int no_enum_size_warning;
1744 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1745 int no_wchar_size_warning;
1748 #define elf_aarch64_tdata(bfd) \
1749 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1751 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1753 #define is_aarch64_elf(bfd) \
1754 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1755 && elf_tdata (bfd) != NULL \
1756 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1759 elfNN_aarch64_mkobject (bfd *abfd)
1761 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1765 #define elf_aarch64_hash_entry(ent) \
1766 ((struct elf_aarch64_link_hash_entry *)(ent))
1768 #define GOT_UNKNOWN 0
1769 #define GOT_NORMAL 1
1770 #define GOT_TLS_GD 2
1771 #define GOT_TLS_IE 4
1772 #define GOT_TLSDESC_GD 8
1774 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
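
/* Illustrative note only: the got_type field below is a bit mask, so a
   symbol referenced through both a TLS descriptor sequence and a
   traditional GD sequence could carry (GOT_TLSDESC_GD | GOT_TLS_GD);
   GOT_TLS_GD_ANY_P is true for either flavour on its own.  */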
1776 /* AArch64 ELF linker hash entry. */
1777 struct elf_aarch64_link_hash_entry
1779 struct elf_link_hash_entry root;
1781 /* Track dynamic relocs copied for this symbol. */
1782 struct elf_dyn_relocs *dyn_relocs;
1784 /* Since PLT entries have variable size, we need to record the
     index into .got.plt instead of recomputing it from the PLT
     offset.  */
1787 bfd_signed_vma plt_got_offset;
  /* Bit mask representing the type of GOT entry(s) if any required by
     this symbol.  */
1791 unsigned int got_type;
  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
1795 struct elf_aarch64_stub_hash_entry *stub_cache;
1797 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1798 is from the end of the jump table and reserved entries within the PLTGOT.
     The magic value (bfd_vma) -1 indicates that an offset has not
     been allocated.  */
1802 bfd_vma tlsdesc_got_jump_table_offset;
1806 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1808 unsigned long r_symndx)
1811 return elf_aarch64_hash_entry (h)->got_type;
1813 if (! elf_aarch64_locals (abfd))
1816 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1819 /* Get the AArch64 elf linker hash table from a link_info structure. */
1820 #define elf_aarch64_hash_table(info) \
1821 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1823 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1824 ((struct elf_aarch64_stub_hash_entry *) \
1825 bfd_hash_lookup ((table), (string), (create), (copy)))
1827 /* AArch64 ELF linker hash table. */
1828 struct elf_aarch64_link_hash_table
1830 /* The main hash table. */
1831 struct elf_link_hash_table root;
1833 /* Nonzero to force PIC branch veneers. */
1836 /* Fix erratum 835769. */
1837 int fix_erratum_835769;
1839 /* The number of bytes in the initial entry in the PLT. */
1840 bfd_size_type plt_header_size;
  /* The number of bytes in the subsequent PLT entries.  */
1843 bfd_size_type plt_entry_size;
1845 /* Short-cuts to get to dynamic linker sections. */
1849 /* Small local sym cache. */
1850 struct sym_cache sym_cache;
1852 /* For convenience in allocate_dynrelocs. */
1855 /* The amount of space used by the reserved portion of the sgotplt
1856 section, plus whatever space is used by the jump slots. */
1857 bfd_vma sgotplt_jump_table_size;
1859 /* The stub hash table. */
1860 struct bfd_hash_table stub_hash_table;
1862 /* Linker stub bfd. */
1865 /* Linker call-backs. */
1866 asection *(*add_stub_section) (const char *, asection *);
1867 void (*layout_sections_again) (void);
1869 /* Array to keep track of which stub sections have been created, and
1870 information on stub grouping. */
1873 /* This is the section to which stubs in the group will be
1876 /* The stub section. */
1880 /* Assorted information used by elfNN_aarch64_size_stubs. */
1881 unsigned int bfd_count;
1883 asection **input_list;
1885 /* The offset into splt of the PLT entry for the TLS descriptor
1886 resolver. Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
1889 bfd_vma tlsdesc_plt;
1891 /* The GOT offset for the lazy trampoline. Communicated to the
1892 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1893 indicates an offset is not allocated. */
1894 bfd_vma dt_tlsdesc_got;
1896 /* Used by local STT_GNU_IFUNC symbols. */
1897 htab_t loc_hash_table;
1898 void * loc_hash_memory;
1901 /* Create an entry in an AArch64 ELF linker hash table. */
1903 static struct bfd_hash_entry *
1904 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1905 struct bfd_hash_table *table,
1908 struct elf_aarch64_link_hash_entry *ret =
1909 (struct elf_aarch64_link_hash_entry *) entry;
1911 /* Allocate the structure if it has not already been allocated by a
1914 ret = bfd_hash_allocate (table,
1915 sizeof (struct elf_aarch64_link_hash_entry));
1917 return (struct bfd_hash_entry *) ret;
1919 /* Call the allocation method of the superclass. */
1920 ret = ((struct elf_aarch64_link_hash_entry *)
1921 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1925 ret->dyn_relocs = NULL;
1926 ret->got_type = GOT_UNKNOWN;
1927 ret->plt_got_offset = (bfd_vma) - 1;
1928 ret->stub_cache = NULL;
1929 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1932 return (struct bfd_hash_entry *) ret;
1935 /* Initialize an entry in the stub hash table. */
1937 static struct bfd_hash_entry *
1938 stub_hash_newfunc (struct bfd_hash_entry *entry,
1939 struct bfd_hash_table *table, const char *string)
1941 /* Allocate the structure if it has not already been allocated by a subclass. */
1945 entry = bfd_hash_allocate (table,
1947 elf_aarch64_stub_hash_entry));
1952 /* Call the allocation method of the superclass. */
1953 entry = bfd_hash_newfunc (entry, table, string);
1956 struct elf_aarch64_stub_hash_entry *eh;
1958 /* Initialize the local fields. */
1959 eh = (struct elf_aarch64_stub_hash_entry *) entry;
1960 eh->stub_sec = NULL;
1961 eh->stub_offset = 0;
1962 eh->target_value = 0;
1963 eh->target_section = NULL;
1964 eh->stub_type = aarch64_stub_none;
1972 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
1973 for local symbols so that we can handle local STT_GNU_IFUNC symbols
1974 as global symbols. We reuse indx and dynstr_index for the local symbol
1975 hash since they aren't used by global symbols in this backend. */
1978 elfNN_aarch64_local_htab_hash (const void *ptr)
1980 struct elf_link_hash_entry *h
1981 = (struct elf_link_hash_entry *) ptr;
1982 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
1985 /* Compare local hash entries. */
1988 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
1990 struct elf_link_hash_entry *h1
1991 = (struct elf_link_hash_entry *) ptr1;
1992 struct elf_link_hash_entry *h2
1993 = (struct elf_link_hash_entry *) ptr2;
1995 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
1998 /* Find and/or create a hash entry for local symbol. */
2000 static struct elf_link_hash_entry *
2001 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2002 bfd *abfd, const Elf_Internal_Rela *rel,
2005 struct elf_aarch64_link_hash_entry e, *ret;
2006 asection *sec = abfd->sections;
2007 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2008 ELFNN_R_SYM (rel->r_info));
2011 e.root.indx = sec->id;
2012 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2013 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2014 create ? INSERT : NO_INSERT);
2021 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2025 ret = (struct elf_aarch64_link_hash_entry *)
2026 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2027 sizeof (struct elf_aarch64_link_hash_entry));
2030 memset (ret, 0, sizeof (*ret));
2031 ret->root.indx = sec->id;
2032 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2033 ret->root.dynindx = -1;
2039 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2042 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2043 struct elf_link_hash_entry *dir,
2044 struct elf_link_hash_entry *ind)
2046 struct elf_aarch64_link_hash_entry *edir, *eind;
2048 edir = (struct elf_aarch64_link_hash_entry *) dir;
2049 eind = (struct elf_aarch64_link_hash_entry *) ind;
2051 if (eind->dyn_relocs != NULL)
2053 if (edir->dyn_relocs != NULL)
2055 struct elf_dyn_relocs **pp;
2056 struct elf_dyn_relocs *p;
2058 /* Add reloc counts against the indirect sym to the direct sym
2059 list. Merge any entries against the same section. */
2060 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2062 struct elf_dyn_relocs *q;
2064 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2065 if (q->sec == p->sec)
2067 q->pc_count += p->pc_count;
2068 q->count += p->count;
2075 *pp = edir->dyn_relocs;
2078 edir->dyn_relocs = eind->dyn_relocs;
2079 eind->dyn_relocs = NULL;
2082 if (ind->root.type == bfd_link_hash_indirect)
2084 /* Copy over PLT info. */
2085 if (dir->got.refcount <= 0)
2087 edir->got_type = eind->got_type;
2088 eind->got_type = GOT_UNKNOWN;
2092 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2095 /* Destroy an AArch64 elf linker hash table. */
2098 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2100 struct elf_aarch64_link_hash_table *ret
2101 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2103 if (ret->loc_hash_table)
2104 htab_delete (ret->loc_hash_table);
2105 if (ret->loc_hash_memory)
2106 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2108 bfd_hash_table_free (&ret->stub_hash_table);
2109 _bfd_elf_link_hash_table_free (obfd);
2112 /* Create an AArch64 elf linker hash table. */
2114 static struct bfd_link_hash_table *
2115 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2117 struct elf_aarch64_link_hash_table *ret;
2118 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2120 ret = bfd_zmalloc (amt);
2124 if (!_bfd_elf_link_hash_table_init
2125 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2126 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2132 ret->plt_header_size = PLT_ENTRY_SIZE;
2133 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2135 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2137 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2138 sizeof (struct elf_aarch64_stub_hash_entry)))
2140 _bfd_elf_link_hash_table_free (abfd);
2144 ret->loc_hash_table = htab_try_create (1024,
2145 elfNN_aarch64_local_htab_hash,
2146 elfNN_aarch64_local_htab_eq,
2148 ret->loc_hash_memory = objalloc_create ();
2149 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2151 elfNN_aarch64_link_hash_table_free (abfd);
2154 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2156 return &ret->root.root;
2160 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2161 bfd_vma offset, bfd_vma value)
2163 reloc_howto_type *howto;
2166 howto = elfNN_aarch64_howto_from_type (r_type);
2167 place = (input_section->output_section->vma + input_section->output_offset
2170 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2171 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2172 return _bfd_aarch64_elf_put_addend (input_bfd,
2173 input_section->contents + offset, r_type,
2177 static enum elf_aarch64_stub_type
2178 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2180 if (aarch64_valid_for_adrp_p (value, place))
2181 return aarch64_stub_adrp_branch;
2182 return aarch64_stub_long_branch;
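/* A hedged sketch of the two stub flavours (reconstructed from the
   relocations applied in aarch64_build_one_stub below, not copied from the
   elided template arrays): the adrp_branch stub expands to roughly

       adrp ip0, dest                // ADR_PREL_PG_HI21 against dest
       add  ip0, ip0, :lo12:dest     // ADD_ABS_LO12_NC against dest
       br   ip0

   while the long_branch stub materialises the full destination from a
   PC-relative literal (the PRELNN relocation placed at stub offset 16)
   before branching through ip0.  ip0/ip1 are the x16/x17 scratch
   registers that the sibcall comment below allows to be clobbered.  */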
2185 /* Determine the type of stub needed, if any, for a call. */
2187 static enum elf_aarch64_stub_type
2188 aarch64_type_of_stub (struct bfd_link_info *info,
2189 asection *input_sec,
2190 const Elf_Internal_Rela *rel,
2191 unsigned char st_type,
2192 struct elf_aarch64_link_hash_entry *hash,
2193 bfd_vma destination)
2196 bfd_signed_vma branch_offset;
2197 unsigned int r_type;
2198 struct elf_aarch64_link_hash_table *globals;
2199 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2200 bfd_boolean via_plt_p;
2202 if (st_type != STT_FUNC)
2205 globals = elf_aarch64_hash_table (info);
2206 via_plt_p = (globals->root.splt != NULL && hash != NULL
2207 && hash->root.plt.offset != (bfd_vma) - 1);
2212 /* Determine where the call point is. */
2213 location = (input_sec->output_offset
2214 + input_sec->output_section->vma + rel->r_offset);
2216 branch_offset = (bfd_signed_vma) (destination - location);
2218 r_type = ELFNN_R_TYPE (rel->r_info);
2220 /* We don't want to redirect any old unconditional jump in this way,
2221 only one which is being used for a sibcall, where it is
2222 acceptable for the IP0 and IP1 registers to be clobbered. */
2223 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2224 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2225 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2227 stub_type = aarch64_stub_long_branch;
2233 /* Build a name for an entry in the stub hash table. */
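/* For illustration (hypothetical values, not taken from a real link): with
   the formats used below, a call to a global symbol from the section with
   id 0x12 yields a name such as "00000012_printf+0", while a call to a
   local symbol uses the section-id:symbol-index form, e.g.
   "00000012_34:56+0".  */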
2236 elfNN_aarch64_stub_name (const asection *input_section,
2237 const asection *sym_sec,
2238 const struct elf_aarch64_link_hash_entry *hash,
2239 const Elf_Internal_Rela *rel)
2246 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2247 stub_name = bfd_malloc (len);
2248 if (stub_name != NULL)
2249 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2250 (unsigned int) input_section->id,
2251 hash->root.root.root.string,
2256 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2257 stub_name = bfd_malloc (len);
2258 if (stub_name != NULL)
2259 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2260 (unsigned int) input_section->id,
2261 (unsigned int) sym_sec->id,
2262 (unsigned int) ELFNN_R_SYM (rel->r_info),
2269 /* Look up an entry in the stub hash. Stub entries are cached because
2270 creating the stub name takes a bit of time. */
2272 static struct elf_aarch64_stub_hash_entry *
2273 elfNN_aarch64_get_stub_entry (const asection *input_section,
2274 const asection *sym_sec,
2275 struct elf_link_hash_entry *hash,
2276 const Elf_Internal_Rela *rel,
2277 struct elf_aarch64_link_hash_table *htab)
2279 struct elf_aarch64_stub_hash_entry *stub_entry;
2280 struct elf_aarch64_link_hash_entry *h =
2281 (struct elf_aarch64_link_hash_entry *) hash;
2282 const asection *id_sec;
2284 if ((input_section->flags & SEC_CODE) == 0)
2287 /* If this input section is part of a group of sections sharing one
2288 stub section, then use the id of the first section in the group.
2289 Stub names need to include a section id, as there may well be
2290 more than one stub used to reach say, printf, and we need to
2291 distinguish between them. */
2292 id_sec = htab->stub_group[input_section->id].link_sec;
2294 if (h != NULL && h->stub_cache != NULL
2295 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2297 stub_entry = h->stub_cache;
2303 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2304 if (stub_name == NULL)
2307 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2308 stub_name, FALSE, FALSE);
2310 h->stub_cache = stub_entry;
2319 /* Create a stub section. */
2322 _bfd_aarch64_create_stub_section (asection *section,
2323 struct elf_aarch64_link_hash_table *htab)
2329 namelen = strlen (section->name);
2330 len = namelen + sizeof (STUB_SUFFIX);
2331 s_name = bfd_alloc (htab->stub_bfd, len);
2335 memcpy (s_name, section->name, namelen);
2336 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2337 return (*htab->add_stub_section) (s_name, section);
2341 /* Find or create a stub section for a link section.
2343 Find or create the stub section used to collect stubs attached to
2344 the specified link section. */
2347 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2348 struct elf_aarch64_link_hash_table *htab)
2350 if (htab->stub_group[link_section->id].stub_sec == NULL)
2351 htab->stub_group[link_section->id].stub_sec
2352 = _bfd_aarch64_create_stub_section (link_section, htab);
2353 return htab->stub_group[link_section->id].stub_sec;
2357 /* Find or create a stub section in the stub group for an input
2361 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2362 struct elf_aarch64_link_hash_table *htab)
2364 asection *link_sec = htab->stub_group[section->id].link_sec;
2365 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2369 /* Add a new stub entry in the stub group associated with an input
2370 section to the stub hash. Not all fields of the new stub entry are
2373 static struct elf_aarch64_stub_hash_entry *
2374 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2376 struct elf_aarch64_link_hash_table *htab)
2380 struct elf_aarch64_stub_hash_entry *stub_entry;
2382 link_sec = htab->stub_group[section->id].link_sec;
2383 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2385 /* Enter this entry into the linker stub hash table. */
2386 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2388 if (stub_entry == NULL)
2390 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2391 section->owner, stub_name);
2395 stub_entry->stub_sec = stub_sec;
2396 stub_entry->stub_offset = 0;
2397 stub_entry->id_sec = link_sec;
2403 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2404 void *in_arg ATTRIBUTE_UNUSED)
2406 struct elf_aarch64_stub_hash_entry *stub_entry;
2411 bfd_vma veneered_insn_loc;
2412 bfd_vma veneer_entry_loc;
2413 bfd_signed_vma branch_offset = 0;
2414 unsigned int template_size;
2415 const uint32_t *template;
2418 /* Massage our args to the form they really have. */
2419 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2421 stub_sec = stub_entry->stub_sec;
2423 /* Make a note of the offset within the stubs for this entry. */
2424 stub_entry->stub_offset = stub_sec->size;
2425 loc = stub_sec->contents + stub_entry->stub_offset;
2427 stub_bfd = stub_sec->owner;
2429 /* This is the address of the stub destination. */
2430 sym_value = (stub_entry->target_value
2431 + stub_entry->target_section->output_offset
2432 + stub_entry->target_section->output_section->vma);
2434 if (stub_entry->stub_type == aarch64_stub_long_branch)
2436 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2437 + stub_sec->output_offset);
2439 /* See if we can relax the stub. */
2440 if (aarch64_valid_for_adrp_p (sym_value, place))
2441 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2444 switch (stub_entry->stub_type)
2446 case aarch64_stub_adrp_branch:
2447 template = aarch64_adrp_branch_stub;
2448 template_size = sizeof (aarch64_adrp_branch_stub);
2450 case aarch64_stub_long_branch:
2451 template = aarch64_long_branch_stub;
2452 template_size = sizeof (aarch64_long_branch_stub);
2454 case aarch64_stub_erratum_835769_veneer:
2455 template = aarch64_erratum_835769_stub;
2456 template_size = sizeof (aarch64_erratum_835769_stub);
2462 for (i = 0; i < (template_size / sizeof template[0]); i++)
2464 bfd_putl32 (template[i], loc);
2468 template_size = (template_size + 7) & ~7;
2469 stub_sec->size += template_size;
2471 switch (stub_entry->stub_type)
2473 case aarch64_stub_adrp_branch:
2474 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2475 stub_entry->stub_offset, sym_value))
2476 /* The stub would not have been relaxed if the offset was out of range. */
2480 _bfd_final_link_relocate
2481 (elfNN_aarch64_howto_from_type (AARCH64_R (ADD_ABS_LO12_NC)),
2485 stub_entry->stub_offset + 4,
2490 case aarch64_stub_long_branch:
2491 /* We want the value relative to the address 12 bytes back from the
2493 _bfd_final_link_relocate (elfNN_aarch64_howto_from_type
2494 (AARCH64_R (PRELNN)), stub_bfd, stub_sec,
2496 stub_entry->stub_offset + 16,
2500 case aarch64_stub_erratum_835769_veneer:
2501 veneered_insn_loc = stub_entry->target_section->output_section->vma
2502 + stub_entry->target_section->output_offset
2503 + stub_entry->target_value;
2504 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2505 + stub_entry->stub_sec->output_offset
2506 + stub_entry->stub_offset;
2507 branch_offset = veneered_insn_loc - veneer_entry_loc;
2508 branch_offset >>= 2;
2509 branch_offset &= 0x3ffffff;
2510 bfd_putl32 (stub_entry->veneered_insn,
2511 stub_sec->contents + stub_entry->stub_offset);
2512 bfd_putl32 (template[1] | branch_offset,
2513 stub_sec->contents + stub_entry->stub_offset + 4);
2523 /* As above, but don't actually build the stub. Just bump offset so
2524 we know stub section sizes. */
2527 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2528 void *in_arg ATTRIBUTE_UNUSED)
2530 struct elf_aarch64_stub_hash_entry *stub_entry;
2533 /* Massage our args to the form they really have. */
2534 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2536 switch (stub_entry->stub_type)
2538 case aarch64_stub_adrp_branch:
2539 size = sizeof (aarch64_adrp_branch_stub);
2541 case aarch64_stub_long_branch:
2542 size = sizeof (aarch64_long_branch_stub);
2544 case aarch64_stub_erratum_835769_veneer:
2545 size = sizeof (aarch64_erratum_835769_stub);
2551 size = (size + 7) & ~7;
2552 stub_entry->stub_sec->size += size;
2556 /* External entry points for sizing and building linker stubs. */
2558 /* Set up various things so that we can make a list of input sections
2559 for each output section included in the link. Returns -1 on error,
2560 0 when no stubs will be needed, and 1 on success. */
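/* A plausible driving sequence from the linker side (illustrative only;
   the real driver lives in ld, outside this file):

     elfNN_aarch64_setup_section_lists (output_bfd, info);
     // ... once per input section, in link order:
     elfNN_aarch64_next_input_section (info, isec);
     elfNN_aarch64_size_stubs (output_bfd, stub_bfd, info, group_size,
                               add_stub_section, layout_sections_again);
     // ... after the final layout:
     elfNN_aarch64_build_stubs (info);  */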
2563 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2564 struct bfd_link_info *info)
2567 unsigned int bfd_count;
2568 int top_id, top_index;
2570 asection **input_list, **list;
2572 struct elf_aarch64_link_hash_table *htab =
2573 elf_aarch64_hash_table (info);
2575 if (!is_elf_hash_table (htab))
2578 /* Count the number of input BFDs and find the top input section id. */
2579 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2580 input_bfd != NULL; input_bfd = input_bfd->link.next)
2583 for (section = input_bfd->sections;
2584 section != NULL; section = section->next)
2586 if (top_id < section->id)
2587 top_id = section->id;
2590 htab->bfd_count = bfd_count;
2592 amt = sizeof (struct map_stub) * (top_id + 1);
2593 htab->stub_group = bfd_zmalloc (amt);
2594 if (htab->stub_group == NULL)
2597 /* We can't use output_bfd->section_count here to find the top output
2598 section index as some sections may have been removed, and
2599 _bfd_strip_section_from_output doesn't renumber the indices. */
2600 for (section = output_bfd->sections, top_index = 0;
2601 section != NULL; section = section->next)
2603 if (top_index < section->index)
2604 top_index = section->index;
2607 htab->top_index = top_index;
2608 amt = sizeof (asection *) * (top_index + 1);
2609 input_list = bfd_malloc (amt);
2610 htab->input_list = input_list;
2611 if (input_list == NULL)
2614 /* For sections we aren't interested in, mark their entries with a
2615 value we can check later. */
2616 list = input_list + top_index;
2618 *list = bfd_abs_section_ptr;
2619 while (list-- != input_list);
2621 for (section = output_bfd->sections;
2622 section != NULL; section = section->next)
2624 if ((section->flags & SEC_CODE) != 0)
2625 input_list[section->index] = NULL;
2631 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2632 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2634 /* The linker repeatedly calls this function for each input section,
2635 in the order that input sections are linked into output sections.
2636 Build lists of input sections to determine groupings between which
2637 we may insert linker stubs. */
2640 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2642 struct elf_aarch64_link_hash_table *htab =
2643 elf_aarch64_hash_table (info);
2645 if (isec->output_section->index <= htab->top_index)
2647 asection **list = htab->input_list + isec->output_section->index;
2649 if (*list != bfd_abs_section_ptr)
2651 /* Steal the link_sec pointer for our list. */
2652 /* This happens to make the list in reverse order,
2653 which is what we want. */
2654 PREV_SEC (isec) = *list;
2660 /* See whether we can group stub sections together. Grouping stub
2661 sections may result in fewer stubs. More importantly, we need to
2662 put all .init* and .fini* stubs at the beginning of the .init or
2663 .fini output sections respectively, because glibc splits the
2664 _init and _fini functions into multiple parts. Putting a stub in
2665 the middle of a function is not a good idea. */
2668 group_sections (struct elf_aarch64_link_hash_table *htab,
2669 bfd_size_type stub_group_size,
2670 bfd_boolean stubs_always_before_branch)
2672 asection **list = htab->input_list + htab->top_index;
2676 asection *tail = *list;
2678 if (tail == bfd_abs_section_ptr)
2681 while (tail != NULL)
2685 bfd_size_type total;
2689 while ((prev = PREV_SEC (curr)) != NULL
2690 && ((total += curr->output_offset - prev->output_offset)
2694 /* OK, the size from the start of CURR to the end is less
2695 than stub_group_size and thus can be handled by one stub
2696 section. (Or the tail section is itself larger than
2697 stub_group_size, in which case we may be toast.)
2698 We should really be keeping track of the total size of
2699 stubs added here, as stubs contribute to the final output section size. */
2703 prev = PREV_SEC (tail);
2704 /* Set up this stub group. */
2705 htab->stub_group[tail->id].link_sec = curr;
2707 while (tail != curr && (tail = prev) != NULL);
2709 /* But wait, there's more! Input sections up to stub_group_size
2710 bytes before the stub section can be handled by it too. */
2711 if (!stubs_always_before_branch)
2715 && ((total += tail->output_offset - prev->output_offset)
2719 prev = PREV_SEC (tail);
2720 htab->stub_group[tail->id].link_sec = curr;
2726 while (list-- != htab->input_list);
2728 free (htab->input_list);
2733 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2735 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2736 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2737 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2738 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2739 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2740 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2742 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2743 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2744 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2745 #define AARCH64_ZR 0x1f
2747 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2748 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops. */
2750 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2751 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2752 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2753 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2754 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2755 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2756 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2757 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2758 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2759 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2760 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2761 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2762 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2763 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2764 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2765 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2766 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2767 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
2769 /* Classify an INSN if it is indeed a load/store.
2771 Return TRUE if INSN is a LD/ST instruction otherwise return FALSE.
2773 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2 is set equal to RT.
2776 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned. */
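/* Worked examples (illustrative, derived from the field macros above):

     ldr x0, [x1, #8]         ->  *load = TRUE,  *pair = FALSE, *rt = 0
     stp x2, x3, [sp, #-16]!  ->  *load = FALSE, *pair = TRUE,  *rt = 2, *rt2 = 3  */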
2781 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2782 bfd_boolean *pair, bfd_boolean *load)
2790 /* Bail out quickly if INSN doesn't fall into the load-store encoding space. */
2792 if (!AARCH64_LDST (insn))
2797 if (AARCH64_LDST_EX (insn))
2799 *rt = AARCH64_RT (insn);
2801 if (AARCH64_BIT (insn, 21) == 1)
2804 *rt2 = AARCH64_RT2 (insn);
2806 *load = AARCH64_LD (insn);
2809 else if (AARCH64_LDST_NAP (insn)
2810 || AARCH64_LDSTP_PI (insn)
2811 || AARCH64_LDSTP_O (insn)
2812 || AARCH64_LDSTP_PRE (insn))
2815 *rt = AARCH64_RT (insn);
2816 *rt2 = AARCH64_RT2 (insn);
2817 *load = AARCH64_LD (insn);
2820 else if (AARCH64_LDST_PCREL (insn)
2821 || AARCH64_LDST_UI (insn)
2822 || AARCH64_LDST_PIIMM (insn)
2823 || AARCH64_LDST_U (insn)
2824 || AARCH64_LDST_PREIMM (insn)
2825 || AARCH64_LDST_RO (insn)
2826 || AARCH64_LDST_UIMM (insn))
2828 *rt = AARCH64_RT (insn);
2830 if (AARCH64_LDST_PCREL (insn))
2832 opc = AARCH64_BITS (insn, 22, 2);
2833 v = AARCH64_BIT (insn, 26);
2834 opc_v = opc | (v << 2);
2835 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
2836 || opc_v == 5 || opc_v == 7);
2839 else if (AARCH64_LDST_SIMD_M (insn)
2840 || AARCH64_LDST_SIMD_M_PI (insn))
2842 *rt = AARCH64_RT (insn);
2843 *load = AARCH64_BIT (insn, 22);
2844 opcode = (insn >> 12) & 0xf;
2871 else if (AARCH64_LDST_SIMD_S (insn)
2872 || AARCH64_LDST_SIMD_S_PI (insn))
2874 *rt = AARCH64_RT (insn);
2875 r = (insn >> 21) & 1;
2876 *load = AARCH64_BIT (insn, 22);
2877 opcode = (insn >> 13) & 0x7;
2889 *rt2 = *rt + (r == 0 ? 2 : 3);
2897 *rt2 = *rt + (r == 0 ? 2 : 3);
2909 /* Return TRUE if INSN is multiply-accumulate. */
2912 aarch64_mlxl_p (uint32_t insn)
2914 uint32_t op31 = AARCH64_OP31 (insn);
2916 if (AARCH64_MAC (insn)
2917 && (op31 == 0 || op31 == 1 || op31 == 5)
2918 /* Exclude MUL instructions which are encoded as a multiply-accumulate against the zero register. */
2920 && AARCH64_RA (insn) != AARCH64_ZR)
2926 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
2927 it is possible for a 64-bit multiply-accumulate instruction to generate an
2928 incorrect result. The details are quite complex and hard to
2929 determine statically, since branches in the code may exist in some
2930 circumstances, but all cases end with a memory (load, store, or
2931 prefetch) instruction followed immediately by the multiply-accumulate
2932 operation. We employ a linker patching technique, by moving the potentially
2933 affected multiply-accumulate instruction into a patch region and replacing
2934 the original instruction with a branch to the patch. This function checks
2935 if INSN_1 is the memory operation followed by a multiply-accumulate
2936 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
2937 if INSN_1 and INSN_2 are safe. */
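/* Illustrative example of the pattern being detected (hypothetical code,
   not from the original sources), together with the patch emitted via the
   __erratum_835769_veneer_<n> stubs created below:

     Original:                      After patching:
       ldr  x0, [x2, #8]              ldr  x0, [x2, #8]
       madd x1, x3, x4, x5            b    __erratum_835769_veneer_0
       <next insn>                    <next insn>
                                    __erratum_835769_veneer_0:
                                      madd x1, x3, x4, x5
                                      b    <next insn>

   Here the load writes x0, which feeds none of the MADD operands (x3, x4,
   x5), so there is no RAW dependency and the pair is flagged as an erratum
   sequence.  */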
2940 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
2950 if (aarch64_mlxl_p (insn_2)
2951 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
2953 /* Any SIMD memory op is independent of the subsequent MLA
2954 by definition of the erratum. */
2955 if (AARCH64_BIT (insn_1, 26))
2958 /* If not SIMD, check for integer memory ops and MLA relationship. */
2959 rn = AARCH64_RN (insn_2);
2960 ra = AARCH64_RA (insn_2);
2961 rm = AARCH64_RM (insn_2);
2963 /* If this is a load and there's a true(RAW) dependency, we are safe
2964 and this is not an erratum sequence. */
2966 (rt == rn || rt == rm || rt == ra
2967 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
2970 /* We conservatively put out stubs for all other cases (including
2978 /* Used to order a list of mapping symbols by address. */
2981 elf_aarch64_compare_mapping (const void *a, const void *b)
2983 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
2984 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
2986 if (amap->vma > bmap->vma)
2988 else if (amap->vma < bmap->vma)
2990 else if (amap->type > bmap->type)
2991 /* Ensure results do not depend on the host qsort for objects with
2992 multiple mapping symbols at the same address by sorting on type
2995 else if (amap->type < bmap->type)
3003 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3005 char *stub_name = (char *) bfd_malloc
3006 (strlen ("__erratum_835769_veneer_") + 16);
3007 sprintf (stub_name, "__erratum_835769_veneer_%d", num_fixes);
3011 /* Scan for the Cortex-A53 erratum 835769 sequence.
3013 Return FALSE on abnormal termination, TRUE otherwise. */
3016 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3017 struct bfd_link_info *info,
3018 unsigned int *num_fixes_p)
3021 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3022 unsigned int num_fixes = *num_fixes_p;
3027 for (section = input_bfd->sections;
3029 section = section->next)
3031 bfd_byte *contents = NULL;
3032 struct _aarch64_elf_section_data *sec_data;
3035 if (elf_section_type (section) != SHT_PROGBITS
3036 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3037 || (section->flags & SEC_EXCLUDE) != 0
3038 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3039 || (section->output_section == bfd_abs_section_ptr))
3042 if (elf_section_data (section)->this_hdr.contents != NULL)
3043 contents = elf_section_data (section)->this_hdr.contents;
3044 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3047 sec_data = elf_aarch64_section_data (section);
3049 qsort (sec_data->map, sec_data->mapcount,
3050 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3052 for (span = 0; span < sec_data->mapcount; span++)
3054 unsigned int span_start = sec_data->map[span].vma;
3055 unsigned int span_end = ((span == sec_data->mapcount - 1)
3056 ? sec_data->map[0].vma + section->size
3057 : sec_data->map[span + 1].vma);
3059 char span_type = sec_data->map[span].type;
3061 if (span_type == 'd')
3064 for (i = span_start; i + 4 < span_end; i += 4)
3066 uint32_t insn_1 = bfd_getl32 (contents + i);
3067 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3069 if (aarch64_erratum_sequence (insn_1, insn_2))
3071 struct elf_aarch64_stub_hash_entry *stub_entry;
3072 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3076 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3082 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3083 stub_entry->target_section = section;
3084 stub_entry->target_value = i + 4;
3085 stub_entry->veneered_insn = insn_2;
3086 stub_entry->output_name = stub_name;
3091 if (elf_section_data (section)->this_hdr.contents == NULL)
3095 *num_fixes_p = num_fixes;
3101 /* Resize all stub sections. */
3104 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3108 /* OK, we've added some stubs. Find out the new size of the stub sections. */
3110 for (section = htab->stub_bfd->sections;
3111 section != NULL; section = section->next)
3113 /* Ignore non-stub sections. */
3114 if (!strstr (section->name, STUB_SUFFIX))
3119 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3123 /* Determine and set the size of the stub section for a final link.
3125 The basic idea here is to examine all the relocations looking for
3126 PC-relative calls to a target that is unreachable with a "bl" instruction. */
3130 elfNN_aarch64_size_stubs (bfd *output_bfd,
3132 struct bfd_link_info *info,
3133 bfd_signed_vma group_size,
3134 asection * (*add_stub_section) (const char *,
3136 void (*layout_sections_again) (void))
3138 bfd_size_type stub_group_size;
3139 bfd_boolean stubs_always_before_branch;
3140 bfd_boolean stub_changed = FALSE;
3141 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3142 unsigned int num_erratum_835769_fixes = 0;
3144 /* Propagate mach to stub bfd, because it may not have been
3145 finalized when we created stub_bfd. */
3146 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3147 bfd_get_mach (output_bfd));
3149 /* Stash our params away. */
3150 htab->stub_bfd = stub_bfd;
3151 htab->add_stub_section = add_stub_section;
3152 htab->layout_sections_again = layout_sections_again;
3153 stubs_always_before_branch = group_size < 0;
3155 stub_group_size = -group_size;
3157 stub_group_size = group_size;
3159 if (stub_group_size == 1)
3161 /* Default values. */
3162 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3163 stub_group_size = 127 * 1024 * 1024;
3166 group_sections (htab, stub_group_size, stubs_always_before_branch);
3168 if (htab->fix_erratum_835769)
3172 for (input_bfd = info->input_bfds;
3173 input_bfd != NULL; input_bfd = input_bfd->link.next)
3174 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3175 &num_erratum_835769_fixes))
3178 stub_changed = TRUE;
3185 for (input_bfd = info->input_bfds;
3186 input_bfd != NULL; input_bfd = input_bfd->link.next)
3188 Elf_Internal_Shdr *symtab_hdr;
3190 Elf_Internal_Sym *local_syms = NULL;
3192 /* We'll need the symbol table in a second. */
3193 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3194 if (symtab_hdr->sh_info == 0)
3197 /* Walk over each section attached to the input bfd. */
3198 for (section = input_bfd->sections;
3199 section != NULL; section = section->next)
3201 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3204 /* If there aren't any relocs, then there's nothing more to do. */
3205 if ((section->flags & SEC_RELOC) == 0
3206 || section->reloc_count == 0
3207 || (section->flags & SEC_CODE) == 0)
3210 /* If this section is a link-once section that will be
3211 discarded, then don't create any stubs. */
3212 if (section->output_section == NULL
3213 || section->output_section->owner != output_bfd)
3216 /* Get the relocs. */
3218 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3219 NULL, info->keep_memory);
3220 if (internal_relocs == NULL)
3221 goto error_ret_free_local;
3223 /* Now examine each relocation. */
3224 irela = internal_relocs;
3225 irelaend = irela + section->reloc_count;
3226 for (; irela < irelaend; irela++)
3228 unsigned int r_type, r_indx;
3229 enum elf_aarch64_stub_type stub_type;
3230 struct elf_aarch64_stub_hash_entry *stub_entry;
3233 bfd_vma destination;
3234 struct elf_aarch64_link_hash_entry *hash;
3235 const char *sym_name;
3237 const asection *id_sec;
3238 unsigned char st_type;
3241 r_type = ELFNN_R_TYPE (irela->r_info);
3242 r_indx = ELFNN_R_SYM (irela->r_info);
3244 if (r_type >= (unsigned int) R_AARCH64_end)
3246 bfd_set_error (bfd_error_bad_value);
3247 error_ret_free_internal:
3248 if (elf_section_data (section)->relocs == NULL)
3249 free (internal_relocs);
3250 goto error_ret_free_local;
3253 /* Only look for stubs on unconditional branch and
3254 branch and link instructions. */
3255 if (r_type != (unsigned int) AARCH64_R (CALL26)
3256 && r_type != (unsigned int) AARCH64_R (JUMP26))
3259 /* Now determine the call target, its name, value,
3266 if (r_indx < symtab_hdr->sh_info)
3268 /* It's a local symbol. */
3269 Elf_Internal_Sym *sym;
3270 Elf_Internal_Shdr *hdr;
3272 if (local_syms == NULL)
3275 = (Elf_Internal_Sym *) symtab_hdr->contents;
3276 if (local_syms == NULL)
3278 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3279 symtab_hdr->sh_info, 0,
3281 if (local_syms == NULL)
3282 goto error_ret_free_internal;
3285 sym = local_syms + r_indx;
3286 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3287 sym_sec = hdr->bfd_section;
3289 /* This is an undefined symbol. It can never be resolved. */
3293 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3294 sym_value = sym->st_value;
3295 destination = (sym_value + irela->r_addend
3296 + sym_sec->output_offset
3297 + sym_sec->output_section->vma);
3298 st_type = ELF_ST_TYPE (sym->st_info);
3300 = bfd_elf_string_from_elf_section (input_bfd,
3301 symtab_hdr->sh_link,
3308 e_indx = r_indx - symtab_hdr->sh_info;
3309 hash = ((struct elf_aarch64_link_hash_entry *)
3310 elf_sym_hashes (input_bfd)[e_indx]);
3312 while (hash->root.root.type == bfd_link_hash_indirect
3313 || hash->root.root.type == bfd_link_hash_warning)
3314 hash = ((struct elf_aarch64_link_hash_entry *)
3315 hash->root.root.u.i.link);
3317 if (hash->root.root.type == bfd_link_hash_defined
3318 || hash->root.root.type == bfd_link_hash_defweak)
3320 struct elf_aarch64_link_hash_table *globals =
3321 elf_aarch64_hash_table (info);
3322 sym_sec = hash->root.root.u.def.section;
3323 sym_value = hash->root.root.u.def.value;
3324 /* For a destination in a shared library,
3325 use the PLT stub as target address to
3326 decide whether a branch stub is needed. */
3328 if (globals->root.splt != NULL && hash != NULL
3329 && hash->root.plt.offset != (bfd_vma) - 1)
3331 sym_sec = globals->root.splt;
3332 sym_value = hash->root.plt.offset;
3333 if (sym_sec->output_section != NULL)
3334 destination = (sym_value
3335 + sym_sec->output_offset
3337 sym_sec->output_section->vma);
3339 else if (sym_sec->output_section != NULL)
3340 destination = (sym_value + irela->r_addend
3341 + sym_sec->output_offset
3342 + sym_sec->output_section->vma);
3344 else if (hash->root.root.type == bfd_link_hash_undefined
3345 || (hash->root.root.type
3346 == bfd_link_hash_undefweak))
3348 /* For a shared library, use the PLT stub as
3349 target address to decide whether a long
3350 branch stub is needed.
3351 For absolute code, they cannot be handled. */
3352 struct elf_aarch64_link_hash_table *globals =
3353 elf_aarch64_hash_table (info);
3355 if (globals->root.splt != NULL && hash != NULL
3356 && hash->root.plt.offset != (bfd_vma) - 1)
3358 sym_sec = globals->root.splt;
3359 sym_value = hash->root.plt.offset;
3360 if (sym_sec->output_section != NULL)
3361 destination = (sym_value
3362 + sym_sec->output_offset
3364 sym_sec->output_section->vma);
3371 bfd_set_error (bfd_error_bad_value);
3372 goto error_ret_free_internal;
3374 st_type = ELF_ST_TYPE (hash->root.type);
3375 sym_name = hash->root.root.root.string;
3378 /* Determine what (if any) linker stub is needed. */
3379 stub_type = aarch64_type_of_stub
3380 (info, section, irela, st_type, hash, destination);
3381 if (stub_type == aarch64_stub_none)
3384 /* Support for grouping stub sections. */
3385 id_sec = htab->stub_group[section->id].link_sec;
3387 /* Get the name of this stub. */
3388 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3391 goto error_ret_free_internal;
3394 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3395 stub_name, FALSE, FALSE);
3396 if (stub_entry != NULL)
3398 /* The proper stub has already been created. */
3403 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3404 (stub_name, section, htab);
3405 if (stub_entry == NULL)
3408 goto error_ret_free_internal;
3411 stub_entry->target_value = sym_value;
3412 stub_entry->target_section = sym_sec;
3413 stub_entry->stub_type = stub_type;
3414 stub_entry->h = hash;
3415 stub_entry->st_type = st_type;
3417 if (sym_name == NULL)
3418 sym_name = "unnamed";
3419 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3420 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3421 if (stub_entry->output_name == NULL)
3424 goto error_ret_free_internal;
3427 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3430 stub_changed = TRUE;
3433 /* We're done with the internal relocs, free them. */
3434 if (elf_section_data (section)->relocs == NULL)
3435 free (internal_relocs);
3442 _bfd_aarch64_resize_stubs (htab);
3444 /* Ask the linker to do its stuff. */
3445 (*htab->layout_sections_again) ();
3446 stub_changed = FALSE;
3451 error_ret_free_local:
3455 /* Build all the stubs associated with the current output file. The
3456 stubs are kept in a hash table attached to the main linker hash
3457 table. We also set up the .plt entries for statically linked PIC
3458 functions here. This function is called via aarch64_elf_finish in the linker. */
3462 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3465 struct bfd_hash_table *table;
3466 struct elf_aarch64_link_hash_table *htab;
3468 htab = elf_aarch64_hash_table (info);
3470 for (stub_sec = htab->stub_bfd->sections;
3471 stub_sec != NULL; stub_sec = stub_sec->next)
3475 /* Ignore non-stub sections. */
3476 if (!strstr (stub_sec->name, STUB_SUFFIX))
3479 /* Allocate memory to hold the linker stubs. */
3480 size = stub_sec->size;
3481 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3482 if (stub_sec->contents == NULL && size != 0)
3487 /* Build the stubs as directed by the stub hash table. */
3488 table = &htab->stub_hash_table;
3489 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3495 /* Add an entry to the code/data map for section SEC. */
3498 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3500 struct _aarch64_elf_section_data *sec_data =
3501 elf_aarch64_section_data (sec);
3502 unsigned int newidx;
3504 if (sec_data->map == NULL)
3506 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3507 sec_data->mapcount = 0;
3508 sec_data->mapsize = 1;
3511 newidx = sec_data->mapcount++;
3513 if (sec_data->mapcount > sec_data->mapsize)
3515 sec_data->mapsize *= 2;
3516 sec_data->map = bfd_realloc_or_free
3517 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3522 sec_data->map[newidx].vma = vma;
3523 sec_data->map[newidx].type = type;
3528 /* Initialise maps of insn/data for input BFDs. */
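/* Note on the convention assumed here: AArch64 ELF mapping symbols are
   local symbols named "$x" (start of a run of A64 code) and "$d" (start of
   literal data), so name[1] below supplies the span type stored in the map
   and the erratum scan above skips 'd' spans.  For example (illustrative):

     $x:  ldr  w0, [x1]        // code span, scanned for erratum sequences
     $d:  .word 0xdeadbeef     // data span, ignored  */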
3530 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3532 Elf_Internal_Sym *isymbuf;
3533 Elf_Internal_Shdr *hdr;
3534 unsigned int i, localsyms;
3536 /* Make sure that we are dealing with an AArch64 elf binary. */
3537 if (!is_aarch64_elf (abfd))
3540 if ((abfd->flags & DYNAMIC) != 0)
3543 hdr = &elf_symtab_hdr (abfd);
3544 localsyms = hdr->sh_info;
3546 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3547 should contain the number of local symbols, which should come before any
3548 global symbols. Mapping symbols are always local. */
3549 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3551 /* No internal symbols read? Skip this BFD. */
3552 if (isymbuf == NULL)
3555 for (i = 0; i < localsyms; i++)
3557 Elf_Internal_Sym *isym = &isymbuf[i];
3558 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3561 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3563 name = bfd_elf_string_from_elf_section (abfd,
3567 if (bfd_is_aarch64_special_symbol_name
3568 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3569 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
3574 /* Set option values needed during linking. */
3576 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
3577 struct bfd_link_info *link_info,
3579 int no_wchar_warn, int pic_veneer,
3580 int fix_erratum_835769)
3582 struct elf_aarch64_link_hash_table *globals;
3584 globals = elf_aarch64_hash_table (link_info);
3585 globals->pic_veneer = pic_veneer;
3586 globals->fix_erratum_835769 = fix_erratum_835769;
3588 BFD_ASSERT (is_aarch64_elf (output_bfd));
3589 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3590 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3594 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3595 struct elf_aarch64_link_hash_table
3596 *globals, struct bfd_link_info *info,
3597 bfd_vma value, bfd *output_bfd,
3598 bfd_boolean *unresolved_reloc_p)
3600 bfd_vma off = (bfd_vma) - 1;
3601 asection *basegot = globals->root.sgot;
3602 bfd_boolean dyn = globals->root.dynamic_sections_created;
3606 BFD_ASSERT (basegot != NULL);
3607 off = h->got.offset;
3608 BFD_ASSERT (off != (bfd_vma) - 1);
3609 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3611 && SYMBOL_REFERENCES_LOCAL (info, h))
3612 || (ELF_ST_VISIBILITY (h->other)
3613 && h->root.type == bfd_link_hash_undefweak))
3615 /* This is actually a static link, or it is a -Bsymbolic link
3616 and the symbol is defined locally. We must initialize this
3617 entry in the global offset table. Since the offset must
3618 always be a multiple of 8 (4 in the case of ILP32), we use
3619 the least significant bit to record whether we have
3620 initialized it already.
3621 When doing a dynamic link, we create a .rel(a).got relocation
3622 entry to initialize the value. This is done in the
3623 finish_dynamic_symbol routine. */
3628 bfd_put_NN (output_bfd, value, basegot->contents + off);
3633 *unresolved_reloc_p = FALSE;
3635 off = off + basegot->output_section->vma + basegot->output_offset;
3641 /* Change R_TYPE to a more efficient access model where possible,
3642 return the new reloc type. */
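/* For instance (restating the cases handled below): with a symbol known to
   bind locally (h == NULL) the general-dynamic page relocation
   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 relaxes all the way to the local-exec
   form BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1; otherwise it only drops to
   the initial-exec form BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21.  */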
3644 static bfd_reloc_code_real_type
3645 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
3646 struct elf_link_hash_entry *h)
3648 bfd_boolean is_local = h == NULL;
3652 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3653 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3655 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3656 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
3658 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3660 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3663 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3665 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3666 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3668 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3669 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
3671 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3672 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
3674 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3675 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3677 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
3678 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3680 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3683 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3685 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
3686 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3688 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3689 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3690 /* Instructions with these relocations will become NOPs. */
3691 return BFD_RELOC_AARCH64_NONE;
3701 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
3705 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
3706 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
3707 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
3708 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
3711 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3712 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3713 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3716 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3717 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3718 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3719 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3720 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
3721 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
3722 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3723 return GOT_TLSDESC_GD;
3725 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3726 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3727 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
3728 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3731 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
3732 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
3733 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3734 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
3735 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3736 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
3737 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3738 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
3748 aarch64_can_relax_tls (bfd *input_bfd,
3749 struct bfd_link_info *info,
3750 bfd_reloc_code_real_type r_type,
3751 struct elf_link_hash_entry *h,
3752 unsigned long r_symndx)
3754 unsigned int symbol_got_type;
3755 unsigned int reloc_got_type;
3757 if (! IS_AARCH64_TLS_RELOC (r_type))
3760 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3761 reloc_got_type = aarch64_reloc_got_type (r_type);
3763 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3769 if (h && h->root.type == bfd_link_hash_undefweak)
3775 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
3778 static bfd_reloc_code_real_type
3779 aarch64_tls_transition (bfd *input_bfd,
3780 struct bfd_link_info *info,
3781 unsigned int r_type,
3782 struct elf_link_hash_entry *h,
3783 unsigned long r_symndx)
3785 bfd_reloc_code_real_type bfd_r_type
3786 = elfNN_aarch64_bfd_reloc_from_type (r_type);
3788 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
3791 return aarch64_tls_transition_without_check (bfd_r_type, h);
3794 /* Return the base VMA address which should be subtracted from real addresses
3795 when resolving R_AARCH64_TLS_DTPREL relocation. */
3798 dtpoff_base (struct bfd_link_info *info)
3800 /* If tls_sec is NULL, we should have signalled an error already. */
3801 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3802 return elf_hash_table (info)->tls_sec->vma;
3805 /* Return the base VMA address which should be subtracted from real addresses
3806 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
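/* Worked example (a sketch, assuming the usual AArch64 variant-1 TLS
   layout in which a TCB_SIZE-byte thread control block sits at the thread
   pointer, before the TLS block): for a TLS symbol resolving to VMA
   `value' inside tls_sec, its TP-relative offset is

     value - tpoff_base (info)
       == value - tls_sec->vma
          + align_power ((bfd_vma) TCB_SIZE, tls_sec->alignment_power)  */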
3809 tpoff_base (struct bfd_link_info *info)
3811 struct elf_link_hash_table *htab = elf_hash_table (info);
3813 /* If tls_sec is NULL, we should have signalled an error already. */
3814 BFD_ASSERT (htab->tls_sec != NULL);
3816 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3817 htab->tls_sec->alignment_power);
3818 return htab->tls_sec->vma - base;
3822 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3823 unsigned long r_symndx)
3825 /* Calculate the address of the GOT entry for symbol
3826 referred to in h. */
3828 return &h->got.offset;
3832 struct elf_aarch64_local_symbol *l;
3834 l = elf_aarch64_locals (input_bfd);
3835 return &l[r_symndx].got_offset;
3840 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3841 unsigned long r_symndx)
3844 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
3849 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3850 unsigned long r_symndx)
3853 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3858 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3859 unsigned long r_symndx)
3862 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
3868 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3869 unsigned long r_symndx)
3871 /* Calculate the address of the GOT entry for symbol
3872 referred to in h. */
3875 struct elf_aarch64_link_hash_entry *eh;
3876 eh = (struct elf_aarch64_link_hash_entry *) h;
3877 return &eh->tlsdesc_got_jump_table_offset;
3882 struct elf_aarch64_local_symbol *l;
3884 l = elf_aarch64_locals (input_bfd);
3885 return &l[r_symndx].tlsdesc_got_jump_table_offset;
3890 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3891 unsigned long r_symndx)
3894 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3899 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3900 struct elf_link_hash_entry *h,
3901 unsigned long r_symndx)
3904 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3909 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3910 unsigned long r_symndx)
3913 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
3918 /* Data for make_branch_to_erratum_835769_stub(). */
3920 struct erratum_835769_branch_to_stub_data
3922 asection *output_section;
3926 /* Helper to insert branches to erratum 835769 stubs in the right
3927 places for a particular section. */
3930 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
3933 struct elf_aarch64_stub_hash_entry *stub_entry;
3934 struct erratum_835769_branch_to_stub_data *data;
3936 unsigned long branch_insn = 0;
3937 bfd_vma veneered_insn_loc, veneer_entry_loc;
3938 bfd_signed_vma branch_offset;
3939 unsigned int target;
3942 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
3943 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
3945 if (stub_entry->target_section != data->output_section
3946 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
3949 contents = data->contents;
3950 veneered_insn_loc = stub_entry->target_section->output_section->vma
3951 + stub_entry->target_section->output_offset
3952 + stub_entry->target_value;
3953 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
3954 + stub_entry->stub_sec->output_offset
3955 + stub_entry->stub_offset;
3956 branch_offset = veneer_entry_loc - veneered_insn_loc;
3958 abfd = stub_entry->target_section->owner;
3959 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
3960 (*_bfd_error_handler)
3961 (_("%B: error: Erratum 835769 stub out "
3962 "of range (input file too large)"), abfd);
3964 target = stub_entry->target_value;
3965 branch_insn = 0x14000000;
3966 branch_offset >>= 2;
3967 branch_offset &= 0x3ffffff;
3968 branch_insn |= branch_offset;
3969 bfd_putl32 (branch_insn, &contents[target]);
3975 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
3976 struct bfd_link_info *link_info,
3981 struct elf_aarch64_link_hash_table *globals =
3982 elf_aarch64_hash_table (link_info);
3984 if (globals == NULL)
3987 /* Fix code to point to erratum 835769 stubs. */
3988 if (globals->fix_erratum_835769)
3990 struct erratum_835769_branch_to_stub_data data;
3992 data.output_section = sec;
3993 data.contents = contents;
3994 bfd_hash_traverse (&globals->stub_hash_table,
3995 make_branch_to_erratum_835769_stub, &data);
4001 /* Perform a relocation as part of a final link. */
4002 static bfd_reloc_status_type
4003 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4006 asection *input_section,
4008 Elf_Internal_Rela *rel,
4010 struct bfd_link_info *info,
4012 struct elf_link_hash_entry *h,
4013 bfd_boolean *unresolved_reloc_p,
4014 bfd_boolean save_addend,
4015 bfd_vma *saved_addend,
4016 Elf_Internal_Sym *sym)
4018 Elf_Internal_Shdr *symtab_hdr;
4019 unsigned int r_type = howto->type;
4020 bfd_reloc_code_real_type bfd_r_type
4021 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4022 bfd_reloc_code_real_type new_bfd_r_type;
4023 unsigned long r_symndx;
4024 bfd_byte *hit_data = contents + rel->r_offset;
4026 bfd_signed_vma signed_addend;
4027 struct elf_aarch64_link_hash_table *globals;
4028 bfd_boolean weak_undef_p;
4030 globals = elf_aarch64_hash_table (info);
4032 symtab_hdr = &elf_symtab_hdr (input_bfd);
4034 BFD_ASSERT (is_aarch64_elf (input_bfd));
4036 r_symndx = ELFNN_R_SYM (rel->r_info);
4038 /* It is possible to have linker relaxations on some TLS access
4039 models. Update our information here. */
4040 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4041 if (new_bfd_r_type != bfd_r_type)
4043 bfd_r_type = new_bfd_r_type;
4044 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4045 BFD_ASSERT (howto != NULL);
4046 r_type = howto->type;
4049 place = input_section->output_section->vma
4050 + input_section->output_offset + rel->r_offset;
4052 /* Get addend, accumulating the addend for consecutive relocs
4053 which refer to the same offset. */
4054 signed_addend = saved_addend ? *saved_addend : 0;
4055 signed_addend += rel->r_addend;
4057 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4058 : bfd_is_und_section (sym_sec));
4060 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4061 it here if it is defined in a non-shared object. */
4063 && h->type == STT_GNU_IFUNC
4071 if ((input_section->flags & SEC_ALLOC) == 0
4072 || h->plt.offset == (bfd_vma) -1)
4075 /* STT_GNU_IFUNC symbol must go through PLT. */
4076 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4077 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4082 if (h->root.root.string)
4083 name = h->root.root.string;
4085 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4087 (*_bfd_error_handler)
4088 (_("%B: relocation %s against STT_GNU_IFUNC "
4089 "symbol `%s' isn't handled by %s"), input_bfd,
4090 howto->name, name, __FUNCTION__);
4091 bfd_set_error (bfd_error_bad_value);
4094 case BFD_RELOC_AARCH64_NN:
4095 if (rel->r_addend != 0)
4097 if (h->root.root.string)
4098 name = h->root.root.string;
4100 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4102 (*_bfd_error_handler)
4103 (_("%B: relocation %s against STT_GNU_IFUNC "
4104 "symbol `%s' has non-zero addend: %d"),
4105 input_bfd, howto->name, name, rel->r_addend);
4106 bfd_set_error (bfd_error_bad_value);
4110 /* Generate dynamic relocation only when there is a
4111 non-GOT reference in a shared object. */
4112 if (info->shared && h->non_got_ref)
4114 Elf_Internal_Rela outrel;
4117 /* Need a dynamic relocation to get the real function address. */
4119 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4123 if (outrel.r_offset == (bfd_vma) -1
4124 || outrel.r_offset == (bfd_vma) -2)
4127 outrel.r_offset += (input_section->output_section->vma
4128 + input_section->output_offset);
4130 if (h->dynindx == -1
4132 || info->executable)
4134 /* This symbol is resolved locally. */
4135 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4136 outrel.r_addend = (h->root.u.def.value
4137 + h->root.u.def.section->output_section->vma
4138 + h->root.u.def.section->output_offset);
4142 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4143 outrel.r_addend = 0;
4146 sreloc = globals->root.irelifunc;
4147 elf_append_rela (output_bfd, sreloc, &outrel);
4149 /* If this reloc is against an external symbol, we
4150 do not want to fiddle with the addend. Otherwise,
4151 we need to include the symbol value so that it
4152 becomes an addend for the dynamic reloc. For an
4153 internal symbol, we have updated addend. */
4154 return bfd_reloc_ok;
4157 case BFD_RELOC_AARCH64_JUMP26:
4158 case BFD_RELOC_AARCH64_CALL26:
4159 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4162 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4164 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4165 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4166 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4167 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4168 base_got = globals->root.sgot;
4169 off = h->got.offset;
4171 if (base_got == NULL)
4174 if (off == (bfd_vma) -1)
4178 /* We can't use h->got.offset here to save state, or
4179 even just remember the offset, as finish_dynamic_symbol
4180 would use that as offset into .got. */
4182 if (globals->root.splt != NULL)
4184 plt_index = ((h->plt.offset - globals->plt_header_size) /
4185 globals->plt_entry_size);
4186 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4187 base_got = globals->root.sgotplt;
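/* The first three .got.plt entries are reserved for the dynamic
   linker, so PLT entry N owns .got.plt slot N + 3; that slot is the
   address the GOT-form relocation should resolve to here.  */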
4191 plt_index = h->plt.offset / globals->plt_entry_size;
4192 off = plt_index * GOT_ENTRY_SIZE;
4193 base_got = globals->root.igotplt;
4196 if (h->dynindx == -1
4200 /* This references the local definition. We must
4201 initialize this entry in the global offset table.
4202 Since the offset must always be a multiple of 8,
4203 we use the least significant bit to record
4204 whether we have initialized it already.
4206 When doing a dynamic link, we create a .rela.got
4207 relocation entry to initialize the value. This
4208 is done in the finish_dynamic_symbol routine. */
4213 bfd_put_NN (output_bfd, value,
4214 base_got->contents + off);
4215 /* Note that this is harmless as -1 | 1 still is -1. */
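/* The usual BFD idiom assumed here: the first pass stores the value
   and sets h->got.offset |= 1; subsequent passes see the low bit,
   skip the store, and recover the real offset with
   off &= ~(bfd_vma) 1.  */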
4219 value = (base_got->output_section->vma
4220 + base_got->output_offset + off);
4223 value = aarch64_calculate_got_entry_vma (h, globals, info,
4225 unresolved_reloc_p);
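/* Either way, VALUE now holds the address of the GOT slot itself;
   the PAGE21/LO12-style relocation below is applied against that
   address rather than against the symbol.  */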
4226 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4228 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4229 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4230 case BFD_RELOC_AARCH64_ADD_LO12:
4237 case BFD_RELOC_AARCH64_NONE:
4238 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4239 *unresolved_reloc_p = FALSE;
4240 return bfd_reloc_ok;
4242 case BFD_RELOC_AARCH64_NN:
4244 /* When generating a shared object or relocatable executable, these
4245 relocations are copied into the output file to be resolved at run time. */
4247 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
4248 && (input_section->flags & SEC_ALLOC)
4250 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4251 || h->root.type != bfd_link_hash_undefweak))
4253 Elf_Internal_Rela outrel;
4255 bfd_boolean skip, relocate;
4258 *unresolved_reloc_p = FALSE;
4263 outrel.r_addend = signed_addend;
4265 _bfd_elf_section_offset (output_bfd, info, input_section,
4267 if (outrel.r_offset == (bfd_vma) - 1)
4269 else if (outrel.r_offset == (bfd_vma) - 2)
4275 outrel.r_offset += (input_section->output_section->vma
4276 + input_section->output_offset);
4279 memset (&outrel, 0, sizeof outrel);
4282 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4283 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4288 /* On SVR4-ish systems, the dynamic loader cannot
4289 relocate the text and data segments independently,
4290 so the symbol does not matter. */
4292 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4293 outrel.r_addend += value;
4296 sreloc = elf_section_data (input_section)->sreloc;
4297 if (sreloc == NULL || sreloc->contents == NULL)
4298 return bfd_reloc_notsupported;
4300 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4301 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4303 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4305 /* Sanity check that we have previously allocated
4306 sufficient space in the relocation section for the
4307 number of relocations we actually want to emit. */
4311 /* If this reloc is against an external symbol, we do not want to
4312 fiddle with the addend. Otherwise, we need to include the symbol
4313 value so that it becomes an addend for the dynamic reloc. */
4315 return bfd_reloc_ok;
4317 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4318 contents, rel->r_offset, value,
4322 value += signed_addend;
4325 case BFD_RELOC_AARCH64_JUMP26:
4326 case BFD_RELOC_AARCH64_CALL26:
4328 asection *splt = globals->root.splt;
4329 bfd_boolean via_plt_p =
4330 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4332 /* A call to an undefined weak symbol is converted to a jump to
4333 the next instruction unless a PLT entry will be created.
4334 The jump to the next instruction is optimized as a NOP.
4335 Do the same for local undefined symbols. */
4336 if (weak_undef_p && ! via_plt_p)
4338 bfd_putl32 (INSN_NOP, hit_data);
4339 return bfd_reloc_ok;
4342 /* If the call goes through a PLT entry, make sure to
4343 check distance to the right destination address. */
4346 value = (splt->output_section->vma
4347 + splt->output_offset + h->plt.offset);
4348 *unresolved_reloc_p = FALSE;
4351 /* If the target symbol is global and marked as a function, the
4352 relocation applies to a function call or a tail call. In this
4353 situation we can veneer out-of-range branches. The veneers
4354 use IP0 and IP1, hence they cannot be used for arbitrary
4355 out-of-range branches that occur within the body of a function. */
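/* IP0 and IP1 are the intra-procedure-call scratch registers X16 and
   X17, which the AArch64 procedure call standard allows a veneer to
   clobber at a function call boundary.  */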
4356 if (h && h->type == STT_FUNC)
4358 /* Check if a stub has to be inserted because the destination is out of range. */
4360 if (! aarch64_valid_branch_p (value, place))
4362 /* The target is out of reach, so redirect the branch to
4363 the local stub for this function. */
4364 struct elf_aarch64_stub_hash_entry *stub_entry;
4365 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
4368 if (stub_entry != NULL)
4369 value = (stub_entry->stub_offset
4370 + stub_entry->stub_sec->output_offset
4371 + stub_entry->stub_sec->output_section->vma);
4375 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4376 signed_addend, weak_undef_p);
4379 case BFD_RELOC_AARCH64_16:
4381 case BFD_RELOC_AARCH64_32:
4383 case BFD_RELOC_AARCH64_ADD_LO12:
4384 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4385 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4386 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4387 case BFD_RELOC_AARCH64_BRANCH19:
4388 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4389 case BFD_RELOC_AARCH64_LDST8_LO12:
4390 case BFD_RELOC_AARCH64_LDST16_LO12:
4391 case BFD_RELOC_AARCH64_LDST32_LO12:
4392 case BFD_RELOC_AARCH64_LDST64_LO12:
4393 case BFD_RELOC_AARCH64_LDST128_LO12:
4394 case BFD_RELOC_AARCH64_MOVW_G0_S:
4395 case BFD_RELOC_AARCH64_MOVW_G1_S:
4396 case BFD_RELOC_AARCH64_MOVW_G2_S:
4397 case BFD_RELOC_AARCH64_MOVW_G0:
4398 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4399 case BFD_RELOC_AARCH64_MOVW_G1:
4400 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4401 case BFD_RELOC_AARCH64_MOVW_G2:
4402 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4403 case BFD_RELOC_AARCH64_MOVW_G3:
4404 case BFD_RELOC_AARCH64_16_PCREL:
4405 case BFD_RELOC_AARCH64_32_PCREL:
4406 case BFD_RELOC_AARCH64_64_PCREL:
4407 case BFD_RELOC_AARCH64_TSTBR14:
4408 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4409 signed_addend, weak_undef_p);
4412 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4413 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4414 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4415 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4416 if (globals->root.sgot == NULL)
4417 BFD_ASSERT (h != NULL);
4421 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4423 unresolved_reloc_p);
4424 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4429 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4430 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4431 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4432 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4433 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4434 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4435 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4436 if (globals->root.sgot == NULL)
4437 return bfd_reloc_notsupported;
4439 value = (symbol_got_offset (input_bfd, h, r_symndx)
4440 + globals->root.sgot->output_section->vma
4441 + globals->root.sgot->output_offset);
4443 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4445 *unresolved_reloc_p = FALSE;
4448 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4449 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4450 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4451 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4452 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4453 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4454 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4455 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4456 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4457 signed_addend - tpoff_base (info),
4459 *unresolved_reloc_p = FALSE;
4462 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4463 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4464 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4465 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4466 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4467 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4468 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4469 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4470 if (globals->root.sgot == NULL)
4471 return bfd_reloc_notsupported;
4472 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4473 + globals->root.sgotplt->output_section->vma
4474 + globals->root.sgotplt->output_offset
4475 + globals->sgotplt_jump_table_size);
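/* TLSDESC GOT entries are carved out of .got.plt after the PLT
   jump-table slots; sgotplt_jump_table_size converts the per-symbol
   offset recorded earlier into a final .got.plt offset.  */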
4477 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4479 *unresolved_reloc_p = FALSE;
4483 return bfd_reloc_notsupported;
4487 *saved_addend = value;
4489 /* Only apply the final relocation in a sequence. */
4491 return bfd_reloc_continue;
4493 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4497 /* Handle TLS relaxations. Relaxing is possible for symbols that use
4498 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static link.
4501 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4502 is to then call final_link_relocate. Return other values in the case of error. */
4505 static bfd_reloc_status_type
4506 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
4507 bfd *input_bfd, bfd_byte *contents,
4508 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4510 bfd_boolean is_local = h == NULL;
4511 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
4514 BFD_ASSERT (globals && input_bfd && contents && rel);
4516 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
4518 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4519 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4522 /* GD->LE relaxation:
4523 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4525 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var */
4527 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
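/* 0xd2a00000 is "movz x0, #0, lsl #16"; the :tprel_g1: immediate is
   inserted when the relaxed relocation is applied by the caller.  */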
4528 return bfd_reloc_continue;
4532 /* GD->IE relaxation:
4533 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4535 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var */
4537 return bfd_reloc_continue;
4540 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4544 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4547 /* Tiny TLSDESC->LE relaxation:
4548 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
4549 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var */
4553 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
4554 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
4556 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4557 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
4558 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4560 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4561 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
4562 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
4563 return bfd_reloc_continue;
4567 /* Tiny TLSDESC->IE relaxation:
4568 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
4569 adr x0, :tlsdesc:var => nop */
4573 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
4574 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
4576 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4577 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4579 bfd_putl32 (0x58000000, contents + rel->r_offset);
4580 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
4581 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
4582 return bfd_reloc_continue;
4585 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4588 /* Tiny GD->LE relaxation:
4589 adr x0, :tlsgd:var => mrs x1, tpidr_el0
4590 bl __tls_get_addr => add x0, x1, #:tprel_hi12:var, lsl #12
4591 nop => add x0, x0, #:tprel_lo12_nc:var */
4594 /* First kill the tls_get_addr reloc on the bl instruction. */
4595 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4597 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
4598 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
4599 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
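/* Encodings used: 0xd53bd041 is "mrs x1, tpidr_el0", 0x91400020 is
   "add x0, x1, #0, lsl #12" and 0x91000000 is "add x0, x0, #0"; the
   zero immediates are filled in by the TPREL_HI12/LO12_NC
   relocations set up below.  */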
4601 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4602 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
4603 rel[1].r_offset = rel->r_offset + 8;
4605 /* Move the current relocation to the second instruction in the sequence. */
4608 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4609 AARCH64_R (TLSLE_ADD_TPREL_HI12));
4610 return bfd_reloc_continue;
4614 /* Tiny GD->IE relaxation:
4615 adr x0, :tlsgd:var => ldr x0, :gottprel:var
4616 bl __tls_get_addr => mrs x1, tpidr_el0
4617 nop => add x0, x0, x1 */
4620 /* First kill the tls_get_addr reloc on the bl instruction. */
4621 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4622 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4624 bfd_putl32 (0x58000000, contents + rel->r_offset);
4625 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4626 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
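/* Encodings used: 0x58000000 is "ldr x0, <literal>" (the 19-bit
   literal offset comes from the relaxed GOTTPREL relocation),
   0xd53bd041 is "mrs x1, tpidr_el0" and 0x8b000020 is
   "add x0, x1, x0".  */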
4627 return bfd_reloc_continue;
4630 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4631 return bfd_reloc_continue;
4633 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4636 /* GD->LE relaxation:
4637 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var */
4639 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4640 return bfd_reloc_continue;
4644 /* GD->IE relaxation:
4645 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var] */
4647 insn = bfd_getl32 (contents + rel->r_offset);
4649 bfd_putl32 (insn, contents + rel->r_offset);
4650 return bfd_reloc_continue;
4653 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4656 /* GD->LE relaxation
4657 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4658 bl __tls_get_addr => mrs x1, tpidr_el0
4659 nop => add x0, x1, x0 */
4662 /* First kill the tls_get_addr reloc on the bl instruction. */
4663 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4664 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4666 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4667 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4668 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4669 return bfd_reloc_continue;
4673 /* GD->IE relaxation
4674 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4675 BL __tls_get_addr => mrs x1, tpidr_el0
4677 NOP => add x0, x1, x0 */
4680 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
4682 /* Remove the relocation on the BL instruction. */
4683 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4685 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4687 /* We choose to fixup the BL and NOP instructions using the
4688 offset from the second relocation to allow flexibility in
4689 scheduling instructions between the ADD and BL. */
4690 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4691 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4692 return bfd_reloc_continue;
4695 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4696 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4697 /* GD->IE/LE relaxation:
4698 add x0, x0, #:tlsdesc_lo12:var => nop */
4701 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4702 return bfd_reloc_ok;
4704 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4705 /* IE->LE relaxation:
4706 adrp xd, :gottprel:var => movz xd, :tprel_g1:var */
4710 insn = bfd_getl32 (contents + rel->r_offset);
4711 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
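/* (insn & 0x1f) preserves the Rd field of the original ADRP so that
   the replacement MOVZ writes the same destination register.  */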
4713 return bfd_reloc_continue;
4715 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4716 /* IE->LE relaxation:
4717 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var */
4721 insn = bfd_getl32 (contents + rel->r_offset);
4722 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4724 return bfd_reloc_continue;
4727 return bfd_reloc_continue;
4730 return bfd_reloc_ok;
4733 /* Relocate an AArch64 ELF section. */
4736 elfNN_aarch64_relocate_section (bfd *output_bfd,
4737 struct bfd_link_info *info,
4739 asection *input_section,
4741 Elf_Internal_Rela *relocs,
4742 Elf_Internal_Sym *local_syms,
4743 asection **local_sections)
4745 Elf_Internal_Shdr *symtab_hdr;
4746 struct elf_link_hash_entry **sym_hashes;
4747 Elf_Internal_Rela *rel;
4748 Elf_Internal_Rela *relend;
4750 struct elf_aarch64_link_hash_table *globals;
4751 bfd_boolean save_addend = FALSE;
4754 globals = elf_aarch64_hash_table (info);
4756 symtab_hdr = &elf_symtab_hdr (input_bfd);
4757 sym_hashes = elf_sym_hashes (input_bfd);
4760 relend = relocs + input_section->reloc_count;
4761 for (; rel < relend; rel++)
4763 unsigned int r_type;
4764 bfd_reloc_code_real_type bfd_r_type;
4765 bfd_reloc_code_real_type relaxed_bfd_r_type;
4766 reloc_howto_type *howto;
4767 unsigned long r_symndx;
4768 Elf_Internal_Sym *sym;
4770 struct elf_link_hash_entry *h;
4772 bfd_reloc_status_type r;
4775 bfd_boolean unresolved_reloc = FALSE;
4776 char *error_message = NULL;
4778 r_symndx = ELFNN_R_SYM (rel->r_info);
4779 r_type = ELFNN_R_TYPE (rel->r_info);
4781 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
4782 howto = bfd_reloc.howto;
4786 (*_bfd_error_handler)
4787 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4788 input_bfd, input_section, r_type);
4791 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
4797 if (r_symndx < symtab_hdr->sh_info)
4799 sym = local_syms + r_symndx;
4800 sym_type = ELFNN_ST_TYPE (sym->st_info);
4801 sec = local_sections[r_symndx];
4803 /* An object file might have a reference to a local
4804 undefined symbol. This is a daft object file, but we
4805 should at least do something about it. */
4806 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4807 && bfd_is_und_section (sec)
4808 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4810 if (!info->callbacks->undefined_symbol
4811 (info, bfd_elf_string_from_elf_section
4812 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4813 input_bfd, input_section, rel->r_offset, TRUE))
4817 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4819 /* Relocate against local STT_GNU_IFUNC symbol. */
4820 if (!info->relocatable
4821 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
4823 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
4828 /* Set STT_GNU_IFUNC symbol value. */
4829 h->root.u.def.value = sym->st_value;
4830 h->root.u.def.section = sec;
4835 bfd_boolean warned, ignored;
4837 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4838 r_symndx, symtab_hdr, sym_hashes,
4840 unresolved_reloc, warned, ignored);
4845 if (sec != NULL && discarded_section (sec))
4846 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4847 rel, 1, relend, howto, 0, contents);
4849 if (info->relocatable)
4853 name = h->root.root.string;
4856 name = (bfd_elf_string_from_elf_section
4857 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4858 if (name == NULL || *name == '\0')
4859 name = bfd_section_name (input_bfd, sec);
4863 && r_type != R_AARCH64_NONE
4864 && r_type != R_AARCH64_NULL
4866 || h->root.type == bfd_link_hash_defined
4867 || h->root.type == bfd_link_hash_defweak)
4868 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
4870 (*_bfd_error_handler)
4871 ((sym_type == STT_TLS
4872 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4873 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4875 input_section, (long) rel->r_offset, howto->name, name);
4878 /* We relax only if we can see that there can be a valid transition
4879 from one reloc type to another.
4880 We call elfNN_aarch64_final_link_relocate unless we're completely
4881 done, i.e., the relaxation produced the final output we want. */
4883 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4885 if (relaxed_bfd_r_type != bfd_r_type)
4887 bfd_r_type = relaxed_bfd_r_type;
4888 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4889 BFD_ASSERT (howto != NULL);
4890 r_type = howto->type;
4891 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4892 unresolved_reloc = 0;
4895 r = bfd_reloc_continue;
4897 /* There may be multiple consecutive relocations for the
4898 same offset. In that case we are supposed to treat the
4899 output of each relocation as the addend for the next. */
4900 if (rel + 1 < relend
4901 && rel->r_offset == rel[1].r_offset
4902 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4903 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
4906 save_addend = FALSE;
4908 if (r == bfd_reloc_continue)
4909 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4910 input_section, contents, rel,
4911 relocation, info, sec,
4912 h, &unresolved_reloc,
4913 save_addend, &addend, sym);
4915 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
4917 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4918 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4919 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4920 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4922 bfd_boolean need_relocs = FALSE;
4927 off = symbol_got_offset (input_bfd, h, r_symndx);
4928 indx = h && h->dynindx != -1 ? h->dynindx : 0;
4931 (info->shared || indx != 0) &&
4933 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4934 || h->root.type != bfd_link_hash_undefweak);
4936 BFD_ASSERT (globals->root.srelgot != NULL);
4940 Elf_Internal_Rela rela;
4941 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
4943 rela.r_offset = globals->root.sgot->output_section->vma +
4944 globals->root.sgot->output_offset + off;
4947 loc = globals->root.srelgot->contents;
4948 loc += globals->root.srelgot->reloc_count++
4949 * RELOC_SIZE (globals);
4950 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4954 bfd_put_NN (output_bfd,
4955 relocation - dtpoff_base (info),
4956 globals->root.sgot->contents + off
4961 /* This TLS symbol is global. We emit a
4962 relocation to fixup the tls offset at load
4965 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
4968 (globals->root.sgot->output_section->vma
4969 + globals->root.sgot->output_offset + off
4972 loc = globals->root.srelgot->contents;
4973 loc += globals->root.srelgot->reloc_count++
4974 * RELOC_SIZE (globals);
4975 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
4976 bfd_put_NN (output_bfd, (bfd_vma) 0,
4977 globals->root.sgot->contents + off
4983 bfd_put_NN (output_bfd, (bfd_vma) 1,
4984 globals->root.sgot->contents + off);
4985 bfd_put_NN (output_bfd,
4986 relocation - dtpoff_base (info),
4987 globals->root.sgot->contents + off
4991 symbol_got_offset_mark (input_bfd, h, r_symndx);
4995 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4996 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4997 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4998 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5000 bfd_boolean need_relocs = FALSE;
5005 off = symbol_got_offset (input_bfd, h, r_symndx);
5007 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5010 (info->shared || indx != 0) &&
5012 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5013 || h->root.type != bfd_link_hash_undefweak);
5015 BFD_ASSERT (globals->root.srelgot != NULL);
5019 Elf_Internal_Rela rela;
5022 rela.r_addend = relocation - dtpoff_base (info);
5026 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5027 rela.r_offset = globals->root.sgot->output_section->vma +
5028 globals->root.sgot->output_offset + off;
5030 loc = globals->root.srelgot->contents;
5031 loc += globals->root.srelgot->reloc_count++
5032 * RELOC_SIZE (globals);
5034 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5036 bfd_put_NN (output_bfd, rela.r_addend,
5037 globals->root.sgot->contents + off);
5040 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5041 globals->root.sgot->contents + off);
5043 symbol_got_offset_mark (input_bfd, h, r_symndx);
5047 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5048 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5049 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5050 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5051 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5052 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5053 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5054 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5057 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5058 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5059 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5060 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5061 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5062 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5064 bfd_boolean need_relocs = FALSE;
5065 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5066 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5068 need_relocs = (h == NULL
5069 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5070 || h->root.type != bfd_link_hash_undefweak);
5072 BFD_ASSERT (globals->root.srelgot != NULL);
5073 BFD_ASSERT (globals->root.sgot != NULL);
5078 Elf_Internal_Rela rela;
5079 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5082 rela.r_offset = (globals->root.sgotplt->output_section->vma
5083 + globals->root.sgotplt->output_offset
5084 + off + globals->sgotplt_jump_table_size);
5087 rela.r_addend = relocation - dtpoff_base (info);
5089 /* Allocate the next available slot in the PLT reloc
5090 section to hold our R_AARCH64_TLSDESC, the next
5091 available slot is determined from reloc_count,
5092 which we step. But note, reloc_count was
5093 artificially moved down while allocating slots for
5094 real PLT relocs such that all of the PLT relocs
5095 will fit above the initial reloc_count and the
5096 extra stuff will fit below. */
5097 loc = globals->root.srelplt->contents;
5098 loc += globals->root.srelplt->reloc_count++
5099 * RELOC_SIZE (globals);
5101 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5103 bfd_put_NN (output_bfd, (bfd_vma) 0,
5104 globals->root.sgotplt->contents + off +
5105 globals->sgotplt_jump_table_size);
5106 bfd_put_NN (output_bfd, (bfd_vma) 0,
5107 globals->root.sgotplt->contents + off +
5108 globals->sgotplt_jump_table_size +
5112 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5123 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5124 because such sections are not SEC_ALLOC and thus ld.so will
5125 not process them. */
5126 if (unresolved_reloc
5127 && !((input_section->flags & SEC_DEBUGGING) != 0
5129 && _bfd_elf_section_offset (output_bfd, info, input_section,
5130 +rel->r_offset) != (bfd_vma) - 1)
5132 (*_bfd_error_handler)
5134 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5135 input_bfd, input_section, (long) rel->r_offset, howto->name,
5136 h->root.root.string);
5140 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5144 case bfd_reloc_overflow:
5145 /* If the overflowing reloc was to an undefined symbol,
5146 we have already printed one error message and there
5147 is no point complaining again. */
5149 h->root.type != bfd_link_hash_undefined)
5150 && (!((*info->callbacks->reloc_overflow)
5151 (info, (h ? &h->root : NULL), name, howto->name,
5152 (bfd_vma) 0, input_bfd, input_section,
5157 case bfd_reloc_undefined:
5158 if (!((*info->callbacks->undefined_symbol)
5159 (info, name, input_bfd, input_section,
5160 rel->r_offset, TRUE)))
5164 case bfd_reloc_outofrange:
5165 error_message = _("out of range");
5168 case bfd_reloc_notsupported:
5169 error_message = _("unsupported relocation");
5172 case bfd_reloc_dangerous:
5173 /* error_message should already be set. */
5177 error_message = _("unknown error");
5181 BFD_ASSERT (error_message != NULL);
5182 if (!((*info->callbacks->reloc_dangerous)
5183 (info, error_message, input_bfd, input_section,
5194 /* Set the right machine number. */
5197 elfNN_aarch64_object_p (bfd *abfd)
5200 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5202 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5207 /* Function to keep AArch64 specific flags in the ELF header. */
5210 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5212 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5217 elf_elfheader (abfd)->e_flags = flags;
5218 elf_flags_init (abfd) = TRUE;
5224 /* Merge backend specific data from an object file to the output
5225 object file when linking. */
5228 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5232 bfd_boolean flags_compatible = TRUE;
5235 /* Check if we have the same endianness. */
5236 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5239 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5242 /* The input BFD must have had its flags initialised. */
5243 /* The following seems bogus to me -- The flags are initialized in
5244 the assembler but I don't think an elf_flags_init field is
5245 written into the object. */
5246 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5248 in_flags = elf_elfheader (ibfd)->e_flags;
5249 out_flags = elf_elfheader (obfd)->e_flags;
5251 if (!elf_flags_init (obfd))
5253 /* If the input is the default architecture and had the default
5254 flags then do not bother setting the flags for the output
5255 architecture, instead allow future merges to do this. If no
5256 future merges ever set these flags then they will retain their
5257 uninitialised values which, surprise surprise, correspond
5258 to the default values. */
5259 if (bfd_get_arch_info (ibfd)->the_default
5260 && elf_elfheader (ibfd)->e_flags == 0)
5263 elf_flags_init (obfd) = TRUE;
5264 elf_elfheader (obfd)->e_flags = in_flags;
5266 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5267 && bfd_get_arch_info (obfd)->the_default)
5268 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5269 bfd_get_mach (ibfd));
5274 /* Identical flags must be compatible. */
5275 if (in_flags == out_flags)
5278 /* Check to see if the input BFD actually contains any sections. If
5279 not, its flags may not have been initialised either, but it
5280 cannot actually cause any incompatibility. Do not short-circuit
5281 dynamic objects; their section list may be emptied by
5282 elf_link_add_object_symbols.
5284 Also check to see if there are no code sections in the input.
5285 In this case there is no need to check for code specific flags.
5286 XXX - do we need to worry about floating-point format compatibility
5287 in data sections? */
5288 if (!(ibfd->flags & DYNAMIC))
5290 bfd_boolean null_input_bfd = TRUE;
5291 bfd_boolean only_data_sections = TRUE;
5293 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5295 if ((bfd_get_section_flags (ibfd, sec)
5296 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5297 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5298 only_data_sections = FALSE;
5300 null_input_bfd = FALSE;
5304 if (null_input_bfd || only_data_sections)
5308 return flags_compatible;
5311 /* Display the flags field. */
5314 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5316 FILE *file = (FILE *) ptr;
5317 unsigned long flags;
5319 BFD_ASSERT (abfd != NULL && ptr != NULL);
5321 /* Print normal ELF private data. */
5322 _bfd_elf_print_private_bfd_data (abfd, ptr);
5324 flags = elf_elfheader (abfd)->e_flags;
5325 /* Ignore init flag - it may not be set, despite the flags field
5326 containing valid data. */
5328 /* xgettext:c-format */
5329 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5332 fprintf (file, _("<Unrecognised flag bits set>"));
5339 /* Update the got entry reference counts for the section being removed. */
5342 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5343 struct bfd_link_info *info,
5345 const Elf_Internal_Rela * relocs)
5347 struct elf_aarch64_link_hash_table *htab;
5348 Elf_Internal_Shdr *symtab_hdr;
5349 struct elf_link_hash_entry **sym_hashes;
5350 struct elf_aarch64_local_symbol *locals;
5351 const Elf_Internal_Rela *rel, *relend;
5353 if (info->relocatable)
5356 htab = elf_aarch64_hash_table (info);
5361 elf_section_data (sec)->local_dynrel = NULL;
5363 symtab_hdr = &elf_symtab_hdr (abfd);
5364 sym_hashes = elf_sym_hashes (abfd);
5366 locals = elf_aarch64_locals (abfd);
5368 relend = relocs + sec->reloc_count;
5369 for (rel = relocs; rel < relend; rel++)
5371 unsigned long r_symndx;
5372 unsigned int r_type;
5373 struct elf_link_hash_entry *h = NULL;
5375 r_symndx = ELFNN_R_SYM (rel->r_info);
5377 if (r_symndx >= symtab_hdr->sh_info)
5380 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5381 while (h->root.type == bfd_link_hash_indirect
5382 || h->root.type == bfd_link_hash_warning)
5383 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5387 Elf_Internal_Sym *isym;
5389 /* A local symbol. */
5390 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5393 /* Check relocation against local STT_GNU_IFUNC symbol. */
5395 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5397 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
5405 struct elf_aarch64_link_hash_entry *eh;
5406 struct elf_dyn_relocs **pp;
5407 struct elf_dyn_relocs *p;
5409 eh = (struct elf_aarch64_link_hash_entry *) h;
5411 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
5414 /* Everything must go for SEC. */
5420 r_type = ELFNN_R_TYPE (rel->r_info);
5421 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
5423 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5424 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5425 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5426 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5427 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5428 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5429 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5430 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5431 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5432 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5433 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5434 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5435 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5436 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5437 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5438 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5439 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5440 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5441 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5442 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5443 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5444 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5445 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5446 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5447 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5450 if (h->got.refcount > 0)
5451 h->got.refcount -= 1;
5453 if (h->type == STT_GNU_IFUNC)
5455 if (h->plt.refcount > 0)
5456 h->plt.refcount -= 1;
5459 else if (locals != NULL)
5461 if (locals[r_symndx].got_refcount > 0)
5462 locals[r_symndx].got_refcount -= 1;
5466 case BFD_RELOC_AARCH64_CALL26:
5467 case BFD_RELOC_AARCH64_JUMP26:
5468 /* If this is a local symbol then we resolve it
5469 directly without creating a PLT entry. */
5473 if (h->plt.refcount > 0)
5474 h->plt.refcount -= 1;
5477 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5478 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5479 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5480 case BFD_RELOC_AARCH64_MOVW_G3:
5481 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
5482 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5483 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
5484 case BFD_RELOC_AARCH64_NN:
5485 if (h != NULL && info->executable)
5487 if (h->plt.refcount > 0)
5488 h->plt.refcount -= 1;
5500 /* Adjust a symbol defined by a dynamic object and referenced by a
5501 regular object. The current definition is in some section of the
5502 dynamic object, but we're not including those sections. We have to
5503 change the definition to something the rest of the link can understand. */
5507 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
5508 struct elf_link_hash_entry *h)
5510 struct elf_aarch64_link_hash_table *htab;
5513 /* If this is a function, put it in the procedure linkage table. We
5514 will fill in the contents of the procedure linkage table later,
5515 when we know the address of the .got section. */
5516 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
5518 if (h->plt.refcount <= 0
5519 || (h->type != STT_GNU_IFUNC
5520 && (SYMBOL_CALLS_LOCAL (info, h)
5521 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
5522 && h->root.type == bfd_link_hash_undefweak))))
5524 /* This case can occur if we saw a CALL26 reloc in
5525 an input file, but the symbol wasn't referred to
5526 by a dynamic object, or all references were
5527 garbage collected, in which case we can end up not needing a PLT entry at all. */
5529 h->plt.offset = (bfd_vma) - 1;
5536 /* It's possible that we incorrectly decided a .plt reloc was
5537 needed for a non-call reloc to a non-function sym in
5538 check_relocs. We can't decide accurately between function and
5539 non-function syms in check_relocs; objects loaded later in
5540 the link may change h->type. So fix it now. */
5541 h->plt.offset = (bfd_vma) - 1;
5544 /* If this is a weak symbol, and there is a real definition, the
5545 processor independent code will have arranged for us to see the
5546 real definition first, and we can just use the same value. */
5547 if (h->u.weakdef != NULL)
5549 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
5550 || h->u.weakdef->root.type == bfd_link_hash_defweak);
5551 h->root.u.def.section = h->u.weakdef->root.u.def.section;
5552 h->root.u.def.value = h->u.weakdef->root.u.def.value;
5553 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
5554 h->non_got_ref = h->u.weakdef->non_got_ref;
5558 /* If we are creating a shared library, we must presume that the
5559 only references to the symbol are via the global offset table.
5560 For such cases we need not do anything here; the relocations will
5561 be handled correctly by relocate_section. */
5565 /* If there are no references to this symbol that do not use the
5566 GOT, we don't need to generate a copy reloc. */
5567 if (!h->non_got_ref)
5570 /* If -z nocopyreloc was given, we won't generate them either. */
5571 if (info->nocopyreloc)
5577 /* We must allocate the symbol in our .dynbss section, which will
5578 become part of the .bss section of the executable. There will be
5579 an entry for this symbol in the .dynsym section. The dynamic
5580 object will contain position independent code, so all references
5581 from the dynamic object to this symbol will go through the global
5582 offset table. The dynamic linker will use the .dynsym entry to
5583 determine the address it must put in the global offset table, so
5584 both the dynamic object and the regular object will refer to the
5585 same memory location for the variable. */
5587 htab = elf_aarch64_hash_table (info);
5589 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
5590 to copy the initial value out of the dynamic object and into the
5591 runtime process image. */
5592 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
5594 htab->srelbss->size += RELOC_SIZE (htab);
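/* Reserve room for the R_AARCH64_COPY relocation that
   finish_dynamic_symbol will emit against the .dynbss copy.  */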
5600 return _bfd_elf_adjust_dynamic_copy (info, h, s);
5605 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5607 struct elf_aarch64_local_symbol *locals;
5608 locals = elf_aarch64_locals (abfd);
5611 locals = (struct elf_aarch64_local_symbol *)
5612 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5615 elf_aarch64_locals (abfd) = locals;
5620 /* Create the .got section to hold the global offset table. */
5623 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
5625 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5628 struct elf_link_hash_entry *h;
5629 struct elf_link_hash_table *htab = elf_hash_table (info);
5631 /* This function may be called more than once. */
5632 s = bfd_get_linker_section (abfd, ".got");
5636 flags = bed->dynamic_sec_flags;
5638 s = bfd_make_section_anyway_with_flags (abfd,
5639 (bed->rela_plts_and_copies_p
5640 ? ".rela.got" : ".rel.got"),
5641 (bed->dynamic_sec_flags
5644 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
5648 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
5650 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
5653 htab->sgot->size += GOT_ENTRY_SIZE;
5655 if (bed->want_got_sym)
5657 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
5658 (or .got.plt) section. We don't do this in the linker script
5659 because we don't want to define the symbol if we are not creating
5660 a global offset table. */
5661 h = _bfd_elf_define_linkage_sym (abfd, info, s,
5662 "_GLOBAL_OFFSET_TABLE_");
5663 elf_hash_table (info)->hgot = h;
5668 if (bed->want_got_plt)
5670 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
5672 || !bfd_set_section_alignment (abfd, s,
5673 bed->s->log_file_align))
5678 /* The first bit of the global offset table is the header. */
5679 s->size += bed->got_header_size;
5684 /* Look through the relocs for a section during the first phase. */
5687 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5688 asection *sec, const Elf_Internal_Rela *relocs)
5690 Elf_Internal_Shdr *symtab_hdr;
5691 struct elf_link_hash_entry **sym_hashes;
5692 const Elf_Internal_Rela *rel;
5693 const Elf_Internal_Rela *rel_end;
5696 struct elf_aarch64_link_hash_table *htab;
5698 if (info->relocatable)
5701 BFD_ASSERT (is_aarch64_elf (abfd));
5703 htab = elf_aarch64_hash_table (info);
5706 symtab_hdr = &elf_symtab_hdr (abfd);
5707 sym_hashes = elf_sym_hashes (abfd);
5709 rel_end = relocs + sec->reloc_count;
5710 for (rel = relocs; rel < rel_end; rel++)
5712 struct elf_link_hash_entry *h;
5713 unsigned long r_symndx;
5714 unsigned int r_type;
5715 bfd_reloc_code_real_type bfd_r_type;
5716 Elf_Internal_Sym *isym;
5718 r_symndx = ELFNN_R_SYM (rel->r_info);
5719 r_type = ELFNN_R_TYPE (rel->r_info);
5721 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5723 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5728 if (r_symndx < symtab_hdr->sh_info)
5730 /* A local symbol. */
5731 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5736 /* Check relocation against local STT_GNU_IFUNC symbol. */
5737 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5739 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
5744 /* Fake a STT_GNU_IFUNC symbol. */
5745 h->type = STT_GNU_IFUNC;
5748 h->forced_local = 1;
5749 h->root.type = bfd_link_hash_defined;
5756 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5757 while (h->root.type == bfd_link_hash_indirect
5758 || h->root.type == bfd_link_hash_warning)
5759 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5761 /* PR15323, ref flags aren't set for references in the same object. */
5763 h->root.non_ir_ref = 1;
5766 /* Could be done earlier, if h were already available. */
5767 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5771 /* Create the ifunc sections for static executables. If we
5772 never see an indirect function symbol and we are not building
5773 a static executable, those sections will be empty and
5774 won't appear in the output. */
5780 case BFD_RELOC_AARCH64_NN:
5781 case BFD_RELOC_AARCH64_CALL26:
5782 case BFD_RELOC_AARCH64_JUMP26:
5783 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5784 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5785 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5786 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5787 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5788 case BFD_RELOC_AARCH64_ADD_LO12:
5789 if (htab->root.dynobj == NULL)
5790 htab->root.dynobj = abfd;
5791 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
5796 /* It is referenced by a non-shared object. */
5798 h->root.non_ir_ref = 1;
5803 case BFD_RELOC_AARCH64_NN:
5805 /* We don't need to handle relocs into sections not going into
5806 the "real" output. */
5807 if ((sec->flags & SEC_ALLOC) == 0)
5815 h->plt.refcount += 1;
5816 h->pointer_equality_needed = 1;
5819 /* No need to do anything if we're not creating a shared object. */
5825 struct elf_dyn_relocs *p;
5826 struct elf_dyn_relocs **head;
5828 /* We must copy these reloc types into the output file.
5829 Create a reloc section in dynobj and make room for this reloc. */
5833 if (htab->root.dynobj == NULL)
5834 htab->root.dynobj = abfd;
5836 sreloc = _bfd_elf_make_dynamic_reloc_section
5837 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
5843 /* If this is a global symbol, we count the number of
5844 relocations we need for this symbol. */
5847 struct elf_aarch64_link_hash_entry *eh;
5848 eh = (struct elf_aarch64_link_hash_entry *) h;
5849 head = &eh->dyn_relocs;
5853 /* Track dynamic relocs needed for local syms too.
5854 We really need local syms available to do this easily. */
5860 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5865 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5869 /* Beware of type punned pointers vs strict aliasing rules. */
5871 vpp = &(elf_section_data (s)->local_dynrel);
5872 head = (struct elf_dyn_relocs **) vpp;
5876 if (p == NULL || p->sec != sec)
5878 bfd_size_type amt = sizeof *p;
5879 p = ((struct elf_dyn_relocs *)
5880 bfd_zalloc (htab->root.dynobj, amt));
5893 /* RR: We probably want to keep a consistency check that
5894 there are no dangling GOT_PAGE relocs. */
5895 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5896 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5897 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5898 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5899 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5900 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5901 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5902 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5903 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5904 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5905 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5906 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5907 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5908 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5909 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5910 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5911 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5912 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5913 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5914 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5915 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5916 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5917 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5918 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5919 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5922 unsigned old_got_type;
5924 got_type = aarch64_reloc_got_type (bfd_r_type);
5928 h->got.refcount += 1;
5929 old_got_type = elf_aarch64_hash_entry (h)->got_type;
5933 struct elf_aarch64_local_symbol *locals;
5935 if (!elfNN_aarch64_allocate_local_symbols
5936 (abfd, symtab_hdr->sh_info))
5939 locals = elf_aarch64_locals (abfd);
5940 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5941 locals[r_symndx].got_refcount += 1;
5942 old_got_type = locals[r_symndx].got_type;
5945 /* If a variable is accessed with both general dynamic TLS
5946 methods, two slots may be created. */
5947 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
5948 got_type |= old_got_type;
5950 /* We will already have issued an error message if there
5951 is a TLS/non-TLS mismatch, based on the symbol type.
5952 So just combine any TLS types needed. */
5953 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
5954 && got_type != GOT_NORMAL)
5955 got_type |= old_got_type;
5957 /* If the symbol is accessed by both IE and GD methods, we
5958 are able to relax. Turn off the GD flag, without
5959 messing up any other kind of TLS type that may be involved. */
5961 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
5962 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
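/* With the GD bits cleared, only the single IE GOT entry is
   allocated for this symbol; the GD-style code sequences will be
   relaxed to IE form when the relocations are applied.  */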
5964 if (old_got_type != got_type)
5967 elf_aarch64_hash_entry (h)->got_type = got_type;
5970 struct elf_aarch64_local_symbol *locals;
5971 locals = elf_aarch64_locals (abfd);
5972 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
5973 locals[r_symndx].got_type = got_type;
5977 if (htab->root.dynobj == NULL)
5978 htab->root.dynobj = abfd;
5979 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
5984 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5985 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5986 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5987 case BFD_RELOC_AARCH64_MOVW_G3:
5990 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
5991 (*_bfd_error_handler)
5992 (_("%B: relocation %s against `%s' can not be used when making "
5993 "a shared object; recompile with -fPIC"),
5994 abfd, elfNN_aarch64_howto_table[howto_index].name,
5995 (h) ? h->root.root.string : "a local symbol");
5996 bfd_set_error (bfd_error_bad_value);
6000 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6001 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6002 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6003 if (h != NULL && info->executable)
6005 /* If this reloc is in a read-only section, we might
6006 need a copy reloc. We can't check reliably at this
6007 stage whether the section is read-only, as input
6008 sections have not yet been mapped to output sections.
6009 Tentatively set the flag for now, and correct in
6010 adjust_dynamic_symbol. */
6012 h->plt.refcount += 1;
6013 h->pointer_equality_needed = 1;
6015 /* FIXME:: RR need to handle these in shared libraries
6016 and essentially bomb out, as these are non-PIC
6017 relocations in shared libraries. */
6020 case BFD_RELOC_AARCH64_CALL26:
6021 case BFD_RELOC_AARCH64_JUMP26:
6022 /* If this is a local symbol then we resolve it
6023 directly without creating a PLT entry. */
6028 if (h->plt.refcount <= 0)
6029 h->plt.refcount = 1;
6031 h->plt.refcount += 1;
6042 /* Treat mapping symbols as special target symbols. */
6045 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6048 return bfd_is_aarch64_special_symbol_name (sym->name,
6049 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6052 /* This is a copy of elf_find_function () from elf.c except that
6053 AArch64 mapping symbols are ignored when looking for function names. */
6056 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6060 const char **filename_ptr,
6061 const char **functionname_ptr)
6063 const char *filename = NULL;
6064 asymbol *func = NULL;
6065 bfd_vma low_func = 0;
6068 for (p = symbols; *p != NULL; p++)
6072 q = (elf_symbol_type *) * p;
6074 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6079 filename = bfd_asymbol_name (&q->symbol);
6083 /* Skip mapping symbols. */
6084 if ((q->symbol.flags & BSF_LOCAL)
6085 && (bfd_is_aarch64_special_symbol_name
6086 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6089 if (bfd_get_section (&q->symbol) == section
6090 && q->symbol.value >= low_func && q->symbol.value <= offset)
6092 func = (asymbol *) q;
6093 low_func = q->symbol.value;
6103 *filename_ptr = filename;
6104 if (functionname_ptr)
6105 *functionname_ptr = bfd_asymbol_name (func);
6111 /* Find the nearest line to a particular section and offset, for error
6112 reporting. This code is a duplicate of the code in elf.c, except
6113 that it uses aarch64_elf_find_function. */
6116 elfNN_aarch64_find_nearest_line (bfd *abfd,
6120 const char **filename_ptr,
6121 const char **functionname_ptr,
6122 unsigned int *line_ptr,
6123 unsigned int *discriminator_ptr)
6125 bfd_boolean found = FALSE;
6127 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6128 filename_ptr, functionname_ptr,
6129 line_ptr, discriminator_ptr,
6130 dwarf_debug_sections, 0,
6131 &elf_tdata (abfd)->dwarf2_find_line_info))
6133 if (!*functionname_ptr)
6134 aarch64_elf_find_function (abfd, symbols, section, offset,
6135 *filename_ptr ? NULL : filename_ptr,
6141 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6142 toolchain uses DWARF1. */
6144 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6145 &found, filename_ptr,
6146 functionname_ptr, line_ptr,
6147 &elf_tdata (abfd)->line_info))
6150 if (found && (*functionname_ptr || *line_ptr))
6153 if (symbols == NULL)
6156 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6157 filename_ptr, functionname_ptr))
6165 elfNN_aarch64_find_inliner_info (bfd *abfd,
6166 const char **filename_ptr,
6167 const char **functionname_ptr,
6168 unsigned int *line_ptr)
6171 found = _bfd_dwarf2_find_inliner_info
6172 (abfd, filename_ptr,
6173 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6179 elfNN_aarch64_post_process_headers (bfd *abfd,
6180 struct bfd_link_info *link_info)
6182 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6184 i_ehdrp = elf_elfheader (abfd);
6185 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6187 _bfd_elf_post_process_headers (abfd, link_info);
6190 static enum elf_reloc_type_class
6191 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6192 const asection *rel_sec ATTRIBUTE_UNUSED,
6193 const Elf_Internal_Rela *rela)
6195 switch ((int) ELFNN_R_TYPE (rela->r_info))
6197 case AARCH64_R (RELATIVE):
6198 return reloc_class_relative;
6199 case AARCH64_R (JUMP_SLOT):
6200 return reloc_class_plt;
6201 case AARCH64_R (COPY):
6202 return reloc_class_copy;
6204 return reloc_class_normal;
6208 /* Handle an AArch64 specific section when reading an object file. This is
6209 called when bfd_section_from_shdr finds a section with an unknown name. */
6213 elfNN_aarch64_section_from_shdr (bfd *abfd,
6214 Elf_Internal_Shdr *hdr,
6215 const char *name, int shindex)
6217 /* There ought to be a place to keep ELF backend specific flags, but
6218 at the moment there isn't one. We just keep track of the
6219 sections by their name, instead. Fortunately, the ABI gives
6220 names for all the AArch64 specific sections, so we will probably get away with this. */
6222 switch (hdr->sh_type)
6224 case SHT_AARCH64_ATTRIBUTES:
6231 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6237 /* A structure used to record a list of sections, independently
6238 of the next and prev fields in the asection structure. */
6239 typedef struct section_list
6242 struct section_list *next;
6243 struct section_list *prev;
6247 /* Unfortunately we need to keep a list of sections for which
6248 an _aarch64_elf_section_data structure has been allocated. This
6249 is because it is possible for functions like elfNN_aarch64_write_section
6250 to be called on a section which has had an elf_data_structure
6251 allocated for it (and so the used_by_bfd field is valid) but
6252 for which the AArch64 extended version of this structure - the
6253 _aarch64_elf_section_data structure - has not been allocated. */
6254 static section_list *sections_with_aarch64_elf_section_data = NULL;
6257 record_section_with_aarch64_elf_section_data (asection *sec)
6259 struct section_list *entry;
6261 entry = bfd_malloc (sizeof (*entry));
6265 entry->next = sections_with_aarch64_elf_section_data;
6267 if (entry->next != NULL)
6268 entry->next->prev = entry;
6269 sections_with_aarch64_elf_section_data = entry;
6272 static struct section_list *
6273 find_aarch64_elf_section_entry (asection *sec)
6275 struct section_list *entry;
6276 static struct section_list *last_entry = NULL;
6278 /* This is a short cut for the typical case where the sections are added
6279 to the sections_with_aarch64_elf_section_data list in forward order and
6280 then looked up here in backwards order. This makes a real difference
6281 to the ld-srec/sec64k.exp linker test. */
6282 entry = sections_with_aarch64_elf_section_data;
6283 if (last_entry != NULL)
6285 if (last_entry->sec == sec)
6287 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6288 entry = last_entry->next;
6291 for (; entry; entry = entry->next)
6292 if (entry->sec == sec)
6296 /* Record the entry prior to this one - it is the entry we are
6297 most likely to want to locate next time. Also this way if we
6298 have been called from
6299 unrecord_section_with_aarch64_elf_section_data () we will not
6300 be caching a pointer that is about to be freed. */
6301 last_entry = entry->prev;
6307 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6309 struct section_list *entry;
6311 entry = find_aarch64_elf_section_entry (sec);
6315 if (entry->prev != NULL)
6316 entry->prev->next = entry->next;
6317 if (entry->next != NULL)
6318 entry->next->prev = entry->prev;
6319 if (entry == sections_with_aarch64_elf_section_data)
6320 sections_with_aarch64_elf_section_data = entry->next;
6329 struct bfd_link_info *info;
6332 int (*func) (void *, const char *, Elf_Internal_Sym *,
6333 asection *, struct elf_link_hash_entry *);
6334 } output_arch_syminfo;
6336 enum map_symbol_type
6343 /* Output a single mapping symbol. */
6346 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6347 enum map_symbol_type type, bfd_vma offset)
6349 static const char *names[2] = { "$x", "$d" };
6350 Elf_Internal_Sym sym;
6352 sym.st_value = (osi->sec->output_section->vma
6353 + osi->sec->output_offset + offset);
6356 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6357 sym.st_shndx = osi->sec_shndx;
6358 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
6363 /* Output mapping symbols for PLT entries associated with H. */
6366 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
6368 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
6371 if (h->root.type == bfd_link_hash_indirect)
6374 if (h->root.type == bfd_link_hash_warning)
6375 /* When warning symbols are created, they **replace** the "real"
6376 entry in the hash table, thus we never get to see the real
6377 symbol in a hash traversal. So look at it now. */
6378 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6380 if (h->plt.offset == (bfd_vma) - 1)
6383 addr = h->plt.offset;
6386 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6393 /* Output a single local symbol for a generated stub. */
6396 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
6397 bfd_vma offset, bfd_vma size)
6399 Elf_Internal_Sym sym;
6401 sym.st_value = (osi->sec->output_section->vma
6402 + osi->sec->output_offset + offset);
6405 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6406 sym.st_shndx = osi->sec_shndx;
6407 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
6411 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
6413 struct elf_aarch64_stub_hash_entry *stub_entry;
6417 output_arch_syminfo *osi;
6419 /* Massage our args to the form they really have. */
6420 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
6421 osi = (output_arch_syminfo *) in_arg;
6423 stub_sec = stub_entry->stub_sec;
6425 /* Ensure this stub is attached to the current section being processed.  */
6427 if (stub_sec != osi->sec)
6430 addr = (bfd_vma) stub_entry->stub_offset;
6432 stub_name = stub_entry->output_name;
6434 switch (stub_entry->stub_type)
6436 case aarch64_stub_adrp_branch:
6437 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6438 sizeof (aarch64_adrp_branch_stub)))
6440 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6443 case aarch64_stub_long_branch:
6444 if (!elfNN_aarch64_output_stub_sym
6445 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
6447 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6449 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
6452 case aarch64_stub_erratum_835769_veneer:
6453 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6454 sizeof (aarch64_erratum_835769_stub)))
6456 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6466 /* Output mapping symbols for linker generated sections. */
6469 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
6470 struct bfd_link_info *info,
6472 int (*func) (void *, const char *,
6475 struct elf_link_hash_entry
6478 output_arch_syminfo osi;
6479 struct elf_aarch64_link_hash_table *htab;
6481 htab = elf_aarch64_hash_table (info);
6487 /* Long call stubs.  */
6488 if (htab->stub_bfd && htab->stub_bfd->sections)
6492 for (stub_sec = htab->stub_bfd->sections;
6493 stub_sec != NULL; stub_sec = stub_sec->next)
6495 /* Ignore non-stub sections. */
6496 if (!strstr (stub_sec->name, STUB_SUFFIX))
6501 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6502 (output_bfd, osi.sec->output_section);
6504 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
6509 /* Finally, output mapping symbols for the PLT. */
6510 if (!htab->root.splt || htab->root.splt->size == 0)
6513 /* For now live without mapping symbols for the plt. */
6514 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6515 (output_bfd, htab->root.splt->output_section);
6516 osi.sec = htab->root.splt;
6518 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
6525 /* Allocate target specific section data. */
6528 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
6530 if (!sec->used_by_bfd)
6532 _aarch64_elf_section_data *sdata;
6533 bfd_size_type amt = sizeof (*sdata);
6535 sdata = bfd_zalloc (abfd, amt);
6538 sec->used_by_bfd = sdata;
6541 record_section_with_aarch64_elf_section_data (sec);
6543 return _bfd_elf_new_section_hook (abfd, sec);
6548 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
6550 void *ignore ATTRIBUTE_UNUSED)
6552 unrecord_section_with_aarch64_elf_section_data (sec);
6556 elfNN_aarch64_close_and_cleanup (bfd *abfd)
6559 bfd_map_over_sections (abfd,
6560 unrecord_section_via_map_over_sections, NULL);
6562 return _bfd_elf_close_and_cleanup (abfd);
6566 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
6569 bfd_map_over_sections (abfd,
6570 unrecord_section_via_map_over_sections, NULL);
6572 return _bfd_free_cached_info (abfd);
6575 /* Create dynamic sections. This is different from the ARM backend in that
6576 the got, plt, gotplt and their relocation sections are all created in the
6577 standard part of the bfd elf backend. */
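/* Background note added for clarity (general ELF linking behaviour, not
   something specific to this function): .dynbss holds copies of variables
   that are defined in a shared library but referenced directly from the
   executable, and .rela.bss holds the R_AARCH64_COPY relocations that tell
   the dynamic linker to copy their initial values at start-up; see the
   copy-reloc handling in elfNN_aarch64_finish_dynamic_symbol below.  */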
6580 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
6581 struct bfd_link_info *info)
6583 struct elf_aarch64_link_hash_table *htab;
6585 /* We need to create .got section. */
6586 if (!aarch64_elf_create_got_section (dynobj, info))
6589 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
6592 htab = elf_aarch64_hash_table (info);
6593 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
6595 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
6597 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
6604 /* Allocate space in .plt, .got and associated reloc sections for
6608 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
6610 struct bfd_link_info *info;
6611 struct elf_aarch64_link_hash_table *htab;
6612 struct elf_aarch64_link_hash_entry *eh;
6613 struct elf_dyn_relocs *p;
6615 /* An example of a bfd_link_hash_indirect symbol is a versioned
6616 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6617 -> __gxx_personality_v0(bfd_link_hash_defined)
6619 There is no need to process bfd_link_hash_indirect symbols here
6620 because we will also be presented with the concrete instance of
6621 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
6622 called to copy all relevant data from the generic to the concrete symbol instance.  */
6625 if (h->root.type == bfd_link_hash_indirect)
6628 if (h->root.type == bfd_link_hash_warning)
6629 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6631 info = (struct bfd_link_info *) inf;
6632 htab = elf_aarch64_hash_table (info);
6634 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
6635 here if it is defined and referenced in a non-shared object. */
6636 if (h->type == STT_GNU_IFUNC
6639 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
6641 /* Make sure this symbol is output as a dynamic symbol.
6642 Undefined weak syms won't yet be marked as dynamic. */
6643 if (h->dynindx == -1 && !h->forced_local)
6645 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6649 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
6651 asection *s = htab->root.splt;
6653 /* If this is the first .plt entry, make room for the special first entry.  */
6656 s->size += htab->plt_header_size;
6658 h->plt.offset = s->size;
6660 /* If this symbol is not defined in a regular file, and we are
6661 not generating a shared library, then set the symbol to this
6662 location in the .plt. This is required to make function
6663 pointers compare as equal between the normal executable and
6664 the shared library. */
6665 if (!info->shared && !h->def_regular)
6667 h->root.u.def.section = s;
6668 h->root.u.def.value = h->plt.offset;
6671 /* Make room for this entry. For now we only create the
6672 small model PLT entries. We later need to find a way
6673 of relaxing into these from the large model PLT entries. */
6674 s->size += PLT_SMALL_ENTRY_SIZE;
6676 /* We also need to make an entry in the .got.plt section, which
6677 will be placed in the .got section by the linker script. */
6678 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
6680 /* We also need to make an entry in the .rela.plt section. */
6681 htab->root.srelplt->size += RELOC_SIZE (htab);
6683 /* We need to ensure that all GOT entries that serve the PLT
6684 are consecutive with the special GOT slots [0] [1] and
6685 [2].  Any additional relocations, such as
6686 R_AARCH64_TLSDESC, must be placed after the PLT related
6687 entries. We abuse the reloc_count such that during
6688 sizing we adjust reloc_count to indicate the number of
6689 PLT related reserved entries. In subsequent phases when
6690 filling in the contents of the reloc entries, PLT related
6691 entries are placed by computing their PLT index (0
6692 .. reloc_count), while other non-PLT relocs are placed
6693 at the slot indicated by reloc_count and reloc_count is updated.  */
6696 htab->root.srelplt->reloc_count++;
6700 h->plt.offset = (bfd_vma) - 1;
6706 h->plt.offset = (bfd_vma) - 1;
6710 eh = (struct elf_aarch64_link_hash_entry *) h;
6711 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
6713 if (h->got.refcount > 0)
6716 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
6718 h->got.offset = (bfd_vma) - 1;
6720 dyn = htab->root.dynamic_sections_created;
6722 /* Make sure this symbol is output as a dynamic symbol.
6723 Undefined weak syms won't yet be marked as dynamic. */
6724 if (dyn && h->dynindx == -1 && !h->forced_local)
6726 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6730 if (got_type == GOT_UNKNOWN)
6733 else if (got_type == GOT_NORMAL)
6735 h->got.offset = htab->root.sgot->size;
6736 htab->root.sgot->size += GOT_ENTRY_SIZE;
6737 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6738 || h->root.type != bfd_link_hash_undefweak)
6740 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6742 htab->root.srelgot->size += RELOC_SIZE (htab);
6748 if (got_type & GOT_TLSDESC_GD)
6750 eh->tlsdesc_got_jump_table_offset =
6751 (htab->root.sgotplt->size
6752 - aarch64_compute_jump_table_size (htab));
6753 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6754 h->got.offset = (bfd_vma) - 2;
6757 if (got_type & GOT_TLS_GD)
6759 h->got.offset = htab->root.sgot->size;
6760 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
6763 if (got_type & GOT_TLS_IE)
6765 h->got.offset = htab->root.sgot->size;
6766 htab->root.sgot->size += GOT_ENTRY_SIZE;
6769 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6770 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6771 || h->root.type != bfd_link_hash_undefweak)
6774 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6776 if (got_type & GOT_TLSDESC_GD)
6778 htab->root.srelplt->size += RELOC_SIZE (htab);
6779 /* Note reloc_count not incremented here! We have
6780 already adjusted reloc_count for this relocation type.  */
6783 /* TLSDESC PLT is now needed, but not yet determined. */
6784 htab->tlsdesc_plt = (bfd_vma) - 1;
6787 if (got_type & GOT_TLS_GD)
6788 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6790 if (got_type & GOT_TLS_IE)
6791 htab->root.srelgot->size += RELOC_SIZE (htab);
6797 h->got.offset = (bfd_vma) - 1;
6800 if (eh->dyn_relocs == NULL)
6803 /* In the shared -Bsymbolic case, discard space allocated for
6804 dynamic pc-relative relocs against symbols which turn out to be
6805 defined in regular objects. For the normal shared case, discard
6806 space for pc-relative relocs that have become local due to symbol
6807 visibility changes. */
6811 /* Relocs that use pc_count are those that appear on a call
6812 insn, or certain REL relocs that can be generated via assembly.
6813 We want calls to protected symbols to resolve directly to the
6814 function rather than going via the plt. If people want
6815 function pointer comparisons to work as expected then they
6816 should avoid writing weird assembly. */
6817 if (SYMBOL_CALLS_LOCAL (info, h))
6819 struct elf_dyn_relocs **pp;
6821 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6823 p->count -= p->pc_count;
6832 /* Also discard relocs on undefined weak syms with non-default visibility.  */
6834 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6836 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6837 eh->dyn_relocs = NULL;
6839 /* Make sure undefined weak symbols are output as a dynamic symbol in PIEs.  */
6841 else if (h->dynindx == -1
6843 && !bfd_elf_link_record_dynamic_symbol (info, h))
6848 else if (ELIMINATE_COPY_RELOCS)
6850 /* For the non-shared case, discard space for relocs against
6851 symbols which turn out to need copy relocs or are not dynamic.  */
6857 || (htab->root.dynamic_sections_created
6858 && (h->root.type == bfd_link_hash_undefweak
6859 || h->root.type == bfd_link_hash_undefined))))
6861 /* Make sure this symbol is output as a dynamic symbol.
6862 Undefined weak syms won't yet be marked as dynamic. */
6863 if (h->dynindx == -1
6865 && !bfd_elf_link_record_dynamic_symbol (info, h))
6868 /* If that succeeded, we know we'll be keeping all the relocs.  */
6870 if (h->dynindx != -1)
6874 eh->dyn_relocs = NULL;
6879 /* Finally, allocate space. */
6880 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6884 sreloc = elf_section_data (p->sec)->sreloc;
6886 BFD_ASSERT (sreloc != NULL);
6888 sreloc->size += p->count * RELOC_SIZE (htab);
6894 /* Allocate space in .plt, .got and associated reloc sections for
6895 ifunc dynamic relocs. */
6898 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
6901 struct bfd_link_info *info;
6902 struct elf_aarch64_link_hash_table *htab;
6903 struct elf_aarch64_link_hash_entry *eh;
6905 /* An example of a bfd_link_hash_indirect symbol is a versioned
6906 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6907 -> __gxx_personality_v0(bfd_link_hash_defined)
6909 There is no need to process bfd_link_hash_indirect symbols here
6910 because we will also be presented with the concrete instance of
6911 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
6912 called to copy all relevant data from the generic to the concrete symbol instance.  */
6915 if (h->root.type == bfd_link_hash_indirect)
6918 if (h->root.type == bfd_link_hash_warning)
6919 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6921 info = (struct bfd_link_info *) inf;
6922 htab = elf_aarch64_hash_table (info);
6924 eh = (struct elf_aarch64_link_hash_entry *) h;
6926 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle it
6927 here if it is defined and referenced in a non-shared object. */
6928 if (h->type == STT_GNU_IFUNC
6930 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
6932 htab->plt_entry_size,
6933 htab->plt_header_size,
6938 /* Allocate space in .plt, .got and associated reloc sections for
6939 local dynamic relocs. */
6942 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
6944 struct elf_link_hash_entry *h
6945 = (struct elf_link_hash_entry *) *slot;
6947 if (h->type != STT_GNU_IFUNC
6951 || h->root.type != bfd_link_hash_defined)
6954 return elfNN_aarch64_allocate_dynrelocs (h, inf);
6957 /* Allocate space in .plt, .got and associated reloc sections for
6958 local ifunc dynamic relocs. */
6961 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
6963 struct elf_link_hash_entry *h
6964 = (struct elf_link_hash_entry *) *slot;
6966 if (h->type != STT_GNU_IFUNC
6970 || h->root.type != bfd_link_hash_defined)
6973 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
6976 /* This is the most important function of all.  Innocuously named elfNN_aarch64_size_dynamic_sections.  */
6979 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
6980 struct bfd_link_info *info)
6982 struct elf_aarch64_link_hash_table *htab;
6988 htab = elf_aarch64_hash_table ((info));
6989 dynobj = htab->root.dynobj;
6991 BFD_ASSERT (dynobj != NULL);
6993 if (htab->root.dynamic_sections_created)
6995 if (info->executable)
6997 s = bfd_get_linker_section (dynobj, ".interp");
7000 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7001 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7005 /* Set up .got offsets for local syms, and space for local dynamic
7007 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7009 struct elf_aarch64_local_symbol *locals = NULL;
7010 Elf_Internal_Shdr *symtab_hdr;
7014 if (!is_aarch64_elf (ibfd))
7017 for (s = ibfd->sections; s != NULL; s = s->next)
7019 struct elf_dyn_relocs *p;
7021 for (p = (struct elf_dyn_relocs *)
7022 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7024 if (!bfd_is_abs_section (p->sec)
7025 && bfd_is_abs_section (p->sec->output_section))
7027 /* Input section has been discarded, either because
7028 it is a copy of a linkonce section or due to
7029 linker script /DISCARD/, so we'll be discarding its relocations too.  */
7032 else if (p->count != 0)
7034 srel = elf_section_data (p->sec)->sreloc;
7035 srel->size += p->count * RELOC_SIZE (htab);
7036 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7037 info->flags |= DF_TEXTREL;
7042 locals = elf_aarch64_locals (ibfd);
7046 symtab_hdr = &elf_symtab_hdr (ibfd);
7047 srel = htab->root.srelgot;
7048 for (i = 0; i < symtab_hdr->sh_info; i++)
7050 locals[i].got_offset = (bfd_vma) - 1;
7051 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7052 if (locals[i].got_refcount > 0)
7054 unsigned got_type = locals[i].got_type;
7055 if (got_type & GOT_TLSDESC_GD)
7057 locals[i].tlsdesc_got_jump_table_offset =
7058 (htab->root.sgotplt->size
7059 - aarch64_compute_jump_table_size (htab));
7060 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7061 locals[i].got_offset = (bfd_vma) - 2;
7064 if (got_type & GOT_TLS_GD)
7066 locals[i].got_offset = htab->root.sgot->size;
7067 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7070 if (got_type & GOT_TLS_IE)
7072 locals[i].got_offset = htab->root.sgot->size;
7073 htab->root.sgot->size += GOT_ENTRY_SIZE;
7076 if (got_type == GOT_UNKNOWN)
7080 if (got_type == GOT_NORMAL)
7086 if (got_type & GOT_TLSDESC_GD)
7088 htab->root.srelplt->size += RELOC_SIZE (htab);
7089 /* Note RELOC_COUNT not incremented here! */
7090 htab->tlsdesc_plt = (bfd_vma) - 1;
7093 if (got_type & GOT_TLS_GD)
7094 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7096 if (got_type & GOT_TLS_IE)
7097 htab->root.srelgot->size += RELOC_SIZE (htab);
7102 locals[i].got_refcount = (bfd_vma) - 1;
7108 /* Allocate global sym .plt and .got entries, and space for global
7109 sym dynamic relocs. */
7110 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7113 /* Allocate global ifunc sym .plt and .got entries, and space for global
7114 ifunc sym dynamic relocs. */
7115 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7118 /* Allocate .plt and .got entries, and space for local symbols. */
7119 htab_traverse (htab->loc_hash_table,
7120 elfNN_aarch64_allocate_local_dynrelocs,
7123 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7124 htab_traverse (htab->loc_hash_table,
7125 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7128 /* For every jump slot reserved in the sgotplt, reloc_count is
7129 incremented. However, when we reserve space for TLS descriptors,
7130 it's not incremented, so in order to compute the space reserved
7131 for them, it suffices to multiply the reloc count by the jump slot size.  */
7134 if (htab->root.srelplt)
7135 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
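/* In other words, assuming aarch64_compute_jump_table_size () is the
   usual helper that multiplies the PLT-related .rela.plt reloc_count by
   the jump slot size (a sketch, shown only for clarity):

     sgotplt_jump_table_size ~= srelplt->reloc_count * GOT_ENTRY_SIZE

   since exactly one jump slot was reserved per counted .rela.plt entry.  */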
7137 if (htab->tlsdesc_plt)
7139 if (htab->root.splt->size == 0)
7140 htab->root.splt->size += PLT_ENTRY_SIZE;
7142 htab->tlsdesc_plt = htab->root.splt->size;
7143 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7145 /* If we're not using lazy TLS relocations, don't generate the
7146 GOT entry required. */
7147 if (!(info->flags & DF_BIND_NOW))
7149 htab->dt_tlsdesc_got = htab->root.sgot->size;
7150 htab->root.sgot->size += GOT_ENTRY_SIZE;
7154 /* Init mapping symbol information to use later to distinguish between
7155 code and data while scanning for erratum 835769.  */
7156 if (htab->fix_erratum_835769)
7157 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7159 if (!is_aarch64_elf (ibfd))
7161 bfd_elfNN_aarch64_init_maps (ibfd);
7164 /* We now have determined the sizes of the various dynamic sections.
7165 Allocate memory for them. */
7167 for (s = dynobj->sections; s != NULL; s = s->next)
7169 if ((s->flags & SEC_LINKER_CREATED) == 0)
7172 if (s == htab->root.splt
7173 || s == htab->root.sgot
7174 || s == htab->root.sgotplt
7175 || s == htab->root.iplt
7176 || s == htab->root.igotplt || s == htab->sdynbss)
7178 /* Strip this section if we don't need it; see the comment below.  */
7181 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7183 if (s->size != 0 && s != htab->root.srelplt)
7186 /* We use the reloc_count field as a counter if we need
7187 to copy relocs into the output file. */
7188 if (s != htab->root.srelplt)
7193 /* It's not one of our sections, so don't allocate space. */
7199 /* If we don't need this section, strip it from the
7200 output file. This is mostly to handle .rela.bss and
7201 .rela.plt. We must create both sections in
7202 create_dynamic_sections, because they must be created
7203 before the linker maps input sections to output
7204 sections. The linker does that before
7205 adjust_dynamic_symbol is called, and it is that
7206 function which decides whether anything needs to go
7207 into these sections. */
7209 s->flags |= SEC_EXCLUDE;
7213 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7216 /* Allocate memory for the section contents. We use bfd_zalloc
7217 here in case unused entries are not reclaimed before the
7218 section's contents are written out. This should not happen,
7219 but this way if it does, we get an R_AARCH64_NONE reloc instead of garbage.  */
7221 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7222 if (s->contents == NULL)
7226 if (htab->root.dynamic_sections_created)
7228 /* Add some entries to the .dynamic section. We fill in the
7229 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7230 must add the entries now so that we get the correct size for
7231 the .dynamic section. The DT_DEBUG entry is filled in by the
7232 dynamic linker and used by the debugger. */
7233 #define add_dynamic_entry(TAG, VAL) \
7234 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7236 if (info->executable)
7238 if (!add_dynamic_entry (DT_DEBUG, 0))
7242 if (htab->root.splt->size != 0)
7244 if (!add_dynamic_entry (DT_PLTGOT, 0)
7245 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7246 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7247 || !add_dynamic_entry (DT_JMPREL, 0))
7250 if (htab->tlsdesc_plt
7251 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7252 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7258 if (!add_dynamic_entry (DT_RELA, 0)
7259 || !add_dynamic_entry (DT_RELASZ, 0)
7260 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7263 /* If any dynamic relocs apply to a read-only section,
7264 then we need a DT_TEXTREL entry. */
7265 if ((info->flags & DF_TEXTREL) != 0)
7267 if (!add_dynamic_entry (DT_TEXTREL, 0))
7272 #undef add_dynamic_entry
7278 elf_aarch64_update_plt_entry (bfd *output_bfd,
7279 bfd_reloc_code_real_type r_type,
7280 bfd_byte *plt_entry, bfd_vma value)
7282 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7284 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
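/* A note on the PG () / PG_OFFSET () helpers used throughout the PLT
   code below: they are assumed to be the usual 4K-page helpers defined
   earlier in this file, roughly

     PG (x)        == (x) & ~(bfd_vma) 0xfff    (page base of x)
     PG_OFFSET (x) == (x) &  (bfd_vma) 0xfff    (offset of x within its page)

   which is what the ADRP hi21 and ADD/LDR lo12 fix-ups rely on.  */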
7288 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7289 struct elf_aarch64_link_hash_table
7290 *htab, bfd *output_bfd,
7291 struct bfd_link_info *info)
7293 bfd_byte *plt_entry;
7296 bfd_vma gotplt_entry_address;
7297 bfd_vma plt_entry_address;
7298 Elf_Internal_Rela rela;
7300 asection *plt, *gotplt, *relplt;
7302 /* When building a static executable, use .iplt, .igot.plt and
7303 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7304 if (htab->root.splt != NULL)
7306 plt = htab->root.splt;
7307 gotplt = htab->root.sgotplt;
7308 relplt = htab->root.srelplt;
7312 plt = htab->root.iplt;
7313 gotplt = htab->root.igotplt;
7314 relplt = htab->root.irelplt;
7317 /* Get the index in the procedure linkage table which
7318 corresponds to this symbol. This is the index of this symbol
7319 in all the symbols for which we are making plt entries. The
7320 first entry in the procedure linkage table is reserved.
7322 Get the offset into the .got table of the entry that
7323 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
7324 bytes. The first three are reserved for the dynamic linker.
7326 For static executables, we don't reserve anything. */
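/* For reference, the .got.plt layout that the index arithmetic below
   assumes is roughly (an illustrative sketch; as noted above, static
   executables reserve nothing and use the .igot.plt scheme instead):

     .got.plt[0..2]   reserved for the dynamic linker
     .got.plt[3 + n]  slot for PLTn, initially pointing back at PLT0

   hence got_offset = (plt_index + 3) * GOT_ENTRY_SIZE in the dynamic case
   and plt_index * GOT_ENTRY_SIZE otherwise.  */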
7328 if (plt == htab->root.splt)
7330 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
7331 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
7335 plt_index = h->plt.offset / htab->plt_entry_size;
7336 got_offset = plt_index * GOT_ENTRY_SIZE;
7339 plt_entry = plt->contents + h->plt.offset;
7340 plt_entry_address = plt->output_section->vma
7341 + plt->output_offset + h->plt.offset;
7342 gotplt_entry_address = gotplt->output_section->vma +
7343 gotplt->output_offset + got_offset;
7345 /* Copy in the boiler-plate for the PLTn entry. */
7346 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
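/* The small-model PLTn entry just copied in is, roughly (an ELF64 sketch
   for orientation only; the authoritative encodings live in
   elfNN_aarch64_small_plt_entry):

     adrp x16, PLT_GOT + n * 8                 // page of this symbol's slot
     ldr  x17, [x16, #:lo12:PLT_GOT + n * 8]   // load the slot's contents
     add  x16, x16, #:lo12:PLT_GOT + n * 8
     br   x17

   The three elf_aarch64_update_plt_entry () calls below patch the ADRP
   hi21 field and the LDR/ADD lo12 fields respectively.  */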
7348 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7349 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7350 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7352 PG (gotplt_entry_address) -
7353 PG (plt_entry_address));
7355 /* Fill in the lo12 bits for the load from the pltgot. */
7356 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7358 PG_OFFSET (gotplt_entry_address));
7360 /* Fill in the lo12 bits for the add from the pltgot entry. */
7361 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7363 PG_OFFSET (gotplt_entry_address));
7365 /* All the GOTPLT entries are essentially initialized to PLT0.  */
7366 bfd_put_NN (output_bfd,
7367 plt->output_section->vma + plt->output_offset,
7368 gotplt->contents + got_offset);
7370 rela.r_offset = gotplt_entry_address;
7372 if (h->dynindx == -1
7373 || ((info->executable
7374 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7376 && h->type == STT_GNU_IFUNC))
7378 /* If an STT_GNU_IFUNC symbol is locally defined, generate
7379 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
7380 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
7381 rela.r_addend = (h->root.u.def.value
7382 + h->root.u.def.section->output_section->vma
7383 + h->root.u.def.section->output_offset);
7387 /* Fill in the entry in the .rela.plt section. */
7388 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
7392 /* Compute the relocation entry to be used based on the PLT index and do
7393 not adjust reloc_count. The reloc_count has already been adjusted
7394 to account for this entry. */
7395 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
7396 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7399 /* Size sections even though they're not dynamic.  We use this hook to
7400 set up _TLS_MODULE_BASE_, if needed.  */
7403 elfNN_aarch64_always_size_sections (bfd *output_bfd,
7404 struct bfd_link_info *info)
7408 if (info->relocatable)
7411 tls_sec = elf_hash_table (info)->tls_sec;
7415 struct elf_link_hash_entry *tlsbase;
7417 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
7418 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
7422 struct bfd_link_hash_entry *h = NULL;
7423 const struct elf_backend_data *bed =
7424 get_elf_backend_data (output_bfd);
7426 if (!(_bfd_generic_link_add_one_symbol
7427 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
7428 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
7431 tlsbase->type = STT_TLS;
7432 tlsbase = (struct elf_link_hash_entry *) h;
7433 tlsbase->def_regular = 1;
7434 tlsbase->other = STV_HIDDEN;
7435 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
7442 /* Finish up dynamic symbol handling. We set the contents of various
7443 dynamic sections here. */
7445 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
7446 struct bfd_link_info *info,
7447 struct elf_link_hash_entry *h,
7448 Elf_Internal_Sym *sym)
7450 struct elf_aarch64_link_hash_table *htab;
7451 htab = elf_aarch64_hash_table (info);
7453 if (h->plt.offset != (bfd_vma) - 1)
7455 asection *plt, *gotplt, *relplt;
7457 /* This symbol has an entry in the procedure linkage table.  Set it up.  */
7460 /* When building a static executable, use .iplt, .igot.plt and
7461 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7462 if (htab->root.splt != NULL)
7464 plt = htab->root.splt;
7465 gotplt = htab->root.sgotplt;
7466 relplt = htab->root.srelplt;
7470 plt = htab->root.iplt;
7471 gotplt = htab->root.igotplt;
7472 relplt = htab->root.irelplt;
7475 /* This symbol has an entry in the procedure linkage table.  Set it up.  */
7477 if ((h->dynindx == -1
7478 && !((h->forced_local || info->executable)
7480 && h->type == STT_GNU_IFUNC))
7486 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
7487 if (!h->def_regular)
7489 /* Mark the symbol as undefined, rather than as defined in
7490 the .plt section. */
7491 sym->st_shndx = SHN_UNDEF;
7492 /* If the symbol is weak we need to clear the value.
7493 Otherwise, the PLT entry would provide a definition for
7494 the symbol even if the symbol wasn't defined anywhere,
7495 and so the symbol would never be NULL. Leave the value if
7496 there were any relocations where pointer equality matters
7497 (this is a clue for the dynamic linker, to make function
7498 pointer comparisons work between an application and shared library).  */
7500 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
7505 if (h->got.offset != (bfd_vma) - 1
7506 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
7508 Elf_Internal_Rela rela;
7511 /* This symbol has an entry in the global offset table.  Set it up.  */
7513 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
7516 rela.r_offset = (htab->root.sgot->output_section->vma
7517 + htab->root.sgot->output_offset
7518 + (h->got.offset & ~(bfd_vma) 1));
7521 && h->type == STT_GNU_IFUNC)
7525 /* Generate R_AARCH64_GLOB_DAT. */
7532 if (!h->pointer_equality_needed)
7535 /* For a non-shared object, we can't use .got.plt, which
7536 contains the real function address if we need pointer
7537 equality. We load the GOT entry with the PLT entry. */
7538 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
7539 bfd_put_NN (output_bfd, (plt->output_section->vma
7540 + plt->output_offset
7542 htab->root.sgot->contents
7543 + (h->got.offset & ~(bfd_vma) 1));
7547 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
7549 if (!h->def_regular)
7552 BFD_ASSERT ((h->got.offset & 1) != 0);
7553 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
7554 rela.r_addend = (h->root.u.def.value
7555 + h->root.u.def.section->output_section->vma
7556 + h->root.u.def.section->output_offset);
7561 BFD_ASSERT ((h->got.offset & 1) == 0);
7562 bfd_put_NN (output_bfd, (bfd_vma) 0,
7563 htab->root.sgot->contents + h->got.offset);
7564 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
7568 loc = htab->root.srelgot->contents;
7569 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
7570 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7575 Elf_Internal_Rela rela;
7578 /* This symbol needs a copy reloc. Set it up. */
7580 if (h->dynindx == -1
7581 || (h->root.type != bfd_link_hash_defined
7582 && h->root.type != bfd_link_hash_defweak)
7583 || htab->srelbss == NULL)
7586 rela.r_offset = (h->root.u.def.value
7587 + h->root.u.def.section->output_section->vma
7588 + h->root.u.def.section->output_offset);
7589 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
7591 loc = htab->srelbss->contents;
7592 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
7593 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7596 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
7597 be NULL for local symbols. */
7599 && (h == elf_hash_table (info)->hdynamic
7600 || h == elf_hash_table (info)->hgot))
7601 sym->st_shndx = SHN_ABS;
7606 /* Finish up local dynamic symbol handling. We set the contents of
7607 various dynamic sections here. */
7610 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
7612 struct elf_link_hash_entry *h
7613 = (struct elf_link_hash_entry *) *slot;
7614 struct bfd_link_info *info
7615 = (struct bfd_link_info *) inf;
7617 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
7622 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
7623 struct elf_aarch64_link_hash_table
7626 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
7627 small and large PLTs and at the moment just generates the small PLT.  */
7630 /* PLT0 of the small PLT looks like this in ELF64 -
7631 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
7632 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
7633 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the symbol resolver.
7635 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
7636 // GOTPLT entry for this.
7638 PLT0 will be slightly different in ELF32 due to the different GOT entry size.  */
7641 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
7645 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
7647 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
7650 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
7651 + htab->root.sgotplt->output_offset
7652 + GOT_ENTRY_SIZE * 2);
7654 plt_base = htab->root.splt->output_section->vma +
7655 htab->root.splt->output_offset;
7657 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7658 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7659 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7660 htab->root.splt->contents + 4,
7661 PG (plt_got_2nd_ent) - PG (plt_base + 4));
7663 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7664 htab->root.splt->contents + 8,
7665 PG_OFFSET (plt_got_2nd_ent));
7667 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7668 htab->root.splt->contents + 12,
7669 PG_OFFSET (plt_got_2nd_ent));
7673 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
7674 struct bfd_link_info *info)
7676 struct elf_aarch64_link_hash_table *htab;
7680 htab = elf_aarch64_hash_table (info);
7681 dynobj = htab->root.dynobj;
7682 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
7684 if (htab->root.dynamic_sections_created)
7686 ElfNN_External_Dyn *dyncon, *dynconend;
7688 if (sdyn == NULL || htab->root.sgot == NULL)
7691 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
7692 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
7693 for (; dyncon < dynconend; dyncon++)
7695 Elf_Internal_Dyn dyn;
7698 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
7706 s = htab->root.sgotplt;
7707 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
7711 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
7715 s = htab->root.srelplt;
7716 dyn.d_un.d_val = s->size;
7720 /* The procedure linkage table relocs (DT_JMPREL) should
7721 not be included in the overall relocs (DT_RELA).
7722 Therefore, we override the DT_RELASZ entry here to
7723 make it not include the JMPREL relocs. Since the
7724 linker script arranges for .rela.plt to follow all
7725 other relocation sections, we don't have to worry
7726 about changing the DT_RELA entry. */
7727 if (htab->root.srelplt != NULL)
7729 s = htab->root.srelplt;
7730 dyn.d_un.d_val -= s->size;
7734 case DT_TLSDESC_PLT:
7735 s = htab->root.splt;
7736 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
7737 + htab->tlsdesc_plt;
7740 case DT_TLSDESC_GOT:
7741 s = htab->root.sgot;
7742 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
7743 + htab->dt_tlsdesc_got;
7747 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
7752 /* Fill in the special first entry in the procedure linkage table. */
7753 if (htab->root.splt && htab->root.splt->size > 0)
7755 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
7757 elf_section_data (htab->root.splt->output_section)->
7758 this_hdr.sh_entsize = htab->plt_entry_size;
7761 if (htab->tlsdesc_plt)
7763 bfd_put_NN (output_bfd, (bfd_vma) 0,
7764 htab->root.sgot->contents + htab->dt_tlsdesc_got);
7766 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
7767 elfNN_aarch64_tlsdesc_small_plt_entry,
7768 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
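/* The TLSDESC PLT stub just copied in is, roughly (a sketch for
   orientation only; the authoritative encodings live in
   elfNN_aarch64_tlsdesc_small_plt_entry):

     stp  x2, x3, [sp, #-16]!
     adrp x2, DT_TLSDESC_GOT              // both ADRPs patched below
     adrp x3, PLT_GOT
     ldr  x2, [x2, #:lo12:DT_TLSDESC_GOT]
     add  x3, x3, #:lo12:PLT_GOT
     br   x2

   The update calls that follow fill in the two ADRP hi21 fields and the
   LDR/ADD lo12 fields.  */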
7771 bfd_vma adrp1_addr =
7772 htab->root.splt->output_section->vma
7773 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
7775 bfd_vma adrp2_addr = adrp1_addr + 4;
7778 htab->root.sgot->output_section->vma
7779 + htab->root.sgot->output_offset;
7781 bfd_vma pltgot_addr =
7782 htab->root.sgotplt->output_section->vma
7783 + htab->root.sgotplt->output_offset;
7785 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
7787 bfd_byte *plt_entry =
7788 htab->root.splt->contents + htab->tlsdesc_plt;
7790 /* adrp x2, DT_TLSDESC_GOT */
7791 elf_aarch64_update_plt_entry (output_bfd,
7792 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7794 (PG (dt_tlsdesc_got)
7795 - PG (adrp1_addr)));
7798 elf_aarch64_update_plt_entry (output_bfd,
7799 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7802 - PG (adrp2_addr)));
7804 /* ldr x2, [x2, #0] */
7805 elf_aarch64_update_plt_entry (output_bfd,
7806 BFD_RELOC_AARCH64_LDSTNN_LO12,
7808 PG_OFFSET (dt_tlsdesc_got));
7811 elf_aarch64_update_plt_entry (output_bfd,
7812 BFD_RELOC_AARCH64_ADD_LO12,
7814 PG_OFFSET (pltgot_addr));
7819 if (htab->root.sgotplt)
7821 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
7823 (*_bfd_error_handler)
7824 (_("discarded output section: `%A'"), htab->root.sgotplt);
7828 /* Fill in the first three entries in the global offset table. */
7829 if (htab->root.sgotplt->size > 0)
7831 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
7833 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
7834 bfd_put_NN (output_bfd,
7836 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
7837 bfd_put_NN (output_bfd,
7839 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
7842 if (htab->root.sgot)
7844 if (htab->root.sgot->size > 0)
7847 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
7848 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
7852 elf_section_data (htab->root.sgotplt->output_section)->
7853 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
7856 if (htab->root.sgot && htab->root.sgot->size > 0)
7857 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
7860 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
7861 htab_traverse (htab->loc_hash_table,
7862 elfNN_aarch64_finish_local_dynamic_symbol,
7868 /* Return address for Ith PLT stub in section PLT, for relocation REL
7869 or (bfd_vma) -1 if it should not be included. */
7872 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
7873 const arelent *rel ATTRIBUTE_UNUSED)
7875 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
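/* For example, assuming the usual small-model sizes of 32 bytes for PLT0
   (PLT_ENTRY_SIZE) and 16 bytes per stub (PLT_SMALL_ENTRY_SIZE), which are
   defined earlier in this file, stub 0 would resolve to plt->vma + 32 and
   stub 1 to plt->vma + 48.  */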
7879 /* We use this so we can override certain functions
7880 (though currently we don't). */
7882 const struct elf_size_info elfNN_aarch64_size_info =
7884 sizeof (ElfNN_External_Ehdr),
7885 sizeof (ElfNN_External_Phdr),
7886 sizeof (ElfNN_External_Shdr),
7887 sizeof (ElfNN_External_Rel),
7888 sizeof (ElfNN_External_Rela),
7889 sizeof (ElfNN_External_Sym),
7890 sizeof (ElfNN_External_Dyn),
7891 sizeof (Elf_External_Note),
7892 4, /* Hash table entry size. */
7893 1, /* Internal relocs per external relocs. */
7894 ARCH_SIZE, /* Arch size. */
7895 LOG_FILE_ALIGN, /* Log_file_align. */
7896 ELFCLASSNN, EV_CURRENT,
7897 bfd_elfNN_write_out_phdrs,
7898 bfd_elfNN_write_shdrs_and_ehdr,
7899 bfd_elfNN_checksum_contents,
7900 bfd_elfNN_write_relocs,
7901 bfd_elfNN_swap_symbol_in,
7902 bfd_elfNN_swap_symbol_out,
7903 bfd_elfNN_slurp_reloc_table,
7904 bfd_elfNN_slurp_symbol_table,
7905 bfd_elfNN_swap_dyn_in,
7906 bfd_elfNN_swap_dyn_out,
7907 bfd_elfNN_swap_reloc_in,
7908 bfd_elfNN_swap_reloc_out,
7909 bfd_elfNN_swap_reloca_in,
7910 bfd_elfNN_swap_reloca_out
7913 #define ELF_ARCH bfd_arch_aarch64
7914 #define ELF_MACHINE_CODE EM_AARCH64
7915 #define ELF_MAXPAGESIZE 0x10000
7916 #define ELF_MINPAGESIZE 0x1000
7917 #define ELF_COMMONPAGESIZE 0x1000
7919 #define bfd_elfNN_close_and_cleanup \
7920 elfNN_aarch64_close_and_cleanup
7922 #define bfd_elfNN_bfd_free_cached_info \
7923 elfNN_aarch64_bfd_free_cached_info
7925 #define bfd_elfNN_bfd_is_target_special_symbol \
7926 elfNN_aarch64_is_target_special_symbol
7928 #define bfd_elfNN_bfd_link_hash_table_create \
7929 elfNN_aarch64_link_hash_table_create
7931 #define bfd_elfNN_bfd_merge_private_bfd_data \
7932 elfNN_aarch64_merge_private_bfd_data
7934 #define bfd_elfNN_bfd_print_private_bfd_data \
7935 elfNN_aarch64_print_private_bfd_data
7937 #define bfd_elfNN_bfd_reloc_type_lookup \
7938 elfNN_aarch64_reloc_type_lookup
7940 #define bfd_elfNN_bfd_reloc_name_lookup \
7941 elfNN_aarch64_reloc_name_lookup
7943 #define bfd_elfNN_bfd_set_private_flags \
7944 elfNN_aarch64_set_private_flags
7946 #define bfd_elfNN_find_inliner_info \
7947 elfNN_aarch64_find_inliner_info
7949 #define bfd_elfNN_find_nearest_line \
7950 elfNN_aarch64_find_nearest_line
7952 #define bfd_elfNN_mkobject \
7953 elfNN_aarch64_mkobject
7955 #define bfd_elfNN_new_section_hook \
7956 elfNN_aarch64_new_section_hook
7958 #define elf_backend_adjust_dynamic_symbol \
7959 elfNN_aarch64_adjust_dynamic_symbol
7961 #define elf_backend_always_size_sections \
7962 elfNN_aarch64_always_size_sections
7964 #define elf_backend_check_relocs \
7965 elfNN_aarch64_check_relocs
7967 #define elf_backend_copy_indirect_symbol \
7968 elfNN_aarch64_copy_indirect_symbol
7970 /* Create the .dynbss and .rela.bss sections in DYNOBJ, and set up shortcuts
7971 to them in our hash table.  */
7972 #define elf_backend_create_dynamic_sections \
7973 elfNN_aarch64_create_dynamic_sections
7975 #define elf_backend_init_index_section \
7976 _bfd_elf_init_2_index_sections
7978 #define elf_backend_finish_dynamic_sections \
7979 elfNN_aarch64_finish_dynamic_sections
7981 #define elf_backend_finish_dynamic_symbol \
7982 elfNN_aarch64_finish_dynamic_symbol
7984 #define elf_backend_gc_sweep_hook \
7985 elfNN_aarch64_gc_sweep_hook
7987 #define elf_backend_object_p \
7988 elfNN_aarch64_object_p
7990 #define elf_backend_output_arch_local_syms \
7991 elfNN_aarch64_output_arch_local_syms
7993 #define elf_backend_plt_sym_val \
7994 elfNN_aarch64_plt_sym_val
7996 #define elf_backend_post_process_headers \
7997 elfNN_aarch64_post_process_headers
7999 #define elf_backend_relocate_section \
8000 elfNN_aarch64_relocate_section
8002 #define elf_backend_reloc_type_class \
8003 elfNN_aarch64_reloc_type_class
8005 #define elf_backend_section_from_shdr \
8006 elfNN_aarch64_section_from_shdr
8008 #define elf_backend_size_dynamic_sections \
8009 elfNN_aarch64_size_dynamic_sections
8011 #define elf_backend_size_info \
8012 elfNN_aarch64_size_info
8014 #define elf_backend_write_section \
8015 elfNN_aarch64_write_section
8017 #define elf_backend_can_refcount 1
8018 #define elf_backend_can_gc_sections 1
8019 #define elf_backend_plt_readonly 1
8020 #define elf_backend_want_got_plt 1
8021 #define elf_backend_want_plt_sym 0
8022 #define elf_backend_may_use_rel_p 0
8023 #define elf_backend_may_use_rela_p 1
8024 #define elf_backend_default_use_rela_p 1
8025 #define elf_backend_rela_normal 1
8026 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8027 #define elf_backend_default_execstack 0
8029 #undef elf_backend_obj_attrs_section
8030 #define elf_backend_obj_attrs_section ".ARM.attributes"
8032 #include "elfNN-target.h"