1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
/* Notes on implementation:

  Thread Local Storage (TLS)

  The implementation currently supports both traditional TLS and TLS
  descriptors, but only general dynamic (GD).

  For traditional TLS the assembler will present us with code
  fragments of the form:

  adrp x0, :tlsgd:foo
                           R_AARCH64_TLSGD_ADR_PAGE21(foo)
  add  x0, :tlsgd_lo12:foo
                           R_AARCH64_TLSGD_ADD_LO12_NC(foo)
  bl   __tls_get_addr
  nop
  For TLS descriptors the assembler will present us with code
  fragments of the form:

  adrp  x0, :tlsdesc:foo                      R_AARCH64_TLSDESC_ADR_PAGE21(foo)
  ldr   x1, [x0, #:tlsdesc_lo12:foo]          R_AARCH64_TLSDESC_LD64_LO12(foo)
  add   x0, x0, #:tlsdesc_lo12:foo            R_AARCH64_TLSDESC_ADD_LO12(foo)
  .tlsdesccall foo
  blr   x1                                    R_AARCH64_TLSDESC_CALL(foo)
  The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
  indicate that foo is thread local and should be accessed via the
  traditional TLS mechanism.
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
  The static linker must detect that 'foo' is a TLS object and
  allocate a double GOT entry.  The GOT entry must be created for both
  global and local TLS symbols.  Note that this is different from
  non-TLS local objects, which do not need a GOT entry.
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
  For global traditional TLS symbols the static linker places an
  R_AARCH64_TLS_DTPREL relocation on the offset entry.  The loader
  will subsequently fix up the offset.  For local TLS symbols the
  static linker fixes up the offset itself.
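  Illustratively, the double GOT entry for a traditional TLS symbol
  looks roughly like this:

    GOT[n]     module id   <- R_AARCH64_TLS_DTPMOD, fixed up by the loader
    GOT[n+1]   offset      <- R_AARCH64_TLS_DTPREL for global symbols;
                              written directly by the static linker for
                              local symbols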
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
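  Illustratively again, the double GOT entry for a TLS descriptor
  symbol is roughly:

    GOT[n]     resolver function pointer  <- R_AARCH64_TLSDESC placed here;
    GOT[n+1]   argument for the resolver     both words are filled in by
                                             the loader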
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
95 elfNN_aarch64_check_relocs()
97 This function is invoked for each relocation.
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
  spotted.  The local symbol data structures are created once, when
  the first local symbol is seen.
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
107 elfNN_aarch64_allocate_dynrelocs ()
  For each global with a positive reference count we allocate a double
  GOT slot.  For a traditional TLS symbol we allocate space for two
  relocation entries on the GOT; for a TLS descriptor symbol we
  allocate space for one relocation on the slot.  Record the GOT offset
  for this symbol.
115 elfNN_aarch64_size_dynamic_sections ()
  Iterate over all input BFDs, look in the local symbol data
  structures constructed earlier for local TLS symbols, and allocate
  them double GOT slots along with space for a single GOT relocation.
  Update the local symbol structure to record the GOT offset
  allocated.
122 elfNN_aarch64_relocate_section ()
124 Calls elfNN_aarch64_final_link_relocate ()
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
  relocations are emitted only once, the first time a TLS symbol is
  encountered.  The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
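  As an illustrative sketch only (not the exact code used below):

    if ((got_offset & 1) == 0)
      {
        ... emit the GOT relocation(s) for this symbol ...
        got_offset |= 1;                   [flag them as emitted]
      }
    value = got_base + (got_offset & ~(bfd_vma) 1);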
134 elfNN_aarch64_final_link_relocate ()
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
140 #include "libiberty.h"
142 #include "bfd_stdint.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21 \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
188 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
189 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
190 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
191 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
192 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
194 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
195 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
203 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
204 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
205 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
206 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1)
208 #define ELIMINATE_COPY_RELOCS 0
/* Return the size of a relocation entry.  HTAB is the bfd's
   elf_aarch64_link_hash_table.  */
212 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
214 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
215 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
216 #define PLT_ENTRY_SIZE (32)
217 #define PLT_SMALL_ENTRY_SIZE (16)
218 #define PLT_TLSDESC_ENTRY_SIZE (32)
220 /* Encoding of the nop instruction */
221 #define INSN_NOP 0xd503201f
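/* Size in bytes of the jump-slot part of the GOT: one GOT_ENTRY_SIZE
   slot per PLT relocation.  */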
223 #define aarch64_compute_jump_table_size(htab) \
224 (((htab)->root.srelplt == NULL) ? 0 \
225 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
227 /* The first entry in a procedure linkage table looks like this
228 if the distance between the PLTGOT and the PLT is < 4GB use
229 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
230 in x16 and needs to work out PLTGOT[1] by using an address of
231 [x16,#-GOT_ENTRY_SIZE]. */
232 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
234 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
235 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
237 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
238 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
240 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
241 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
243 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
244 0x1f, 0x20, 0x03, 0xd5, /* nop */
245 0x1f, 0x20, 0x03, 0xd5, /* nop */
246 0x1f, 0x20, 0x03, 0xd5, /* nop */
249 /* Per function entry in a procedure linkage table looks like this
250 if the distance between the PLTGOT and the PLT is < 4GB use
251 these PLT entries. */
252 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
254 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
256 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
257 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
259 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
260 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
262 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
265 static const bfd_byte
266 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
268 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
269 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
270 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
272 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
273 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
275 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
276 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
278 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
279 0x1f, 0x20, 0x03, 0xd5, /* nop */
280 0x1f, 0x20, 0x03, 0xd5, /* nop */
283 #define elf_info_to_howto elfNN_aarch64_info_to_howto
284 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
286 #define AARCH64_ELF_ABI_VERSION 0
288 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
289 #define ALL_ONES (~ (bfd_vma) 0)
/* Indexed by the bfd internal reloc enumerators.
   Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
   in reloc.c.  */
295 static reloc_howto_type elfNN_aarch64_howto_table[] =
299 /* Basic data relocations. */
302 HOWTO (R_AARCH64_NULL, /* type */
304 3, /* size (0 = byte, 1 = short, 2 = long) */
306 FALSE, /* pc_relative */
308 complain_overflow_dont, /* complain_on_overflow */
309 bfd_elf_generic_reloc, /* special_function */
310 "R_AARCH64_NULL", /* name */
311 FALSE, /* partial_inplace */
314 FALSE), /* pcrel_offset */
316 HOWTO (R_AARCH64_NONE, /* type */
318 3, /* size (0 = byte, 1 = short, 2 = long) */
320 FALSE, /* pc_relative */
322 complain_overflow_dont, /* complain_on_overflow */
323 bfd_elf_generic_reloc, /* special_function */
324 "R_AARCH64_NONE", /* name */
325 FALSE, /* partial_inplace */
328 FALSE), /* pcrel_offset */
332 HOWTO64 (AARCH64_R (ABS64), /* type */
334 4, /* size (4 = long long) */
336 FALSE, /* pc_relative */
338 complain_overflow_unsigned, /* complain_on_overflow */
339 bfd_elf_generic_reloc, /* special_function */
340 AARCH64_R_STR (ABS64), /* name */
341 FALSE, /* partial_inplace */
342 ALL_ONES, /* src_mask */
343 ALL_ONES, /* dst_mask */
344 FALSE), /* pcrel_offset */
347 HOWTO (AARCH64_R (ABS32), /* type */
349 2, /* size (0 = byte, 1 = short, 2 = long) */
351 FALSE, /* pc_relative */
353 complain_overflow_unsigned, /* complain_on_overflow */
354 bfd_elf_generic_reloc, /* special_function */
355 AARCH64_R_STR (ABS32), /* name */
356 FALSE, /* partial_inplace */
357 0xffffffff, /* src_mask */
358 0xffffffff, /* dst_mask */
359 FALSE), /* pcrel_offset */
362 HOWTO (AARCH64_R (ABS16), /* type */
364 1, /* size (0 = byte, 1 = short, 2 = long) */
366 FALSE, /* pc_relative */
368 complain_overflow_unsigned, /* complain_on_overflow */
369 bfd_elf_generic_reloc, /* special_function */
370 AARCH64_R_STR (ABS16), /* name */
371 FALSE, /* partial_inplace */
372 0xffff, /* src_mask */
373 0xffff, /* dst_mask */
374 FALSE), /* pcrel_offset */
376 /* .xword: (S+A-P) */
377 HOWTO64 (AARCH64_R (PREL64), /* type */
379 4, /* size (4 = long long) */
381 TRUE, /* pc_relative */
383 complain_overflow_signed, /* complain_on_overflow */
384 bfd_elf_generic_reloc, /* special_function */
385 AARCH64_R_STR (PREL64), /* name */
386 FALSE, /* partial_inplace */
387 ALL_ONES, /* src_mask */
388 ALL_ONES, /* dst_mask */
389 TRUE), /* pcrel_offset */
392 HOWTO (AARCH64_R (PREL32), /* type */
394 2, /* size (0 = byte, 1 = short, 2 = long) */
396 TRUE, /* pc_relative */
398 complain_overflow_signed, /* complain_on_overflow */
399 bfd_elf_generic_reloc, /* special_function */
400 AARCH64_R_STR (PREL32), /* name */
401 FALSE, /* partial_inplace */
402 0xffffffff, /* src_mask */
403 0xffffffff, /* dst_mask */
404 TRUE), /* pcrel_offset */
407 HOWTO (AARCH64_R (PREL16), /* type */
409 1, /* size (0 = byte, 1 = short, 2 = long) */
411 TRUE, /* pc_relative */
413 complain_overflow_signed, /* complain_on_overflow */
414 bfd_elf_generic_reloc, /* special_function */
415 AARCH64_R_STR (PREL16), /* name */
416 FALSE, /* partial_inplace */
417 0xffff, /* src_mask */
418 0xffff, /* dst_mask */
419 TRUE), /* pcrel_offset */
421 /* Group relocations to create a 16, 32, 48 or 64 bit
422 unsigned data or abs address inline. */
424 /* MOVZ: ((S+A) >> 0) & 0xffff */
425 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
427 2, /* size (0 = byte, 1 = short, 2 = long) */
429 FALSE, /* pc_relative */
431 complain_overflow_unsigned, /* complain_on_overflow */
432 bfd_elf_generic_reloc, /* special_function */
433 AARCH64_R_STR (MOVW_UABS_G0), /* name */
434 FALSE, /* partial_inplace */
435 0xffff, /* src_mask */
436 0xffff, /* dst_mask */
437 FALSE), /* pcrel_offset */
439 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
440 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
444 FALSE, /* pc_relative */
446 complain_overflow_dont, /* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
449 FALSE, /* partial_inplace */
450 0xffff, /* src_mask */
451 0xffff, /* dst_mask */
452 FALSE), /* pcrel_offset */
454 /* MOVZ: ((S+A) >> 16) & 0xffff */
455 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
457 2, /* size (0 = byte, 1 = short, 2 = long) */
459 FALSE, /* pc_relative */
461 complain_overflow_unsigned, /* complain_on_overflow */
462 bfd_elf_generic_reloc, /* special_function */
463 AARCH64_R_STR (MOVW_UABS_G1), /* name */
464 FALSE, /* partial_inplace */
465 0xffff, /* src_mask */
466 0xffff, /* dst_mask */
467 FALSE), /* pcrel_offset */
469 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
470 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
474 FALSE, /* pc_relative */
476 complain_overflow_dont, /* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
479 FALSE, /* partial_inplace */
480 0xffff, /* src_mask */
481 0xffff, /* dst_mask */
482 FALSE), /* pcrel_offset */
484 /* MOVZ: ((S+A) >> 32) & 0xffff */
485 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
487 2, /* size (0 = byte, 1 = short, 2 = long) */
489 FALSE, /* pc_relative */
491 complain_overflow_unsigned, /* complain_on_overflow */
492 bfd_elf_generic_reloc, /* special_function */
493 AARCH64_R_STR (MOVW_UABS_G2), /* name */
494 FALSE, /* partial_inplace */
495 0xffff, /* src_mask */
496 0xffff, /* dst_mask */
497 FALSE), /* pcrel_offset */
499 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
500 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
502 2, /* size (0 = byte, 1 = short, 2 = long) */
504 FALSE, /* pc_relative */
506 complain_overflow_dont, /* complain_on_overflow */
507 bfd_elf_generic_reloc, /* special_function */
508 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
509 FALSE, /* partial_inplace */
510 0xffff, /* src_mask */
511 0xffff, /* dst_mask */
512 FALSE), /* pcrel_offset */
514 /* MOVZ: ((S+A) >> 48) & 0xffff */
515 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
517 2, /* size (0 = byte, 1 = short, 2 = long) */
519 FALSE, /* pc_relative */
521 complain_overflow_unsigned, /* complain_on_overflow */
522 bfd_elf_generic_reloc, /* special_function */
523 AARCH64_R_STR (MOVW_UABS_G3), /* name */
524 FALSE, /* partial_inplace */
525 0xffff, /* src_mask */
526 0xffff, /* dst_mask */
527 FALSE), /* pcrel_offset */
529 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
530 signed data or abs address inline. Will change instruction
531 to MOVN or MOVZ depending on sign of calculated value. */
533 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
534 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
536 2, /* size (0 = byte, 1 = short, 2 = long) */
538 FALSE, /* pc_relative */
540 complain_overflow_signed, /* complain_on_overflow */
541 bfd_elf_generic_reloc, /* special_function */
542 AARCH64_R_STR (MOVW_SABS_G0), /* name */
543 FALSE, /* partial_inplace */
544 0xffff, /* src_mask */
545 0xffff, /* dst_mask */
546 FALSE), /* pcrel_offset */
548 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
549 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
551 2, /* size (0 = byte, 1 = short, 2 = long) */
553 FALSE, /* pc_relative */
555 complain_overflow_signed, /* complain_on_overflow */
556 bfd_elf_generic_reloc, /* special_function */
557 AARCH64_R_STR (MOVW_SABS_G1), /* name */
558 FALSE, /* partial_inplace */
559 0xffff, /* src_mask */
560 0xffff, /* dst_mask */
561 FALSE), /* pcrel_offset */
563 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
564 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
568 FALSE, /* pc_relative */
570 complain_overflow_signed, /* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 AARCH64_R_STR (MOVW_SABS_G2), /* name */
573 FALSE, /* partial_inplace */
574 0xffff, /* src_mask */
575 0xffff, /* dst_mask */
576 FALSE), /* pcrel_offset */
578 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
579 addresses: PG(x) is (x & ~0xfff). */
581 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
582 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
586 TRUE, /* pc_relative */
588 complain_overflow_signed, /* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 AARCH64_R_STR (LD_PREL_LO19), /* name */
591 FALSE, /* partial_inplace */
592 0x7ffff, /* src_mask */
593 0x7ffff, /* dst_mask */
594 TRUE), /* pcrel_offset */
596 /* ADR: (S+A-P) & 0x1fffff */
597 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
599 2, /* size (0 = byte, 1 = short, 2 = long) */
601 TRUE, /* pc_relative */
603 complain_overflow_signed, /* complain_on_overflow */
604 bfd_elf_generic_reloc, /* special_function */
605 AARCH64_R_STR (ADR_PREL_LO21), /* name */
606 FALSE, /* partial_inplace */
607 0x1fffff, /* src_mask */
608 0x1fffff, /* dst_mask */
609 TRUE), /* pcrel_offset */
611 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
612 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
614 2, /* size (0 = byte, 1 = short, 2 = long) */
616 TRUE, /* pc_relative */
618 complain_overflow_signed, /* complain_on_overflow */
619 bfd_elf_generic_reloc, /* special_function */
620 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
621 FALSE, /* partial_inplace */
622 0x1fffff, /* src_mask */
623 0x1fffff, /* dst_mask */
624 TRUE), /* pcrel_offset */
626 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
627 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
629 2, /* size (0 = byte, 1 = short, 2 = long) */
631 TRUE, /* pc_relative */
633 complain_overflow_dont, /* complain_on_overflow */
634 bfd_elf_generic_reloc, /* special_function */
635 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
636 FALSE, /* partial_inplace */
637 0x1fffff, /* src_mask */
638 0x1fffff, /* dst_mask */
639 TRUE), /* pcrel_offset */
641 /* ADD: (S+A) & 0xfff [no overflow check] */
642 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
644 2, /* size (0 = byte, 1 = short, 2 = long) */
646 FALSE, /* pc_relative */
648 complain_overflow_dont, /* complain_on_overflow */
649 bfd_elf_generic_reloc, /* special_function */
650 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
651 FALSE, /* partial_inplace */
652 0x3ffc00, /* src_mask */
653 0x3ffc00, /* dst_mask */
654 FALSE), /* pcrel_offset */
656 /* LD/ST8: (S+A) & 0xfff */
657 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
659 2, /* size (0 = byte, 1 = short, 2 = long) */
661 FALSE, /* pc_relative */
663 complain_overflow_dont, /* complain_on_overflow */
664 bfd_elf_generic_reloc, /* special_function */
665 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
666 FALSE, /* partial_inplace */
667 0xfff, /* src_mask */
668 0xfff, /* dst_mask */
669 FALSE), /* pcrel_offset */
671 /* Relocations for control-flow instructions. */
673 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
674 HOWTO (AARCH64_R (TSTBR14), /* type */
676 2, /* size (0 = byte, 1 = short, 2 = long) */
678 TRUE, /* pc_relative */
680 complain_overflow_signed, /* complain_on_overflow */
681 bfd_elf_generic_reloc, /* special_function */
682 AARCH64_R_STR (TSTBR14), /* name */
683 FALSE, /* partial_inplace */
684 0x3fff, /* src_mask */
685 0x3fff, /* dst_mask */
686 TRUE), /* pcrel_offset */
688 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
689 HOWTO (AARCH64_R (CONDBR19), /* type */
691 2, /* size (0 = byte, 1 = short, 2 = long) */
693 TRUE, /* pc_relative */
695 complain_overflow_signed, /* complain_on_overflow */
696 bfd_elf_generic_reloc, /* special_function */
697 AARCH64_R_STR (CONDBR19), /* name */
698 FALSE, /* partial_inplace */
699 0x7ffff, /* src_mask */
700 0x7ffff, /* dst_mask */
701 TRUE), /* pcrel_offset */
703 /* B: ((S+A-P) >> 2) & 0x3ffffff */
704 HOWTO (AARCH64_R (JUMP26), /* type */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
708 TRUE, /* pc_relative */
710 complain_overflow_signed, /* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 AARCH64_R_STR (JUMP26), /* name */
713 FALSE, /* partial_inplace */
714 0x3ffffff, /* src_mask */
715 0x3ffffff, /* dst_mask */
716 TRUE), /* pcrel_offset */
718 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
719 HOWTO (AARCH64_R (CALL26), /* type */
721 2, /* size (0 = byte, 1 = short, 2 = long) */
723 TRUE, /* pc_relative */
725 complain_overflow_signed, /* complain_on_overflow */
726 bfd_elf_generic_reloc, /* special_function */
727 AARCH64_R_STR (CALL26), /* name */
728 FALSE, /* partial_inplace */
729 0x3ffffff, /* src_mask */
730 0x3ffffff, /* dst_mask */
731 TRUE), /* pcrel_offset */
733 /* LD/ST16: (S+A) & 0xffe */
734 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
738 FALSE, /* pc_relative */
740 complain_overflow_dont, /* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
743 FALSE, /* partial_inplace */
744 0xffe, /* src_mask */
745 0xffe, /* dst_mask */
746 FALSE), /* pcrel_offset */
748 /* LD/ST32: (S+A) & 0xffc */
749 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
751 2, /* size (0 = byte, 1 = short, 2 = long) */
753 FALSE, /* pc_relative */
755 complain_overflow_dont, /* complain_on_overflow */
756 bfd_elf_generic_reloc, /* special_function */
757 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
758 FALSE, /* partial_inplace */
759 0xffc, /* src_mask */
760 0xffc, /* dst_mask */
761 FALSE), /* pcrel_offset */
763 /* LD/ST64: (S+A) & 0xff8 */
764 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
768 FALSE, /* pc_relative */
770 complain_overflow_dont, /* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
773 FALSE, /* partial_inplace */
774 0xff8, /* src_mask */
775 0xff8, /* dst_mask */
776 FALSE), /* pcrel_offset */
778 /* LD/ST128: (S+A) & 0xff0 */
779 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
781 2, /* size (0 = byte, 1 = short, 2 = long) */
783 FALSE, /* pc_relative */
785 complain_overflow_dont, /* complain_on_overflow */
786 bfd_elf_generic_reloc, /* special_function */
787 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
788 FALSE, /* partial_inplace */
789 0xff0, /* src_mask */
790 0xff0, /* dst_mask */
791 FALSE), /* pcrel_offset */
793 /* Set a load-literal immediate field to bits
794 0x1FFFFC of G(S)-P */
795 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
797 2, /* size (0 = byte,1 = short,2 = long) */
799 TRUE, /* pc_relative */
801 complain_overflow_signed, /* complain_on_overflow */
802 bfd_elf_generic_reloc, /* special_function */
803 AARCH64_R_STR (GOT_LD_PREL19), /* name */
804 FALSE, /* partial_inplace */
805 0xffffe0, /* src_mask */
806 0xffffe0, /* dst_mask */
807 TRUE), /* pcrel_offset */
809 /* Get to the page for the GOT entry for the symbol
810 (G(S) - P) using an ADRP instruction. */
811 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
813 2, /* size (0 = byte, 1 = short, 2 = long) */
815 TRUE, /* pc_relative */
817 complain_overflow_dont, /* complain_on_overflow */
818 bfd_elf_generic_reloc, /* special_function */
819 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
820 FALSE, /* partial_inplace */
821 0x1fffff, /* src_mask */
822 0x1fffff, /* dst_mask */
823 TRUE), /* pcrel_offset */
825 /* LD64: GOT offset G(S) & 0xff8 */
826 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
828 2, /* size (0 = byte, 1 = short, 2 = long) */
830 FALSE, /* pc_relative */
832 complain_overflow_dont, /* complain_on_overflow */
833 bfd_elf_generic_reloc, /* special_function */
834 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
835 FALSE, /* partial_inplace */
836 0xff8, /* src_mask */
837 0xff8, /* dst_mask */
838 FALSE), /* pcrel_offset */
840 /* LD32: GOT offset G(S) & 0xffc */
841 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
843 2, /* size (0 = byte, 1 = short, 2 = long) */
845 FALSE, /* pc_relative */
847 complain_overflow_dont, /* complain_on_overflow */
848 bfd_elf_generic_reloc, /* special_function */
849 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
850 FALSE, /* partial_inplace */
851 0xffc, /* src_mask */
852 0xffc, /* dst_mask */
853 FALSE), /* pcrel_offset */
855 /* LD64: GOT offset for the symbol. */
856 HOWTO64 (AARCH64_R (LD64_GOTOFF_LO15), /* type */
858 2, /* size (0 = byte, 1 = short, 2 = long) */
860 FALSE, /* pc_relative */
862 complain_overflow_unsigned, /* complain_on_overflow */
863 bfd_elf_generic_reloc, /* special_function */
864 AARCH64_R_STR (LD64_GOTOFF_LO15), /* name */
865 FALSE, /* partial_inplace */
866 0x7ff8, /* src_mask */
867 0x7ff8, /* dst_mask */
868 FALSE), /* pcrel_offset */
870 /* LD32: GOT offset to the page address of GOT table.
871 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x5ffc. */
872 HOWTO32 (AARCH64_R (LD32_GOTPAGE_LO14), /* type */
874 2, /* size (0 = byte, 1 = short, 2 = long) */
876 FALSE, /* pc_relative */
878 complain_overflow_unsigned, /* complain_on_overflow */
879 bfd_elf_generic_reloc, /* special_function */
880 AARCH64_R_STR (LD32_GOTPAGE_LO14), /* name */
881 FALSE, /* partial_inplace */
882 0x5ffc, /* src_mask */
883 0x5ffc, /* dst_mask */
884 FALSE), /* pcrel_offset */
886 /* LD64: GOT offset to the page address of GOT table.
887 (G(S) - PAGE (_GLOBAL_OFFSET_TABLE_)) & 0x7ff8. */
888 HOWTO64 (AARCH64_R (LD64_GOTPAGE_LO15), /* type */
890 2, /* size (0 = byte, 1 = short, 2 = long) */
892 FALSE, /* pc_relative */
894 complain_overflow_unsigned, /* complain_on_overflow */
895 bfd_elf_generic_reloc, /* special_function */
896 AARCH64_R_STR (LD64_GOTPAGE_LO15), /* name */
897 FALSE, /* partial_inplace */
898 0x7ff8, /* src_mask */
899 0x7ff8, /* dst_mask */
900 FALSE), /* pcrel_offset */
902 /* Get to the page for the GOT entry for the symbol
903 (G(S) - P) using an ADRP instruction. */
904 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
906 2, /* size (0 = byte, 1 = short, 2 = long) */
908 TRUE, /* pc_relative */
910 complain_overflow_dont, /* complain_on_overflow */
911 bfd_elf_generic_reloc, /* special_function */
912 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
913 FALSE, /* partial_inplace */
914 0x1fffff, /* src_mask */
915 0x1fffff, /* dst_mask */
916 TRUE), /* pcrel_offset */
918 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
920 2, /* size (0 = byte, 1 = short, 2 = long) */
922 TRUE, /* pc_relative */
924 complain_overflow_dont, /* complain_on_overflow */
925 bfd_elf_generic_reloc, /* special_function */
926 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
927 FALSE, /* partial_inplace */
928 0x1fffff, /* src_mask */
929 0x1fffff, /* dst_mask */
930 TRUE), /* pcrel_offset */
932 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
933 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
937 FALSE, /* pc_relative */
939 complain_overflow_dont, /* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
942 FALSE, /* partial_inplace */
943 0xfff, /* src_mask */
944 0xfff, /* dst_mask */
945 FALSE), /* pcrel_offset */
947 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
951 FALSE, /* pc_relative */
953 complain_overflow_dont, /* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
956 FALSE, /* partial_inplace */
957 0xffff, /* src_mask */
958 0xffff, /* dst_mask */
959 FALSE), /* pcrel_offset */
961 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
965 FALSE, /* pc_relative */
967 complain_overflow_dont, /* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
970 FALSE, /* partial_inplace */
971 0xffff, /* src_mask */
972 0xffff, /* dst_mask */
973 FALSE), /* pcrel_offset */
975 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
979 FALSE, /* pc_relative */
981 complain_overflow_dont, /* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
984 FALSE, /* partial_inplace */
985 0x1fffff, /* src_mask */
986 0x1fffff, /* dst_mask */
987 FALSE), /* pcrel_offset */
989 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
993 FALSE, /* pc_relative */
995 complain_overflow_dont, /* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
998 FALSE, /* partial_inplace */
999 0xff8, /* src_mask */
1000 0xff8, /* dst_mask */
1001 FALSE), /* pcrel_offset */
1003 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1007 FALSE, /* pc_relative */
1009 complain_overflow_dont, /* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
1012 FALSE, /* partial_inplace */
1013 0xffc, /* src_mask */
1014 0xffc, /* dst_mask */
1015 FALSE), /* pcrel_offset */
1017 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1021 FALSE, /* pc_relative */
1023 complain_overflow_dont, /* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
1026 FALSE, /* partial_inplace */
1027 0x1ffffc, /* src_mask */
1028 0x1ffffc, /* dst_mask */
1029 FALSE), /* pcrel_offset */
1031 /* Unsigned 12 bit byte offset to module TLS base address. */
1032 HOWTO (AARCH64_R (TLSLD_ADD_DTPREL_LO12), /* type */
1034 2, /* size (0 = byte, 1 = short, 2 = long) */
1036 FALSE, /* pc_relative */
1038 complain_overflow_unsigned, /* complain_on_overflow */
1039 bfd_elf_generic_reloc, /* special_function */
1040 AARCH64_R_STR (TLSLD_ADD_DTPREL_LO12), /* name */
1041 FALSE, /* partial_inplace */
1042 0xfff, /* src_mask */
1043 0xfff, /* dst_mask */
1044 FALSE), /* pcrel_offset */
1046 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
1047 HOWTO (AARCH64_R (TLSLD_ADD_LO12_NC), /* type */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1051 FALSE, /* pc_relative */
1053 complain_overflow_dont, /* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 AARCH64_R_STR (TLSLD_ADD_LO12_NC), /* name */
1056 FALSE, /* partial_inplace */
1057 0xfff, /* src_mask */
1058 0xfff, /* dst_mask */
1059 FALSE), /* pcrel_offset */
1061 /* Get to the page for the GOT entry for the symbol
1062 (G(S) - P) using an ADRP instruction. */
1063 HOWTO (AARCH64_R (TLSLD_ADR_PAGE21), /* type */
1064 12, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 TRUE, /* pc_relative */
1069 complain_overflow_signed, /* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 AARCH64_R_STR (TLSLD_ADR_PAGE21), /* name */
1072 FALSE, /* partial_inplace */
1073 0x1fffff, /* src_mask */
1074 0x1fffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1077 HOWTO (AARCH64_R (TLSLD_ADR_PREL21), /* type */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 TRUE, /* pc_relative */
1083 complain_overflow_signed, /* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 AARCH64_R_STR (TLSLD_ADR_PREL21), /* name */
1086 FALSE, /* partial_inplace */
1087 0x1fffff, /* src_mask */
1088 0x1fffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1091 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
1092 32, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 FALSE, /* pc_relative */
1097 complain_overflow_unsigned, /* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
1100 FALSE, /* partial_inplace */
1101 0xffff, /* src_mask */
1102 0xffff, /* dst_mask */
1103 FALSE), /* pcrel_offset */
1105 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
1106 16, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 FALSE, /* pc_relative */
1111 complain_overflow_dont, /* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1114 FALSE, /* partial_inplace */
1115 0xffff, /* src_mask */
1116 0xffff, /* dst_mask */
1117 FALSE), /* pcrel_offset */
1119 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1120 16, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 FALSE, /* pc_relative */
1125 complain_overflow_dont, /* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1128 FALSE, /* partial_inplace */
1129 0xffff, /* src_mask */
1130 0xffff, /* dst_mask */
1131 FALSE), /* pcrel_offset */
1133 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 FALSE, /* pc_relative */
1139 complain_overflow_dont, /* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1142 FALSE, /* partial_inplace */
1143 0xffff, /* src_mask */
1144 0xffff, /* dst_mask */
1145 FALSE), /* pcrel_offset */
1147 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 FALSE, /* pc_relative */
1153 complain_overflow_dont, /* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1156 FALSE, /* partial_inplace */
1157 0xffff, /* src_mask */
1158 0xffff, /* dst_mask */
1159 FALSE), /* pcrel_offset */
1161 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1162 12, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 FALSE, /* pc_relative */
1167 complain_overflow_unsigned, /* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1170 FALSE, /* partial_inplace */
1171 0xfff, /* src_mask */
1172 0xfff, /* dst_mask */
1173 FALSE), /* pcrel_offset */
1175 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 FALSE, /* pc_relative */
1181 complain_overflow_unsigned, /* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1184 FALSE, /* partial_inplace */
1185 0xfff, /* src_mask */
1186 0xfff, /* dst_mask */
1187 FALSE), /* pcrel_offset */
1189 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 FALSE, /* pc_relative */
1195 complain_overflow_dont, /* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1198 FALSE, /* partial_inplace */
1199 0xfff, /* src_mask */
1200 0xfff, /* dst_mask */
1201 FALSE), /* pcrel_offset */
1203 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 TRUE, /* pc_relative */
1209 complain_overflow_dont, /* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1212 FALSE, /* partial_inplace */
1213 0x0ffffe0, /* src_mask */
1214 0x0ffffe0, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1217 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 TRUE, /* pc_relative */
1223 complain_overflow_dont, /* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1226 FALSE, /* partial_inplace */
1227 0x1fffff, /* src_mask */
1228 0x1fffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1231 /* Get to the page for the GOT entry for the symbol
1232 (G(S) - P) using an ADRP instruction. */
1233 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1234 12, /* rightshift */
1235 2, /* size (0 = byte, 1 = short, 2 = long) */
1237 TRUE, /* pc_relative */
1239 complain_overflow_dont, /* complain_on_overflow */
1240 bfd_elf_generic_reloc, /* special_function */
1241 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1242 FALSE, /* partial_inplace */
1243 0x1fffff, /* src_mask */
1244 0x1fffff, /* dst_mask */
1245 TRUE), /* pcrel_offset */
1247 /* LD64: GOT offset G(S) & 0xff8. */
1248 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1250 2, /* size (0 = byte, 1 = short, 2 = long) */
1252 FALSE, /* pc_relative */
1254 complain_overflow_dont, /* complain_on_overflow */
1255 bfd_elf_generic_reloc, /* special_function */
1256 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1257 FALSE, /* partial_inplace */
1258 0xff8, /* src_mask */
1259 0xff8, /* dst_mask */
1260 FALSE), /* pcrel_offset */
1262 /* LD32: GOT offset G(S) & 0xffc. */
1263 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1265 2, /* size (0 = byte, 1 = short, 2 = long) */
1267 FALSE, /* pc_relative */
1269 complain_overflow_dont, /* complain_on_overflow */
1270 bfd_elf_generic_reloc, /* special_function */
1271 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1272 FALSE, /* partial_inplace */
1273 0xffc, /* src_mask */
1274 0xffc, /* dst_mask */
1275 FALSE), /* pcrel_offset */
1277 /* ADD: GOT offset G(S) & 0xfff. */
1278 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1280 2, /* size (0 = byte, 1 = short, 2 = long) */
1282 FALSE, /* pc_relative */
1284 complain_overflow_dont, /* complain_on_overflow */
1285 bfd_elf_generic_reloc, /* special_function */
1286 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1287 FALSE, /* partial_inplace */
1288 0xfff, /* src_mask */
1289 0xfff, /* dst_mask */
1290 FALSE), /* pcrel_offset */
1292 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1293 16, /* rightshift */
1294 2, /* size (0 = byte, 1 = short, 2 = long) */
1296 FALSE, /* pc_relative */
1298 complain_overflow_dont, /* complain_on_overflow */
1299 bfd_elf_generic_reloc, /* special_function */
1300 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1301 FALSE, /* partial_inplace */
1302 0xffff, /* src_mask */
1303 0xffff, /* dst_mask */
1304 FALSE), /* pcrel_offset */
1306 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1308 2, /* size (0 = byte, 1 = short, 2 = long) */
1310 FALSE, /* pc_relative */
1312 complain_overflow_dont, /* complain_on_overflow */
1313 bfd_elf_generic_reloc, /* special_function */
1314 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1315 FALSE, /* partial_inplace */
1316 0xffff, /* src_mask */
1317 0xffff, /* dst_mask */
1318 FALSE), /* pcrel_offset */
1320 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1322 2, /* size (0 = byte, 1 = short, 2 = long) */
1324 FALSE, /* pc_relative */
1326 complain_overflow_dont, /* complain_on_overflow */
1327 bfd_elf_generic_reloc, /* special_function */
1328 AARCH64_R_STR (TLSDESC_LDR), /* name */
1329 FALSE, /* partial_inplace */
1332 FALSE), /* pcrel_offset */
1334 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1336 2, /* size (0 = byte, 1 = short, 2 = long) */
1338 FALSE, /* pc_relative */
1340 complain_overflow_dont, /* complain_on_overflow */
1341 bfd_elf_generic_reloc, /* special_function */
1342 AARCH64_R_STR (TLSDESC_ADD), /* name */
1343 FALSE, /* partial_inplace */
1346 FALSE), /* pcrel_offset */
1348 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1350 2, /* size (0 = byte, 1 = short, 2 = long) */
1352 FALSE, /* pc_relative */
1354 complain_overflow_dont, /* complain_on_overflow */
1355 bfd_elf_generic_reloc, /* special_function */
1356 AARCH64_R_STR (TLSDESC_CALL), /* name */
1357 FALSE, /* partial_inplace */
1360 FALSE), /* pcrel_offset */
1362 HOWTO (AARCH64_R (COPY), /* type */
1364 2, /* size (0 = byte, 1 = short, 2 = long) */
1366 FALSE, /* pc_relative */
1368 complain_overflow_bitfield, /* complain_on_overflow */
1369 bfd_elf_generic_reloc, /* special_function */
1370 AARCH64_R_STR (COPY), /* name */
1371 TRUE, /* partial_inplace */
1372 0xffffffff, /* src_mask */
1373 0xffffffff, /* dst_mask */
1374 FALSE), /* pcrel_offset */
1376 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1378 2, /* size (0 = byte, 1 = short, 2 = long) */
1380 FALSE, /* pc_relative */
1382 complain_overflow_bitfield, /* complain_on_overflow */
1383 bfd_elf_generic_reloc, /* special_function */
1384 AARCH64_R_STR (GLOB_DAT), /* name */
1385 TRUE, /* partial_inplace */
1386 0xffffffff, /* src_mask */
1387 0xffffffff, /* dst_mask */
1388 FALSE), /* pcrel_offset */
1390 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1392 2, /* size (0 = byte, 1 = short, 2 = long) */
1394 FALSE, /* pc_relative */
1396 complain_overflow_bitfield, /* complain_on_overflow */
1397 bfd_elf_generic_reloc, /* special_function */
1398 AARCH64_R_STR (JUMP_SLOT), /* name */
1399 TRUE, /* partial_inplace */
1400 0xffffffff, /* src_mask */
1401 0xffffffff, /* dst_mask */
1402 FALSE), /* pcrel_offset */
1404 HOWTO (AARCH64_R (RELATIVE), /* type */
1406 2, /* size (0 = byte, 1 = short, 2 = long) */
1408 FALSE, /* pc_relative */
1410 complain_overflow_bitfield, /* complain_on_overflow */
1411 bfd_elf_generic_reloc, /* special_function */
1412 AARCH64_R_STR (RELATIVE), /* name */
1413 TRUE, /* partial_inplace */
1414 ALL_ONES, /* src_mask */
1415 ALL_ONES, /* dst_mask */
1416 FALSE), /* pcrel_offset */
1418 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1420 2, /* size (0 = byte, 1 = short, 2 = long) */
1422 FALSE, /* pc_relative */
1424 complain_overflow_dont, /* complain_on_overflow */
1425 bfd_elf_generic_reloc, /* special_function */
#if ARCH_SIZE == 64
   AARCH64_R_STR (TLS_DTPMOD64), /* name */
#else
   AARCH64_R_STR (TLS_DTPMOD), /* name */
#endif
   FALSE, /* partial_inplace */
   ALL_ONES, /* dst_mask */
   FALSE), /* pcrel_offset */
1436 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1438 2, /* size (0 = byte, 1 = short, 2 = long) */
1440 FALSE, /* pc_relative */
1442 complain_overflow_dont, /* complain_on_overflow */
1443 bfd_elf_generic_reloc, /* special_function */
#if ARCH_SIZE == 64
   AARCH64_R_STR (TLS_DTPREL64), /* name */
#else
   AARCH64_R_STR (TLS_DTPREL), /* name */
#endif
1449 FALSE, /* partial_inplace */
1451 ALL_ONES, /* dst_mask */
1452 FALSE), /* pcrel_offset */
1454 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1456 2, /* size (0 = byte, 1 = short, 2 = long) */
1458 FALSE, /* pc_relative */
1460 complain_overflow_dont, /* complain_on_overflow */
1461 bfd_elf_generic_reloc, /* special_function */
#if ARCH_SIZE == 64
   AARCH64_R_STR (TLS_TPREL64), /* name */
#else
   AARCH64_R_STR (TLS_TPREL), /* name */
#endif
1467 FALSE, /* partial_inplace */
1469 ALL_ONES, /* dst_mask */
1470 FALSE), /* pcrel_offset */
1472 HOWTO (AARCH64_R (TLSDESC), /* type */
1474 2, /* size (0 = byte, 1 = short, 2 = long) */
1476 FALSE, /* pc_relative */
1478 complain_overflow_dont, /* complain_on_overflow */
1479 bfd_elf_generic_reloc, /* special_function */
1480 AARCH64_R_STR (TLSDESC), /* name */
1481 FALSE, /* partial_inplace */
1483 ALL_ONES, /* dst_mask */
1484 FALSE), /* pcrel_offset */
1486 HOWTO (AARCH64_R (IRELATIVE), /* type */
1488 2, /* size (0 = byte, 1 = short, 2 = long) */
1490 FALSE, /* pc_relative */
1492 complain_overflow_bitfield, /* complain_on_overflow */
1493 bfd_elf_generic_reloc, /* special_function */
1494 AARCH64_R_STR (IRELATIVE), /* name */
1495 FALSE, /* partial_inplace */
1497 ALL_ONES, /* dst_mask */
1498 FALSE), /* pcrel_offset */
1503 static reloc_howto_type elfNN_aarch64_howto_none =
1504 HOWTO (R_AARCH64_NONE, /* type */
1506 3, /* size (0 = byte, 1 = short, 2 = long) */
1508 FALSE, /* pc_relative */
1510 complain_overflow_dont,/* complain_on_overflow */
1511 bfd_elf_generic_reloc, /* special_function */
1512 "R_AARCH64_NONE", /* name */
1513 FALSE, /* partial_inplace */
1516 FALSE); /* pcrel_offset */
1518 /* Given HOWTO, return the bfd internal relocation enumerator. */
1520 static bfd_reloc_code_real_type
1521 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1524 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1525 const ptrdiff_t offset
1526 = howto - elfNN_aarch64_howto_table;
1528 if (offset > 0 && offset < size - 1)
1529 return BFD_RELOC_AARCH64_RELOC_START + offset;
1531 if (howto == &elfNN_aarch64_howto_none)
1532 return BFD_RELOC_AARCH64_NONE;
1534 return BFD_RELOC_AARCH64_RELOC_START;
1537 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1539 static bfd_reloc_code_real_type
1540 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1542 static bfd_boolean initialized_p = FALSE;
1543 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1544 static unsigned int offsets[R_AARCH64_end];
1546 if (initialized_p == FALSE)
1550 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1551 if (elfNN_aarch64_howto_table[i].type != 0)
1552 offsets[elfNN_aarch64_howto_table[i].type] = i;
1554 initialized_p = TRUE;
1557 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1558 return BFD_RELOC_AARCH64_NONE;
1560 /* PR 17512: file: b371e70a. */
1561 if (r_type >= R_AARCH64_end)
1563 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1564 bfd_set_error (bfd_error_bad_value);
1565 return BFD_RELOC_AARCH64_NONE;
1568 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1571 struct elf_aarch64_reloc_map
1573 bfd_reloc_code_real_type from;
1574 bfd_reloc_code_real_type to;
1577 /* Map bfd generic reloc to AArch64-specific reloc. */
1578 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1580 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1582 /* Basic data relocations. */
1583 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1584 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1585 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1586 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1587 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1588 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1589 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1592 /* Given the bfd internal relocation enumerator in CODE, return the
1593 corresponding howto entry. */
1595 static reloc_howto_type *
1596 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1600 /* Convert bfd generic reloc to AArch64-specific reloc. */
1601 if (code < BFD_RELOC_AARCH64_RELOC_START
1602 || code > BFD_RELOC_AARCH64_RELOC_END)
1603 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1604 if (elf_aarch64_reloc_map[i].from == code)
1606 code = elf_aarch64_reloc_map[i].to;
1610 if (code > BFD_RELOC_AARCH64_RELOC_START
1611 && code < BFD_RELOC_AARCH64_RELOC_END)
1612 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1613 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1615 if (code == BFD_RELOC_AARCH64_NONE)
1616 return &elfNN_aarch64_howto_none;
1621 static reloc_howto_type *
1622 elfNN_aarch64_howto_from_type (unsigned int r_type)
1624 bfd_reloc_code_real_type val;
1625 reloc_howto_type *howto;
1630 bfd_set_error (bfd_error_bad_value);
1635 if (r_type == R_AARCH64_NONE)
1636 return &elfNN_aarch64_howto_none;
1638 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1639 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1644 bfd_set_error (bfd_error_bad_value);
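/* Set the howto pointer of BFD_RELOC from the ELF relocation ELF_RELOC.  */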
1649 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1650 Elf_Internal_Rela *elf_reloc)
1652 unsigned int r_type;
1654 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1655 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
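/* Map the BFD relocation enumerator CODE to a howto entry.  */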
1658 static reloc_howto_type *
1659 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1660 bfd_reloc_code_real_type code)
1662 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1667 bfd_set_error (bfd_error_bad_value);
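/* Look up a relocation howto by name R_NAME, case-insensitively.  */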
1671 static reloc_howto_type *
1672 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1677 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1678 if (elfNN_aarch64_howto_table[i].name != NULL
1679 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1680 return &elfNN_aarch64_howto_table[i];
1685 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1686 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1687 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1688 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1690 /* The linker script knows the section names for placement.
1691 The entry_names are used to do simple name mangling on the stubs.
1692 Given a function name, and its type, the stub can be found. The
   name can be changed.  The only requirement is that the %s be present.  */
1694 #define STUB_ENTRY_NAME "__%s_veneer"
/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
1698 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1700 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1701 (((1 << 25) - 1) << 2)
#define AARCH64_MAX_BWD_BRANCH_OFFSET \
  (-((1 << 25) << 2))
1705 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1706 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
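/* Return TRUE if the page-relative offset from PLACE to VALUE fits in
   the signed 21-bit immediate of an ADRP instruction.  */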
1709 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1711 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1712 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
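/* Return TRUE if VALUE is within the +/-128MB direct branch (B/BL)
   range of PLACE.  */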
1716 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1718 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1719 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1720 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1723 static const uint32_t aarch64_adrp_branch_stub [] =
1725 0x90000010, /* adrp ip0, X */
1726 /* R_AARCH64_ADR_HI21_PCREL(X) */
1727 0x91000210, /* add ip0, ip0, :lo12:X */
1728 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1729 0xd61f0200, /* br ip0 */
1732 static const uint32_t aarch64_long_branch_stub[] =
1735 0x58000090, /* ldr ip0, 1f */
1737 0x18000090, /* ldr wip0, 1f */
1739 0x10000011, /* adr ip1, #0 */
1740 0x8b110210, /* add ip0, ip0, ip1 */
1741 0xd61f0200, /* br ip0 */
1742 0x00000000, /* 1: .xword or .word
1743 R_AARCH64_PRELNN(X) + 12
1748 static const uint32_t aarch64_erratum_835769_stub[] =
1750 0x00000000, /* Placeholder for multiply accumulate. */
1751 0x14000000, /* b <label> */
1754 static const uint32_t aarch64_erratum_843419_stub[] =
1756 0x00000000, /* Placeholder for LDR instruction. */
1757 0x14000000, /* b <label> */
/* Section name for stubs is the associated section name plus this
   suffix.  */
1762 #define STUB_SUFFIX ".stub"
1764 enum elf_aarch64_stub_type
1767 aarch64_stub_adrp_branch,
1768 aarch64_stub_long_branch,
1769 aarch64_stub_erratum_835769_veneer,
1770 aarch64_stub_erratum_843419_veneer,
1773 struct elf_aarch64_stub_hash_entry
1775 /* Base hash table entry structure. */
1776 struct bfd_hash_entry root;
1778 /* The stub section. */
1781 /* Offset within stub_sec of the beginning of this stub. */
1782 bfd_vma stub_offset;
1784 /* Given the symbol's value and its section we can determine its final
1785 value when building the stubs (so the stub knows where to jump). */
1786 bfd_vma target_value;
1787 asection *target_section;
1789 enum elf_aarch64_stub_type stub_type;
1791 /* The symbol table entry, if any, that this was derived from. */
1792 struct elf_aarch64_link_hash_entry *h;
1794 /* Destination symbol type */
1795 unsigned char st_type;
1797 /* Where this stub is being called from, or, in the case of combined
1798 stub sections, the first input section in the group. */
1801 /* The name for the local symbol at the start of this stub. The
1802 stub name in the hash table has to be unique; this does not, so
1803 it can be friendlier. */
1806 /* The instruction which caused this stub to be generated (only valid for
1807 erratum 835769 workaround stubs at present). */
1808 uint32_t veneered_insn;
1810 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
1811 bfd_vma adrp_offset;
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */
1817 typedef struct elf_elf_section_map
1822 elf_aarch64_section_map;
1825 typedef struct _aarch64_elf_section_data
1827 struct bfd_elf_section_data elf;
1828 unsigned int mapcount;
1829 unsigned int mapsize;
1830 elf_aarch64_section_map *map;
1832 _aarch64_elf_section_data;
1834 #define elf_aarch64_section_data(sec) \
1835 ((_aarch64_elf_section_data *) elf_section_data (sec))
1837 /* The size of the thread control block which is defined to be two pointers. */
1838 #define TCB_SIZE (ARCH_SIZE/8)*2
1840 struct elf_aarch64_local_symbol
1842 unsigned int got_type;
1843 bfd_signed_vma got_refcount;
  /* Offset of the GOTPLT entry reserved for the TLS descriptor.  The
     offset is from the end of the jump table and reserved entries
     within the PLTGOT.

     The magic value (bfd_vma) -1 indicates that an offset has not been
     allocated.  */
  bfd_vma tlsdesc_got_jump_table_offset;
1855 struct elf_aarch64_obj_tdata
1857 struct elf_obj_tdata root;
1859 /* local symbol descriptors */
1860 struct elf_aarch64_local_symbol *locals;
1862 /* Zero to warn when linking objects with incompatible enum sizes. */
1863 int no_enum_size_warning;
1865 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1866 int no_wchar_size_warning;
1869 #define elf_aarch64_tdata(bfd) \
1870 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1872 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1874 #define is_aarch64_elf(bfd) \
1875 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1876 && elf_tdata (bfd) != NULL \
1877 && elf_object_id (bfd) == AARCH64_ELF_DATA)
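/* Allocate AArch64-specific BFD data for ABFD.  */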
1880 elfNN_aarch64_mkobject (bfd *abfd)
1882 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1886 #define elf_aarch64_hash_entry(ent) \
1887 ((struct elf_aarch64_link_hash_entry *)(ent))
1889 #define GOT_UNKNOWN 0
1890 #define GOT_NORMAL 1
1891 #define GOT_TLS_GD 2
1892 #define GOT_TLS_IE 4
1893 #define GOT_TLSDESC_GD 8
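/* Nonzero if TYPE requires a general-dynamic style GOT entry of either
   kind (traditional GD or TLS descriptor).  */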
1895 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1897 /* AArch64 ELF linker hash entry. */
1898 struct elf_aarch64_link_hash_entry
1900 struct elf_link_hash_entry root;
1902 /* Track dynamic relocs copied for this symbol. */
1903 struct elf_dyn_relocs *dyn_relocs;
1905 /* Since PLT entries have variable size, we need to record the
1906 index into .got.plt instead of recomputing it from the PLT offset. */
1908 bfd_signed_vma plt_got_offset;
1910 /* Bit mask representing the type of GOT entry(s) if any required by this symbol. */
1912 unsigned int got_type;
1914 /* A pointer to the most recently used stub hash entry against this symbol. */
1916 struct elf_aarch64_stub_hash_entry *stub_cache;
1918 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1919 is from the end of the jump table and reserved entries within the PLTGOT.
1921 The magic value (bfd_vma) -1 indicates that an offset has not been allocated. */
1923 bfd_vma tlsdesc_got_jump_table_offset;
1927 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1929 unsigned long r_symndx)
1932 return elf_aarch64_hash_entry (h)->got_type;
1934 if (! elf_aarch64_locals (abfd))
1937 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1940 /* Get the AArch64 elf linker hash table from a link_info structure. */
1941 #define elf_aarch64_hash_table(info) \
1942 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1944 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1945 ((struct elf_aarch64_stub_hash_entry *) \
1946 bfd_hash_lookup ((table), (string), (create), (copy)))
1948 /* AArch64 ELF linker hash table. */
1949 struct elf_aarch64_link_hash_table
1951 /* The main hash table. */
1952 struct elf_link_hash_table root;
1954 /* Nonzero to force PIC branch veneers. */
1957 /* Fix erratum 835769. */
1958 int fix_erratum_835769;
1960 /* Fix erratum 843419. */
1961 int fix_erratum_843419;
1963 /* Enable ADRP->ADR rewrite for erratum 843419 workaround. */
1964 int fix_erratum_843419_adr;
1966 /* The number of bytes in the initial entry in the PLT. */
1967 bfd_size_type plt_header_size;
1969 /* The number of bytes in the subsequent PLT entries. */
1970 bfd_size_type plt_entry_size;
1972 /* Short-cuts to get to dynamic linker sections. */
1976 /* Small local sym cache. */
1977 struct sym_cache sym_cache;
1979 /* For convenience in allocate_dynrelocs. */
1982 /* The amount of space used by the reserved portion of the sgotplt
1983 section, plus whatever space is used by the jump slots. */
1984 bfd_vma sgotplt_jump_table_size;
1986 /* The stub hash table. */
1987 struct bfd_hash_table stub_hash_table;
1989 /* Linker stub bfd. */
1992 /* Linker call-backs. */
1993 asection *(*add_stub_section) (const char *, asection *);
1994 void (*layout_sections_again) (void);
1996 /* Array to keep track of which stub sections have been created, and
1997 information on stub grouping. */
2000 /* This is the section to which stubs in the group will be attached. */
2003 /* The stub section. */
2007 /* Assorted information used by elfNN_aarch64_size_stubs. */
2008 unsigned int bfd_count;
2010 asection **input_list;
2012 /* The offset into splt of the PLT entry for the TLS descriptor
2013 resolver. Special values are 0, if not necessary (or not found
2014 to be necessary yet), and -1 if needed but not determined yet. */
2016 bfd_vma tlsdesc_plt;
2018 /* The GOT offset for the lazy trampoline. Communicated to the
2019 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
2020 indicates an offset is not allocated. */
2021 bfd_vma dt_tlsdesc_got;
2023 /* Used by local STT_GNU_IFUNC symbols. */
2024 htab_t loc_hash_table;
2025 void * loc_hash_memory;
2028 /* Create an entry in an AArch64 ELF linker hash table. */
2030 static struct bfd_hash_entry *
2031 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
2032 struct bfd_hash_table *table,
2035 struct elf_aarch64_link_hash_entry *ret =
2036 (struct elf_aarch64_link_hash_entry *) entry;
2038 /* Allocate the structure if it has not already been allocated by a subclass. */
2041 ret = bfd_hash_allocate (table,
2042 sizeof (struct elf_aarch64_link_hash_entry));
2044 return (struct bfd_hash_entry *) ret;
2046 /* Call the allocation method of the superclass. */
2047 ret = ((struct elf_aarch64_link_hash_entry *)
2048 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2052 ret->dyn_relocs = NULL;
2053 ret->got_type = GOT_UNKNOWN;
2054 ret->plt_got_offset = (bfd_vma) - 1;
2055 ret->stub_cache = NULL;
2056 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
2059 return (struct bfd_hash_entry *) ret;
2062 /* Initialize an entry in the stub hash table. */
2064 static struct bfd_hash_entry *
2065 stub_hash_newfunc (struct bfd_hash_entry *entry,
2066 struct bfd_hash_table *table, const char *string)
2068 /* Allocate the structure if it has not already been allocated by a subclass. */
2072 entry = bfd_hash_allocate (table,
2074 elf_aarch64_stub_hash_entry));
2079 /* Call the allocation method of the superclass. */
2080 entry = bfd_hash_newfunc (entry, table, string);
2083 struct elf_aarch64_stub_hash_entry *eh;
2085 /* Initialize the local fields. */
2086 eh = (struct elf_aarch64_stub_hash_entry *) entry;
2087 eh->adrp_offset = 0;
2088 eh->stub_sec = NULL;
2089 eh->stub_offset = 0;
2090 eh->target_value = 0;
2091 eh->target_section = NULL;
2092 eh->stub_type = aarch64_stub_none;
2100 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
2101 for local symbols so that we can handle local STT_GNU_IFUNC symbols
2102 as global symbols. We reuse indx and dynstr_index for local symbol
2103 hash since they aren't used by global symbols in this backend. */
2106 elfNN_aarch64_local_htab_hash (const void *ptr)
2108 struct elf_link_hash_entry *h
2109 = (struct elf_link_hash_entry *) ptr;
2110 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2113 /* Compare local hash entries. */
2116 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2118 struct elf_link_hash_entry *h1
2119 = (struct elf_link_hash_entry *) ptr1;
2120 struct elf_link_hash_entry *h2
2121 = (struct elf_link_hash_entry *) ptr2;
2123 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2126 /* Find and/or create a hash entry for a local symbol. */
2128 static struct elf_link_hash_entry *
2129 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2130 bfd *abfd, const Elf_Internal_Rela *rel,
2133 struct elf_aarch64_link_hash_entry e, *ret;
2134 asection *sec = abfd->sections;
2135 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2136 ELFNN_R_SYM (rel->r_info));
2139 e.root.indx = sec->id;
2140 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2141 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2142 create ? INSERT : NO_INSERT);
2149 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2153 ret = (struct elf_aarch64_link_hash_entry *)
2154 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2155 sizeof (struct elf_aarch64_link_hash_entry));
2158 memset (ret, 0, sizeof (*ret));
2159 ret->root.indx = sec->id;
2160 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2161 ret->root.dynindx = -1;
2167 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2170 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2171 struct elf_link_hash_entry *dir,
2172 struct elf_link_hash_entry *ind)
2174 struct elf_aarch64_link_hash_entry *edir, *eind;
2176 edir = (struct elf_aarch64_link_hash_entry *) dir;
2177 eind = (struct elf_aarch64_link_hash_entry *) ind;
2179 if (eind->dyn_relocs != NULL)
2181 if (edir->dyn_relocs != NULL)
2183 struct elf_dyn_relocs **pp;
2184 struct elf_dyn_relocs *p;
2186 /* Add reloc counts against the indirect sym to the direct sym
2187 list. Merge any entries against the same section. */
2188 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2190 struct elf_dyn_relocs *q;
2192 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2193 if (q->sec == p->sec)
2195 q->pc_count += p->pc_count;
2196 q->count += p->count;
2203 *pp = edir->dyn_relocs;
2206 edir->dyn_relocs = eind->dyn_relocs;
2207 eind->dyn_relocs = NULL;
2210 if (ind->root.type == bfd_link_hash_indirect)
2212 /* Copy over PLT info. */
2213 if (dir->got.refcount <= 0)
2215 edir->got_type = eind->got_type;
2216 eind->got_type = GOT_UNKNOWN;
2220 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2223 /* Destroy an AArch64 elf linker hash table. */
2226 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2228 struct elf_aarch64_link_hash_table *ret
2229 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2231 if (ret->loc_hash_table)
2232 htab_delete (ret->loc_hash_table);
2233 if (ret->loc_hash_memory)
2234 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2236 bfd_hash_table_free (&ret->stub_hash_table);
2237 _bfd_elf_link_hash_table_free (obfd);
2240 /* Create an AArch64 elf linker hash table. */
2242 static struct bfd_link_hash_table *
2243 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2245 struct elf_aarch64_link_hash_table *ret;
2246 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2248 ret = bfd_zmalloc (amt);
2252 if (!_bfd_elf_link_hash_table_init
2253 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2254 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2260 ret->plt_header_size = PLT_ENTRY_SIZE;
2261 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2263 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2265 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2266 sizeof (struct elf_aarch64_stub_hash_entry)))
2268 _bfd_elf_link_hash_table_free (abfd);
2272 ret->loc_hash_table = htab_try_create (1024,
2273 elfNN_aarch64_local_htab_hash,
2274 elfNN_aarch64_local_htab_eq,
2276 ret->loc_hash_memory = objalloc_create ();
2277 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2279 elfNN_aarch64_link_hash_table_free (abfd);
2282 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2284 return &ret->root.root;
2288 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2289 bfd_vma offset, bfd_vma value)
2291 reloc_howto_type *howto;
2294 howto = elfNN_aarch64_howto_from_type (r_type);
2295 place = (input_section->output_section->vma + input_section->output_offset
2298 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2299 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2300 return _bfd_aarch64_elf_put_addend (input_bfd,
2301 input_section->contents + offset, r_type,
2305 static enum elf_aarch64_stub_type
2306 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2308 if (aarch64_valid_for_adrp_p (value, place))
2309 return aarch64_stub_adrp_branch;
2310 return aarch64_stub_long_branch;
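/* Editorial note (not part of the original source): aarch64_valid_for_adrp_p
   tests whether the destination page lies within the +/-4GB reach of an ADRP
   at PLACE; if it does, the short ADRP/ADD/BR stub is used, otherwise the
   long-branch stub, which materialises the destination from a PC-relative
   literal (see aarch64_build_one_stub below), is selected.  */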
2313 /* Determine the type of stub needed, if any, for a call. */
2315 static enum elf_aarch64_stub_type
2316 aarch64_type_of_stub (struct bfd_link_info *info,
2317 asection *input_sec,
2318 const Elf_Internal_Rela *rel,
2319 unsigned char st_type,
2320 struct elf_aarch64_link_hash_entry *hash,
2321 bfd_vma destination)
2324 bfd_signed_vma branch_offset;
2325 unsigned int r_type;
2326 struct elf_aarch64_link_hash_table *globals;
2327 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2328 bfd_boolean via_plt_p;
2330 if (st_type != STT_FUNC)
2333 globals = elf_aarch64_hash_table (info);
2334 via_plt_p = (globals->root.splt != NULL && hash != NULL
2335 && hash->root.plt.offset != (bfd_vma) - 1);
2340 /* Determine where the call point is. */
2341 location = (input_sec->output_offset
2342 + input_sec->output_section->vma + rel->r_offset);
2344 branch_offset = (bfd_signed_vma) (destination - location);
2346 r_type = ELFNN_R_TYPE (rel->r_info);
2348 /* We don't want to redirect any old unconditional jump in this way,
2349 only one which is being used for a sibcall, where it is
2350 acceptable for the IP0 and IP1 registers to be clobbered. */
2351 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2352 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2353 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2355 stub_type = aarch64_stub_long_branch;
2361 /* Build a name for an entry in the stub hash table. */
2364 elfNN_aarch64_stub_name (const asection *input_section,
2365 const asection *sym_sec,
2366 const struct elf_aarch64_link_hash_entry *hash,
2367 const Elf_Internal_Rela *rel)
2374 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2375 stub_name = bfd_malloc (len);
2376 if (stub_name != NULL)
2377 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2378 (unsigned int) input_section->id,
2379 hash->root.root.root.string,
2384 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2385 stub_name = bfd_malloc (len);
2386 if (stub_name != NULL)
2387 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2388 (unsigned int) input_section->id,
2389 (unsigned int) sym_sec->id,
2390 (unsigned int) ELFNN_R_SYM (rel->r_info),
2397 /* Look up an entry in the stub hash. Stub entries are cached because
2398 creating the stub name takes a bit of time. */
2400 static struct elf_aarch64_stub_hash_entry *
2401 elfNN_aarch64_get_stub_entry (const asection *input_section,
2402 const asection *sym_sec,
2403 struct elf_link_hash_entry *hash,
2404 const Elf_Internal_Rela *rel,
2405 struct elf_aarch64_link_hash_table *htab)
2407 struct elf_aarch64_stub_hash_entry *stub_entry;
2408 struct elf_aarch64_link_hash_entry *h =
2409 (struct elf_aarch64_link_hash_entry *) hash;
2410 const asection *id_sec;
2412 if ((input_section->flags & SEC_CODE) == 0)
2415 /* If this input section is part of a group of sections sharing one
2416 stub section, then use the id of the first section in the group.
2417 Stub names need to include a section id, as there may well be
2418 more than one stub used to reach say, printf, and we need to
2419 distinguish between them. */
2420 id_sec = htab->stub_group[input_section->id].link_sec;
2422 if (h != NULL && h->stub_cache != NULL
2423 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2425 stub_entry = h->stub_cache;
2431 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2432 if (stub_name == NULL)
2435 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2436 stub_name, FALSE, FALSE);
2438 h->stub_cache = stub_entry;
2447 /* Create a stub section. */
2450 _bfd_aarch64_create_stub_section (asection *section,
2451 struct elf_aarch64_link_hash_table *htab)
2457 namelen = strlen (section->name);
2458 len = namelen + sizeof (STUB_SUFFIX);
2459 s_name = bfd_alloc (htab->stub_bfd, len);
2463 memcpy (s_name, section->name, namelen);
2464 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2465 return (*htab->add_stub_section) (s_name, section);
2469 /* Find or create a stub section for a link section.
2471 Fix or create the stub section used to collect stubs attached to
2472 the specified link section. */
2475 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2476 struct elf_aarch64_link_hash_table *htab)
2478 if (htab->stub_group[link_section->id].stub_sec == NULL)
2479 htab->stub_group[link_section->id].stub_sec
2480 = _bfd_aarch64_create_stub_section (link_section, htab);
2481 return htab->stub_group[link_section->id].stub_sec;
2485 /* Find or create a stub section in the stub group for an input section. */
2489 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2490 struct elf_aarch64_link_hash_table *htab)
2492 asection *link_sec = htab->stub_group[section->id].link_sec;
2493 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2497 /* Add a new stub entry in the stub group associated with an input
2498 section to the stub hash. Not all fields of the new stub entry are initialised. */
2501 static struct elf_aarch64_stub_hash_entry *
2502 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2504 struct elf_aarch64_link_hash_table *htab)
2508 struct elf_aarch64_stub_hash_entry *stub_entry;
2510 link_sec = htab->stub_group[section->id].link_sec;
2511 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2513 /* Enter this entry into the linker stub hash table. */
2514 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2516 if (stub_entry == NULL)
2518 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2519 section->owner, stub_name);
2523 stub_entry->stub_sec = stub_sec;
2524 stub_entry->stub_offset = 0;
2525 stub_entry->id_sec = link_sec;
2530 /* Add a new stub entry in the final stub section to the stub hash.
2531 Not all fields of the new stub entry are initialised. */
2533 static struct elf_aarch64_stub_hash_entry *
2534 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
2535 asection *link_section,
2536 struct elf_aarch64_link_hash_table *htab)
2539 struct elf_aarch64_stub_hash_entry *stub_entry;
2541 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
2542 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2544 if (stub_entry == NULL)
2546 (*_bfd_error_handler) (_("cannot create stub entry %s"), stub_name);
2550 stub_entry->stub_sec = stub_sec;
2551 stub_entry->stub_offset = 0;
2552 stub_entry->id_sec = link_section;
2559 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2560 void *in_arg ATTRIBUTE_UNUSED)
2562 struct elf_aarch64_stub_hash_entry *stub_entry;
2567 bfd_vma veneered_insn_loc;
2568 bfd_vma veneer_entry_loc;
2569 bfd_signed_vma branch_offset = 0;
2570 unsigned int template_size;
2571 const uint32_t *template;
2574 /* Massage our args to the form they really have. */
2575 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2577 stub_sec = stub_entry->stub_sec;
2579 /* Make a note of the offset within the stubs for this entry. */
2580 stub_entry->stub_offset = stub_sec->size;
2581 loc = stub_sec->contents + stub_entry->stub_offset;
2583 stub_bfd = stub_sec->owner;
2585 /* This is the address of the stub destination. */
2586 sym_value = (stub_entry->target_value
2587 + stub_entry->target_section->output_offset
2588 + stub_entry->target_section->output_section->vma);
2590 if (stub_entry->stub_type == aarch64_stub_long_branch)
2592 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2593 + stub_sec->output_offset);
2595 /* See if we can relax the stub. */
2596 if (aarch64_valid_for_adrp_p (sym_value, place))
2597 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2600 switch (stub_entry->stub_type)
2602 case aarch64_stub_adrp_branch:
2603 template = aarch64_adrp_branch_stub;
2604 template_size = sizeof (aarch64_adrp_branch_stub);
2606 case aarch64_stub_long_branch:
2607 template = aarch64_long_branch_stub;
2608 template_size = sizeof (aarch64_long_branch_stub);
2610 case aarch64_stub_erratum_835769_veneer:
2611 template = aarch64_erratum_835769_stub;
2612 template_size = sizeof (aarch64_erratum_835769_stub);
2614 case aarch64_stub_erratum_843419_veneer:
2615 template = aarch64_erratum_843419_stub;
2616 template_size = sizeof (aarch64_erratum_843419_stub);
2622 for (i = 0; i < (template_size / sizeof template[0]); i++)
2624 bfd_putl32 (template[i], loc);
2628 template_size = (template_size + 7) & ~7;
2629 stub_sec->size += template_size;
2631 switch (stub_entry->stub_type)
2633 case aarch64_stub_adrp_branch:
2634 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2635 stub_entry->stub_offset, sym_value))
2636 /* The stub would not have been relaxed if the offset was out of range. */
2640 if (aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
2641 stub_entry->stub_offset + 4, sym_value))
2645 case aarch64_stub_long_branch:
2646 /* We want the value relative to the address 12 bytes back from the value itself. */
2648 if (aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
2649 stub_entry->stub_offset + 16, sym_value + 12))
2653 case aarch64_stub_erratum_835769_veneer:
2654 veneered_insn_loc = stub_entry->target_section->output_section->vma
2655 + stub_entry->target_section->output_offset
2656 + stub_entry->target_value;
2657 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2658 + stub_entry->stub_sec->output_offset
2659 + stub_entry->stub_offset;
2660 branch_offset = veneered_insn_loc - veneer_entry_loc;
2661 branch_offset >>= 2;
2662 branch_offset &= 0x3ffffff;
2663 bfd_putl32 (stub_entry->veneered_insn,
2664 stub_sec->contents + stub_entry->stub_offset);
2665 bfd_putl32 (template[1] | branch_offset,
2666 stub_sec->contents + stub_entry->stub_offset + 4);
2669 case aarch64_stub_erratum_843419_veneer:
2670 if (aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
2671 stub_entry->stub_offset + 4, sym_value + 4))
2682 /* As above, but don't actually build the stub. Just bump offset so
2683 we know stub section sizes. */
2686 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2687 void *in_arg ATTRIBUTE_UNUSED)
2689 struct elf_aarch64_stub_hash_entry *stub_entry;
2692 /* Massage our args to the form they really have. */
2693 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2695 switch (stub_entry->stub_type)
2697 case aarch64_stub_adrp_branch:
2698 size = sizeof (aarch64_adrp_branch_stub);
2700 case aarch64_stub_long_branch:
2701 size = sizeof (aarch64_long_branch_stub);
2703 case aarch64_stub_erratum_835769_veneer:
2704 size = sizeof (aarch64_erratum_835769_stub);
2706 case aarch64_stub_erratum_843419_veneer:
2707 size = sizeof (aarch64_erratum_843419_stub);
2713 size = (size + 7) & ~7;
2714 stub_entry->stub_sec->size += size;
2718 /* External entry points for sizing and building linker stubs. */
2720 /* Set up various things so that we can make a list of input sections
2721 for each output section included in the link. Returns -1 on error,
2722 0 when no stubs will be needed, and 1 on success. */
2725 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2726 struct bfd_link_info *info)
2729 unsigned int bfd_count;
2730 int top_id, top_index;
2732 asection **input_list, **list;
2734 struct elf_aarch64_link_hash_table *htab =
2735 elf_aarch64_hash_table (info);
2737 if (!is_elf_hash_table (htab))
2740 /* Count the number of input BFDs and find the top input section id. */
2741 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2742 input_bfd != NULL; input_bfd = input_bfd->link.next)
2745 for (section = input_bfd->sections;
2746 section != NULL; section = section->next)
2748 if (top_id < section->id)
2749 top_id = section->id;
2752 htab->bfd_count = bfd_count;
2754 amt = sizeof (struct map_stub) * (top_id + 1);
2755 htab->stub_group = bfd_zmalloc (amt);
2756 if (htab->stub_group == NULL)
2759 /* We can't use output_bfd->section_count here to find the top output
2760 section index as some sections may have been removed, and
2761 _bfd_strip_section_from_output doesn't renumber the indices. */
2762 for (section = output_bfd->sections, top_index = 0;
2763 section != NULL; section = section->next)
2765 if (top_index < section->index)
2766 top_index = section->index;
2769 htab->top_index = top_index;
2770 amt = sizeof (asection *) * (top_index + 1);
2771 input_list = bfd_malloc (amt);
2772 htab->input_list = input_list;
2773 if (input_list == NULL)
2776 /* For sections we aren't interested in, mark their entries with a
2777 value we can check later. */
2778 list = input_list + top_index;
2779 do
2780 *list = bfd_abs_section_ptr;
2781 while (list-- != input_list);
2783 for (section = output_bfd->sections;
2784 section != NULL; section = section->next)
2786 if ((section->flags & SEC_CODE) != 0)
2787 input_list[section->index] = NULL;
2793 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2794 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2796 /* The linker repeatedly calls this function for each input section,
2797 in the order that input sections are linked into output sections.
2798 Build lists of input sections to determine groupings between which
2799 we may insert linker stubs. */
2802 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2804 struct elf_aarch64_link_hash_table *htab =
2805 elf_aarch64_hash_table (info);
2807 if (isec->output_section->index <= htab->top_index)
2809 asection **list = htab->input_list + isec->output_section->index;
2811 if (*list != bfd_abs_section_ptr)
2813 /* Steal the link_sec pointer for our list. */
2814 /* This happens to make the list in reverse order,
2815 which is what we want. */
2816 PREV_SEC (isec) = *list;
2822 /* See whether we can group stub sections together. Grouping stub
2823 sections may result in fewer stubs. More importantly, we need to
2824 put all .init* and .fini* stubs at the beginning of the .init or
2825 .fini output sections respectively, because glibc splits the
2826 _init and _fini functions into multiple parts. Putting a stub in
2827 the middle of a function is not a good idea. */
2830 group_sections (struct elf_aarch64_link_hash_table *htab,
2831 bfd_size_type stub_group_size,
2832 bfd_boolean stubs_always_before_branch)
2834 asection **list = htab->input_list + htab->top_index;
2838 asection *tail = *list;
2840 if (tail == bfd_abs_section_ptr)
2843 while (tail != NULL)
2847 bfd_size_type total;
2851 while ((prev = PREV_SEC (curr)) != NULL
2852 && ((total += curr->output_offset - prev->output_offset)
2856 /* OK, the size from the start of CURR to the end is less
2857 than stub_group_size and thus can be handled by one stub
2858 section. (Or the tail section is itself larger than
2859 stub_group_size, in which case we may be toast.)
2860 We should really be keeping track of the total size of
2861 stubs added here, as stubs contribute to the final output section size. */
2865 prev = PREV_SEC (tail);
2866 /* Set up this stub group. */
2867 htab->stub_group[tail->id].link_sec = curr;
2869 while (tail != curr && (tail = prev) != NULL);
2871 /* But wait, there's more! Input sections up to stub_group_size
2872 bytes before the stub section can be handled by it too. */
2873 if (!stubs_always_before_branch)
2877 && ((total += tail->output_offset - prev->output_offset)
2881 prev = PREV_SEC (tail);
2882 htab->stub_group[tail->id].link_sec = curr;
2888 while (list-- != htab->input_list);
2890 free (htab->input_list);
2895 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2897 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2898 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2899 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2900 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2901 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2902 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2904 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2905 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2906 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2907 #define AARCH64_ZR 0x1f
2909 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2910 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. */
2912 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2913 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2914 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2915 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2916 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2917 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2918 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2919 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2920 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2921 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2922 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2923 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2924 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2925 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2926 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2927 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2928 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2929 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
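/* Illustrative sketch (not part of the original source, disabled with #if 0):
   decoding a concrete instruction with the helpers above.  0xf9400001 is
   "ldr x1, [x0]", an unsigned-immediate load, so AARCH64_LDST and
   AARCH64_LDST_UIMM both match, AARCH64_LD reports a load, and the register
   fields decode to Rt = 1, Rn = 0.  */
#if 0
static void
aarch64_insn_decode_example (void)
{
  uint32_t insn = 0xf9400001;		/* ldr x1, [x0]  */
  BFD_ASSERT (AARCH64_LDST (insn));
  BFD_ASSERT (AARCH64_LDST_UIMM (insn));
  BFD_ASSERT (AARCH64_LD (insn));
  BFD_ASSERT (AARCH64_RT (insn) == 1);
  BFD_ASSERT (AARCH64_RN (insn) == 0);
}
#endif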
2931 /* Classify an INSN if it is indeed a load/store.
2933 Return TRUE if INSN is a LD/ST instruction otherwise return FALSE.
2935 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2938 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.
2943 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2944 bfd_boolean *pair, bfd_boolean *load)
2952 /* Bail out quickly if INSN doesn't fall into the load-store encoding space. */
2954 if (!AARCH64_LDST (insn))
2959 if (AARCH64_LDST_EX (insn))
2961 *rt = AARCH64_RT (insn);
2963 if (AARCH64_BIT (insn, 21) == 1)
2966 *rt2 = AARCH64_RT2 (insn);
2968 *load = AARCH64_LD (insn);
2971 else if (AARCH64_LDST_NAP (insn)
2972 || AARCH64_LDSTP_PI (insn)
2973 || AARCH64_LDSTP_O (insn)
2974 || AARCH64_LDSTP_PRE (insn))
2977 *rt = AARCH64_RT (insn);
2978 *rt2 = AARCH64_RT2 (insn);
2979 *load = AARCH64_LD (insn);
2982 else if (AARCH64_LDST_PCREL (insn)
2983 || AARCH64_LDST_UI (insn)
2984 || AARCH64_LDST_PIIMM (insn)
2985 || AARCH64_LDST_U (insn)
2986 || AARCH64_LDST_PREIMM (insn)
2987 || AARCH64_LDST_RO (insn)
2988 || AARCH64_LDST_UIMM (insn))
2990 *rt = AARCH64_RT (insn);
2992 if (AARCH64_LDST_PCREL (insn))
2994 opc = AARCH64_BITS (insn, 22, 2);
2995 v = AARCH64_BIT (insn, 26);
2996 opc_v = opc | (v << 2);
2997 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
2998 || opc_v == 5 || opc_v == 7);
3001 else if (AARCH64_LDST_SIMD_M (insn)
3002 || AARCH64_LDST_SIMD_M_PI (insn))
3004 *rt = AARCH64_RT (insn);
3005 *load = AARCH64_BIT (insn, 22);
3006 opcode = (insn >> 12) & 0xf;
3033 else if (AARCH64_LDST_SIMD_S (insn)
3034 || AARCH64_LDST_SIMD_S_PI (insn))
3036 *rt = AARCH64_RT (insn);
3037 r = (insn >> 21) & 1;
3038 *load = AARCH64_BIT (insn, 22);
3039 opcode = (insn >> 13) & 0x7;
3051 *rt2 = *rt + (r == 0 ? 2 : 3);
3059 *rt2 = *rt + (r == 0 ? 2 : 3);
3071 /* Return TRUE if INSN is multiply-accumulate. */
3074 aarch64_mlxl_p (uint32_t insn)
3076 uint32_t op31 = AARCH64_OP31 (insn);
3078 if (AARCH64_MAC (insn)
3079 && (op31 == 0 || op31 == 1 || op31 == 5)
3080 /* Exclude MUL instructions which are encoded as a multiply accumulate with RA = XZR. */
3082 && AARCH64_RA (insn) != AARCH64_ZR)
3088 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
3089 it is possible for a 64-bit multiply-accumulate instruction to generate an
3090 incorrect result. The details are quite complex and hard to
3091 determine statically, since branches in the code may exist in some
3092 circumstances, but all cases end with a memory (load, store, or
3093 prefetch) instruction followed immediately by the multiply-accumulate
3094 operation. We employ a linker patching technique, by moving the potentially
3095 affected multiply-accumulate instruction into a patch region and replacing
3096 the original instruction with a branch to the patch. This function checks
3097 if INSN_1 is the memory operation followed by a multiply-accumulate
3098 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
3099 if INSN_1 and INSN_2 are safe. */
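/* Illustrative example (not part of the original source): a sequence such as

     ldr  x10, [x2]		<- memory operation           (insn_1)
     madd x0, x1, x3, x4	<- 64-bit multiply-accumulate (insn_2)

   matches the erratum criteria checked below, whereas

     ldr  x1, [x2]
     madd x0, x1, x3, x4	<- the loaded x1 feeds the MADD

   has a true (RAW) dependency on the load and is treated as safe.  */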
3102 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3112 if (aarch64_mlxl_p (insn_2)
3113 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3115 /* Any SIMD memory op is independent of the subsequent MLA
3116 by definition of the erratum. */
3117 if (AARCH64_BIT (insn_1, 26))
3120 /* If not SIMD, check for integer memory ops and MLA relationship. */
3121 rn = AARCH64_RN (insn_2);
3122 ra = AARCH64_RA (insn_2);
3123 rm = AARCH64_RM (insn_2);
3125 /* If this is a load and there's a true(RAW) dependency, we are safe
3126 and this is not an erratum sequence. */
3128 (rt == rn || rt == rm || rt == ra
3129 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3132 /* We conservatively put out stubs for all other cases (including
3140 /* Used to order a list of mapping symbols by address. */
3143 elf_aarch64_compare_mapping (const void *a, const void *b)
3145 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3146 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3148 if (amap->vma > bmap->vma)
3150 else if (amap->vma < bmap->vma)
3152 else if (amap->type > bmap->type)
3153 /* Ensure results do not depend on the host qsort for objects with
3154 multiple mapping symbols at the same address by sorting on type after vma. */
3157 else if (amap->type < bmap->type)
3165 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3167 char *stub_name = (char *) bfd_malloc
3168 (strlen ("__erratum_835769_veneer_") + 16);
3169 sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes);
3173 /* Scan for Cortex-A53 erratum 835769 sequence.
3175 Return TRUE on a successful scan, FALSE on abnormal termination. */
3178 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3179 struct bfd_link_info *info,
3180 unsigned int *num_fixes_p)
3183 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3184 unsigned int num_fixes = *num_fixes_p;
3189 for (section = input_bfd->sections;
3191 section = section->next)
3193 bfd_byte *contents = NULL;
3194 struct _aarch64_elf_section_data *sec_data;
3197 if (elf_section_type (section) != SHT_PROGBITS
3198 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3199 || (section->flags & SEC_EXCLUDE) != 0
3200 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3201 || (section->output_section == bfd_abs_section_ptr))
3204 if (elf_section_data (section)->this_hdr.contents != NULL)
3205 contents = elf_section_data (section)->this_hdr.contents;
3206 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3209 sec_data = elf_aarch64_section_data (section);
3211 qsort (sec_data->map, sec_data->mapcount,
3212 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3214 for (span = 0; span < sec_data->mapcount; span++)
3216 unsigned int span_start = sec_data->map[span].vma;
3217 unsigned int span_end = ((span == sec_data->mapcount - 1)
3218 ? sec_data->map[0].vma + section->size
3219 : sec_data->map[span + 1].vma);
3221 char span_type = sec_data->map[span].type;
3223 if (span_type == 'd')
3226 for (i = span_start; i + 4 < span_end; i += 4)
3228 uint32_t insn_1 = bfd_getl32 (contents + i);
3229 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3231 if (aarch64_erratum_sequence (insn_1, insn_2))
3233 struct elf_aarch64_stub_hash_entry *stub_entry;
3234 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3238 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3244 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3245 stub_entry->target_section = section;
3246 stub_entry->target_value = i + 4;
3247 stub_entry->veneered_insn = insn_2;
3248 stub_entry->output_name = stub_name;
3253 if (elf_section_data (section)->this_hdr.contents == NULL)
3257 *num_fixes_p = num_fixes;
3263 /* Test if instruction INSN is ADRP. */
3266 _bfd_aarch64_adrp_p (uint32_t insn)
3268 return ((insn & 0x9f000000) == 0x90000000);
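/* Worked example (not part of the original source): 0x90000010 ("adrp x16, #0")
   satisfies (insn & 0x9f000000) == 0x90000000, while 0x10000010 ("adr x16, #0")
   does not, since ADR has bit 31 clear.  */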
3272 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
3275 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
3283 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
3286 && AARCH64_LDST_UIMM (insn_3)
3287 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
3291 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
3293 Return TRUE if section CONTENTS at offset I contains one of the
3294 erratum 843419 sequences, otherwise return FALSE. If a sequence is
3295 seen set P_VENEER_I to the offset of the final LOAD/STORE
3296 instruction in the sequence.
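/* Illustrative example (not part of the original source): the shape matched
   here is an ADRP in one of the last two instruction slots of a 4KB page
   (vma & 0xfff equal to 0xff8 or 0xffc), followed by a load/store and then,
   as the third or fourth instruction, a load/store that uses the register
   written by the ADRP as its base, e.g.

     adrp x0, sym                 <- at a page offset of 0xff8
     ldr  x1, [x2, #8]            <- intervening load/store
     ldr  x3, [x0, #:lo12:sym]    <- base register x0 comes from the ADRP  */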
3300 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
3301 bfd_vma i, bfd_vma span_end,
3302 bfd_vma *p_veneer_i)
3304 uint32_t insn_1 = bfd_getl32 (contents + i);
3306 if (!_bfd_aarch64_adrp_p (insn_1))
3309 if (span_end < i + 12)
3312 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3313 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
3315 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
3318 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
3320 *p_veneer_i = i + 8;
3324 if (span_end < i + 16)
3327 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
3329 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
3331 *p_veneer_i = i + 12;
3339 /* Resize all stub sections. */
3342 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3346 /* OK, we've added some stubs. Find out the new size of the stub sections. */
3348 for (section = htab->stub_bfd->sections;
3349 section != NULL; section = section->next)
3351 /* Ignore non-stub sections. */
3352 if (!strstr (section->name, STUB_SUFFIX))
3357 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3359 for (section = htab->stub_bfd->sections;
3360 section != NULL; section = section->next)
3362 if (!strstr (section->name, STUB_SUFFIX))
3368 /* Ensure all stub sections have a size which is a multiple of
3369 4096. This is important in order to ensure that the insertion
3370 of stub sections does not in itself move existing code around
3371 in such a way that new errata sequences are created. */
3372 if (htab->fix_erratum_843419)
3374 section->size = BFD_ALIGN (section->size, 0x1000);
3379 /* Construct an erratum 843419 workaround stub name.
3383 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
3386 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
3387 char *stub_name = bfd_malloc (len);
3389 if (stub_name != NULL)
3390 snprintf (stub_name, len, "e843419@%04x_%08x_%" BFD_VMA_FMT "x",
3391 input_section->owner->id,
3397 /* Build a stub_entry structure describing an 843419 fixup.
3399 The stub_entry constructed is populated with the bit pattern INSN
3400 of the instruction located at LDST_OFFSET within input SECTION.
3402 Returns TRUE on success. */
3405 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
3406 bfd_vma adrp_offset,
3407 bfd_vma ldst_offset,
3409 struct bfd_link_info *info)
3411 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3413 struct elf_aarch64_stub_hash_entry *stub_entry;
3415 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
3416 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3424 /* We always place an 843419 workaround veneer in the stub section
3425 attached to the input section in which an erratum sequence has
3426 been found. This ensures that later in the link process (in
3427 elfNN_aarch64_write_section) when we copy the veneered
3428 instruction from the input section into the stub section the
3429 copied instruction will have had any relocations applied to it.
3430 If we placed workaround veneers in any other stub section then we
3431 could not assume that all relocations have been processed on the
3432 corresponding input section at the point we output the stub section. */
3436 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
3437 if (stub_entry == NULL)
3443 stub_entry->adrp_offset = adrp_offset;
3444 stub_entry->target_value = ldst_offset;
3445 stub_entry->target_section = section;
3446 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
3447 stub_entry->veneered_insn = insn;
3448 stub_entry->output_name = stub_name;
3454 /* Scan an input section looking for the signature of erratum 843419.
3456 Scans input SECTION in INPUT_BFD looking for erratum 843419
3457 signatures, for each signature found a stub_entry is created
3458 describing the location of the erratum for subsequent fixup.
3460 Return TRUE on successful scan, FALSE on failure to scan.
3464 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
3465 struct bfd_link_info *info)
3467 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3472 if (elf_section_type (section) != SHT_PROGBITS
3473 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3474 || (section->flags & SEC_EXCLUDE) != 0
3475 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3476 || (section->output_section == bfd_abs_section_ptr))
3481 bfd_byte *contents = NULL;
3482 struct _aarch64_elf_section_data *sec_data;
3485 if (elf_section_data (section)->this_hdr.contents != NULL)
3486 contents = elf_section_data (section)->this_hdr.contents;
3487 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3490 sec_data = elf_aarch64_section_data (section);
3492 qsort (sec_data->map, sec_data->mapcount,
3493 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3495 for (span = 0; span < sec_data->mapcount; span++)
3497 unsigned int span_start = sec_data->map[span].vma;
3498 unsigned int span_end = ((span == sec_data->mapcount - 1)
3499 ? sec_data->map[0].vma + section->size
3500 : sec_data->map[span + 1].vma);
3502 char span_type = sec_data->map[span].type;
3504 if (span_type == 'd')
3507 for (i = span_start; i + 8 < span_end; i += 4)
3509 bfd_vma vma = (section->output_section->vma
3510 + section->output_offset
3514 if (_bfd_aarch64_erratum_843419_p
3515 (contents, vma, i, span_end, &veneer_i))
3517 uint32_t insn = bfd_getl32 (contents + veneer_i);
3519 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
3526 if (elf_section_data (section)->this_hdr.contents == NULL)
3535 /* Determine and set the size of the stub section for a final link.
3537 The basic idea here is to examine all the relocations looking for
3538 PC-relative calls to a target that is unreachable with a "bl" instruction. */
3542 elfNN_aarch64_size_stubs (bfd *output_bfd,
3544 struct bfd_link_info *info,
3545 bfd_signed_vma group_size,
3546 asection * (*add_stub_section) (const char *,
3548 void (*layout_sections_again) (void))
3550 bfd_size_type stub_group_size;
3551 bfd_boolean stubs_always_before_branch;
3552 bfd_boolean stub_changed = FALSE;
3553 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3554 unsigned int num_erratum_835769_fixes = 0;
3556 /* Propagate mach to stub bfd, because it may not have been
3557 finalized when we created stub_bfd. */
3558 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3559 bfd_get_mach (output_bfd));
3561 /* Stash our params away. */
3562 htab->stub_bfd = stub_bfd;
3563 htab->add_stub_section = add_stub_section;
3564 htab->layout_sections_again = layout_sections_again;
3565 stubs_always_before_branch = group_size < 0;
3567 stub_group_size = -group_size;
3569 stub_group_size = group_size;
3571 if (stub_group_size == 1)
3573 /* Default values. */
3574 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3575 stub_group_size = 127 * 1024 * 1024;
3578 group_sections (htab, stub_group_size, stubs_always_before_branch);
3580 (*htab->layout_sections_again) ();
3582 if (htab->fix_erratum_835769)
3586 for (input_bfd = info->input_bfds;
3587 input_bfd != NULL; input_bfd = input_bfd->link.next)
3588 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3589 &num_erratum_835769_fixes))
3592 _bfd_aarch64_resize_stubs (htab);
3593 (*htab->layout_sections_again) ();
3596 if (htab->fix_erratum_843419)
3600 for (input_bfd = info->input_bfds;
3602 input_bfd = input_bfd->link.next)
3606 for (section = input_bfd->sections;
3608 section = section->next)
3609 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
3613 _bfd_aarch64_resize_stubs (htab);
3614 (*htab->layout_sections_again) ();
3621 for (input_bfd = info->input_bfds;
3622 input_bfd != NULL; input_bfd = input_bfd->link.next)
3624 Elf_Internal_Shdr *symtab_hdr;
3626 Elf_Internal_Sym *local_syms = NULL;
3628 /* We'll need the symbol table in a second. */
3629 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3630 if (symtab_hdr->sh_info == 0)
3633 /* Walk over each section attached to the input bfd. */
3634 for (section = input_bfd->sections;
3635 section != NULL; section = section->next)
3637 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3639 /* If there aren't any relocs, then there's nothing more to do. */
3641 if ((section->flags & SEC_RELOC) == 0
3642 || section->reloc_count == 0
3643 || (section->flags & SEC_CODE) == 0)
3646 /* If this section is a link-once section that will be
3647 discarded, then don't create any stubs. */
3648 if (section->output_section == NULL
3649 || section->output_section->owner != output_bfd)
3652 /* Get the relocs. */
3654 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3655 NULL, info->keep_memory);
3656 if (internal_relocs == NULL)
3657 goto error_ret_free_local;
3659 /* Now examine each relocation. */
3660 irela = internal_relocs;
3661 irelaend = irela + section->reloc_count;
3662 for (; irela < irelaend; irela++)
3664 unsigned int r_type, r_indx;
3665 enum elf_aarch64_stub_type stub_type;
3666 struct elf_aarch64_stub_hash_entry *stub_entry;
3669 bfd_vma destination;
3670 struct elf_aarch64_link_hash_entry *hash;
3671 const char *sym_name;
3673 const asection *id_sec;
3674 unsigned char st_type;
3677 r_type = ELFNN_R_TYPE (irela->r_info);
3678 r_indx = ELFNN_R_SYM (irela->r_info);
3680 if (r_type >= (unsigned int) R_AARCH64_end)
3682 bfd_set_error (bfd_error_bad_value);
3683 error_ret_free_internal:
3684 if (elf_section_data (section)->relocs == NULL)
3685 free (internal_relocs);
3686 goto error_ret_free_local;
3689 /* Only look for stubs on unconditional branch and
3690 branch and link instructions. */
3691 if (r_type != (unsigned int) AARCH64_R (CALL26)
3692 && r_type != (unsigned int) AARCH64_R (JUMP26))
3695 /* Now determine the call target, its name, value, section. */
3702 if (r_indx < symtab_hdr->sh_info)
3704 /* It's a local symbol. */
3705 Elf_Internal_Sym *sym;
3706 Elf_Internal_Shdr *hdr;
3708 if (local_syms == NULL)
3711 = (Elf_Internal_Sym *) symtab_hdr->contents;
3712 if (local_syms == NULL)
3714 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3715 symtab_hdr->sh_info, 0,
3717 if (local_syms == NULL)
3718 goto error_ret_free_internal;
3721 sym = local_syms + r_indx;
3722 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3723 sym_sec = hdr->bfd_section;
3725 /* This is an undefined symbol. It can never be resolved. */
3729 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3730 sym_value = sym->st_value;
3731 destination = (sym_value + irela->r_addend
3732 + sym_sec->output_offset
3733 + sym_sec->output_section->vma);
3734 st_type = ELF_ST_TYPE (sym->st_info);
3736 = bfd_elf_string_from_elf_section (input_bfd,
3737 symtab_hdr->sh_link,
3744 e_indx = r_indx - symtab_hdr->sh_info;
3745 hash = ((struct elf_aarch64_link_hash_entry *)
3746 elf_sym_hashes (input_bfd)[e_indx]);
3748 while (hash->root.root.type == bfd_link_hash_indirect
3749 || hash->root.root.type == bfd_link_hash_warning)
3750 hash = ((struct elf_aarch64_link_hash_entry *)
3751 hash->root.root.u.i.link);
3753 if (hash->root.root.type == bfd_link_hash_defined
3754 || hash->root.root.type == bfd_link_hash_defweak)
3756 struct elf_aarch64_link_hash_table *globals =
3757 elf_aarch64_hash_table (info);
3758 sym_sec = hash->root.root.u.def.section;
3759 sym_value = hash->root.root.u.def.value;
3760 /* For a destination in a shared library,
3761 use the PLT stub as target address to
3762 decide whether a branch stub is needed. */
3764 if (globals->root.splt != NULL && hash != NULL
3765 && hash->root.plt.offset != (bfd_vma) - 1)
3767 sym_sec = globals->root.splt;
3768 sym_value = hash->root.plt.offset;
3769 if (sym_sec->output_section != NULL)
3770 destination = (sym_value
3771 + sym_sec->output_offset
3773 sym_sec->output_section->vma);
3775 else if (sym_sec->output_section != NULL)
3776 destination = (sym_value + irela->r_addend
3777 + sym_sec->output_offset
3778 + sym_sec->output_section->vma);
3780 else if (hash->root.root.type == bfd_link_hash_undefined
3781 || (hash->root.root.type
3782 == bfd_link_hash_undefweak))
3784 /* For a shared library, use the PLT stub as
3785 target address to decide whether a long
3786 branch stub is needed.
3787 For absolute code, they cannot be handled. */
3788 struct elf_aarch64_link_hash_table *globals =
3789 elf_aarch64_hash_table (info);
3791 if (globals->root.splt != NULL && hash != NULL
3792 && hash->root.plt.offset != (bfd_vma) - 1)
3794 sym_sec = globals->root.splt;
3795 sym_value = hash->root.plt.offset;
3796 if (sym_sec->output_section != NULL)
3797 destination = (sym_value
3798 + sym_sec->output_offset
3800 sym_sec->output_section->vma);
3807 bfd_set_error (bfd_error_bad_value);
3808 goto error_ret_free_internal;
3810 st_type = ELF_ST_TYPE (hash->root.type);
3811 sym_name = hash->root.root.root.string;
3814 /* Determine what (if any) linker stub is needed. */
3815 stub_type = aarch64_type_of_stub
3816 (info, section, irela, st_type, hash, destination);
3817 if (stub_type == aarch64_stub_none)
3820 /* Support for grouping stub sections. */
3821 id_sec = htab->stub_group[section->id].link_sec;
3823 /* Get the name of this stub. */
3824 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3827 goto error_ret_free_internal;
3830 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3831 stub_name, FALSE, FALSE);
3832 if (stub_entry != NULL)
3834 /* The proper stub has already been created. */
3839 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3840 (stub_name, section, htab);
3841 if (stub_entry == NULL)
3844 goto error_ret_free_internal;
3847 stub_entry->target_value = sym_value;
3848 stub_entry->target_section = sym_sec;
3849 stub_entry->stub_type = stub_type;
3850 stub_entry->h = hash;
3851 stub_entry->st_type = st_type;
3853 if (sym_name == NULL)
3854 sym_name = "unnamed";
3855 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3856 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3857 if (stub_entry->output_name == NULL)
3860 goto error_ret_free_internal;
3863 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3866 stub_changed = TRUE;
3869 /* We're done with the internal relocs, free them. */
3870 if (elf_section_data (section)->relocs == NULL)
3871 free (internal_relocs);
3878 _bfd_aarch64_resize_stubs (htab);
3880 /* Ask the linker to do its stuff. */
3881 (*htab->layout_sections_again) ();
3882 stub_changed = FALSE;
3887 error_ret_free_local:
3891 /* Build all the stubs associated with the current output file. The
3892 stubs are kept in a hash table attached to the main linker hash
3893 table. We also set up the .plt entries for statically linked PIC
3894 functions here. This function is called via aarch64_elf_finish in the linker. */
3898 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3901 struct bfd_hash_table *table;
3902 struct elf_aarch64_link_hash_table *htab;
3904 htab = elf_aarch64_hash_table (info);
3906 for (stub_sec = htab->stub_bfd->sections;
3907 stub_sec != NULL; stub_sec = stub_sec->next)
3911 /* Ignore non-stub sections. */
3912 if (!strstr (stub_sec->name, STUB_SUFFIX))
3915 /* Allocate memory to hold the linker stubs. */
3916 size = stub_sec->size;
3917 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3918 if (stub_sec->contents == NULL && size != 0)
3922 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
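/* Editorial note (not part of the original source): 0x14000000 is the A64
   encoding of an unconditional "b" with a zero imm26 field; ORing in
   size >> 2 (the stub section size in 4-byte words) yields a branch from the
   start of the stub section to its end, so execution falling into the
   section skips over the veneers.  */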
3923 stub_sec->size += 4;
3926 /* Build the stubs as directed by the stub hash table. */
3927 table = &htab->stub_hash_table;
3928 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3934 /* Add an entry to the code/data map for section SEC. */
3937 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3939 struct _aarch64_elf_section_data *sec_data =
3940 elf_aarch64_section_data (sec);
3941 unsigned int newidx;
3943 if (sec_data->map == NULL)
3945 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3946 sec_data->mapcount = 0;
3947 sec_data->mapsize = 1;
3950 newidx = sec_data->mapcount++;
3952 if (sec_data->mapcount > sec_data->mapsize)
3954 sec_data->mapsize *= 2;
3955 sec_data->map = bfd_realloc_or_free
3956 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3961 sec_data->map[newidx].vma = vma;
3962 sec_data->map[newidx].type = type;
3967 /* Initialise maps of insn/data for input BFDs. */
3969 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3971 Elf_Internal_Sym *isymbuf;
3972 Elf_Internal_Shdr *hdr;
3973 unsigned int i, localsyms;
3975 /* Make sure that we are dealing with an AArch64 elf binary. */
3976 if (!is_aarch64_elf (abfd))
3979 if ((abfd->flags & DYNAMIC) != 0)
3982 hdr = &elf_symtab_hdr (abfd);
3983 localsyms = hdr->sh_info;
3985 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3986 should contain the number of local symbols, which should come before any
3987 global symbols. Mapping symbols are always local. */
3988 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3990 /* No internal symbols read? Skip this BFD. */
3991 if (isymbuf == NULL)
3994 for (i = 0; i < localsyms; i++)
3996 Elf_Internal_Sym *isym = &isymbuf[i];
3997 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
4000 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
4002 name = bfd_elf_string_from_elf_section (abfd,
4006 if (bfd_is_aarch64_special_symbol_name
4007 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
4008 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
4013 /* Set option values needed during linking. */
4015 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
4016 struct bfd_link_info *link_info,
4018 int no_wchar_warn, int pic_veneer,
4019 int fix_erratum_835769,
4020 int fix_erratum_843419)
4022 struct elf_aarch64_link_hash_table *globals;
4024 globals = elf_aarch64_hash_table (link_info);
4025 globals->pic_veneer = pic_veneer;
4026 globals->fix_erratum_835769 = fix_erratum_835769;
4027 globals->fix_erratum_843419 = fix_erratum_843419;
4028 globals->fix_erratum_843419_adr = TRUE;
4030 BFD_ASSERT (is_aarch64_elf (output_bfd));
4031 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
4032 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
4036 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
4037 struct elf_aarch64_link_hash_table
4038 *globals, struct bfd_link_info *info,
4039 bfd_vma value, bfd *output_bfd,
4040 bfd_boolean *unresolved_reloc_p)
4042 bfd_vma off = (bfd_vma) - 1;
4043 asection *basegot = globals->root.sgot;
4044 bfd_boolean dyn = globals->root.dynamic_sections_created;
4048 BFD_ASSERT (basegot != NULL);
4049 off = h->got.offset;
4050 BFD_ASSERT (off != (bfd_vma) - 1);
4051 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
4053 && SYMBOL_REFERENCES_LOCAL (info, h))
4054 || (ELF_ST_VISIBILITY (h->other)
4055 && h->root.type == bfd_link_hash_undefweak))
4057 /* This is actually a static link, or it is a -Bsymbolic link
4058 and the symbol is defined locally. We must initialize this
4059 entry in the global offset table. Since the offset must
4060 always be a multiple of 8 (4 in the case of ILP32), we use
4061 the least significant bit to record whether we have
4062 initialized it already.
4063 When doing a dynamic link, we create a .rel(a).got relocation
4064 entry to initialize the value. This is done in the
4065 finish_dynamic_symbol routine. */
4070 bfd_put_NN (output_bfd, value, basegot->contents + off);
4075 *unresolved_reloc_p = FALSE;
4077 off = off + basegot->output_section->vma + basegot->output_offset;
4083 /* Change R_TYPE to a more efficient access model where possible,
4084 return the new reloc type. */
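/* Editorial note (not part of the original source): the switch below
   implements the usual static-link TLS access-model relaxations: GD and
   TLS-descriptor references are rewritten to initial-exec forms for
   preemptible symbols and to local-exec forms when the symbol is local
   (h == NULL).  For example, BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 maps to
   TLSLE_MOVW_TPREL_G1 or TLSIE_ADR_GOTTPREL_PAGE21, and the TLSDESC ADD and
   CALL relocations collapse to BFD_RELOC_AARCH64_NONE because their
   instructions become NOPs.  */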
4086 static bfd_reloc_code_real_type
4087 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
4088 struct elf_link_hash_entry *h)
4090 bfd_boolean is_local = h == NULL;
4094 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4095 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4097 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4098 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
4100 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4102 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4105 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4107 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
4108 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4110 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4111 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4113 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4114 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
4116 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4117 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
4119 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4120 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
4122 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4125 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4127 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
4128 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4130 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4131 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4132 /* Instructions with these relocations will become NOPs. */
4133 return BFD_RELOC_AARCH64_NONE;
4143 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
4147 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4148 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4149 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4150 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4151 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4152 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4155 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4156 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4157 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4158 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
4159 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
4160 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
4163 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4164 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4165 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4166 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4167 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4168 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4169 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4170 return GOT_TLSDESC_GD;
4172 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4173 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4174 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4175 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4178 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
4179 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4180 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4181 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4182 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4183 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4184 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4185 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4186 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4196 aarch64_can_relax_tls (bfd *input_bfd,
4197 struct bfd_link_info *info,
4198 bfd_reloc_code_real_type r_type,
4199 struct elf_link_hash_entry *h,
4200 unsigned long r_symndx)
4202 unsigned int symbol_got_type;
4203 unsigned int reloc_got_type;
4205 if (! IS_AARCH64_TLS_RELOC (r_type))
4208 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
4209 reloc_got_type = aarch64_reloc_got_type (r_type);
4211 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
4217 if (h && h->root.type == bfd_link_hash_undefweak)
4223 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
4226 static bfd_reloc_code_real_type
4227 aarch64_tls_transition (bfd *input_bfd,
4228 struct bfd_link_info *info,
4229 unsigned int r_type,
4230 struct elf_link_hash_entry *h,
4231 unsigned long r_symndx)
4233 bfd_reloc_code_real_type bfd_r_type
4234 = elfNN_aarch64_bfd_reloc_from_type (r_type);
4236 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
4239 return aarch64_tls_transition_without_check (bfd_r_type, h);
4242 /* Return the base VMA address which should be subtracted from real addresses
4243 when resolving R_AARCH64_TLS_DTPREL relocation. */
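/* Illustration (a sketch): for a TLS symbol S defined in this module,
   its DTP-relative offset is

     S_vma - dtpoff_base (info)

   i.e. the offset of S from the start of the module's TLS block.  */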
4246 dtpoff_base (struct bfd_link_info *info)
4248 /* If tls_sec is NULL, we should have signalled an error already. */
4249 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
4250 return elf_hash_table (info)->tls_sec->vma;
4253 /* Return the base VMA address which should be subtracted from real addresses
4254 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
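/* Illustration (assuming the usual AArch64 static TLS layout, in which
   the TLS block follows the thread control block): the TP-relative
   offset of a TLS symbol S is

     S_vma - tpoff_base (info)
       == S_vma - tls_sec->vma + align (TCB_SIZE, tls_sec alignment)

   so at run time the symbol lives at TP plus that offset.  */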
4257 tpoff_base (struct bfd_link_info *info)
4259 struct elf_link_hash_table *htab = elf_hash_table (info);
4261 /* If tls_sec is NULL, we should have signalled an error already. */
4262 BFD_ASSERT (htab->tls_sec != NULL);
4264 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
4265 htab->tls_sec->alignment_power);
4266 return htab->tls_sec->vma - base;
4270 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4271 unsigned long r_symndx)
4273 /* Return a pointer to the stored GOT offset for the symbol referred
4274 to by H, or for local symbol R_SYMNDX when H is NULL. */
4276 return &h->got.offset;
4280 struct elf_aarch64_local_symbol *l;
4282 l = elf_aarch64_locals (input_bfd);
4283 return &l[r_symndx].got_offset;
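/* The least significant bit of the offsets returned by the _ref helpers
   is used as an "already initialized" flag: the _mark helpers set it,
   the _mark_p helpers test it, and symbol_got_offset (and the tlsdesc
   variants below) strip it before the offset is used.  */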
4288 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4289 unsigned long r_symndx)
4292 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
4297 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
4298 unsigned long r_symndx)
4301 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4306 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4307 unsigned long r_symndx)
4310 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4316 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4317 unsigned long r_symndx)
4319 /* Return a pointer to the stored TLSDESC GOT offset for the symbol
4320 referred to by H, or for local symbol R_SYMNDX when H is NULL. */
4323 struct elf_aarch64_link_hash_entry *eh;
4324 eh = (struct elf_aarch64_link_hash_entry *) h;
4325 return &eh->tlsdesc_got_jump_table_offset;
4330 struct elf_aarch64_local_symbol *l;
4332 l = elf_aarch64_locals (input_bfd);
4333 return &l[r_symndx].tlsdesc_got_jump_table_offset;
4338 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4339 unsigned long r_symndx)
4342 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4347 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
4348 struct elf_link_hash_entry *h,
4349 unsigned long r_symndx)
4352 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4357 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4358 unsigned long r_symndx)
4361 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4366 /* Data for make_branch_to_erratum_835769_stub(). */
4368 struct erratum_835769_branch_to_stub_data
4370 struct bfd_link_info *info;
4371 asection *output_section;
4375 /* Helper to insert branches to erratum 835769 stubs in the right
4376 places for a particular section. */
4379 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4382 struct elf_aarch64_stub_hash_entry *stub_entry;
4383 struct erratum_835769_branch_to_stub_data *data;
4385 unsigned long branch_insn = 0;
4386 bfd_vma veneered_insn_loc, veneer_entry_loc;
4387 bfd_signed_vma branch_offset;
4388 unsigned int target;
4391 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4392 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4394 if (stub_entry->target_section != data->output_section
4395 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4398 contents = data->contents;
4399 veneered_insn_loc = stub_entry->target_section->output_section->vma
4400 + stub_entry->target_section->output_offset
4401 + stub_entry->target_value;
4402 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4403 + stub_entry->stub_sec->output_offset
4404 + stub_entry->stub_offset;
4405 branch_offset = veneer_entry_loc - veneered_insn_loc;
4407 abfd = stub_entry->target_section->owner;
4408 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4409 (*_bfd_error_handler)
4410 (_("%B: error: Erratum 835769 stub out "
4411 "of range (input file too large)"), abfd);
4413 target = stub_entry->target_value;
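/* Assemble an unconditional B to the veneer: 0x14000000 is the B opcode,
   with the branch offset, in words, in the low 26 bits (imm26).  */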
4414 branch_insn = 0x14000000;
4415 branch_offset >>= 2;
4416 branch_offset &= 0x3ffffff;
4417 branch_insn |= branch_offset;
4418 bfd_putl32 (branch_insn, &contents[target]);
4425 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
4428 struct elf_aarch64_stub_hash_entry *stub_entry
4429 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4430 struct erratum_835769_branch_to_stub_data *data
4431 = (struct erratum_835769_branch_to_stub_data *) in_arg;
4432 struct bfd_link_info *info;
4433 struct elf_aarch64_link_hash_table *htab;
4441 contents = data->contents;
4442 section = data->output_section;
4444 htab = elf_aarch64_hash_table (info);
4446 if (stub_entry->target_section != section
4447 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
4450 insn = bfd_getl32 (contents + stub_entry->target_value);
4452 stub_entry->stub_sec->contents + stub_entry->stub_offset);
4454 place = (section->output_section->vma + section->output_offset
4455 + stub_entry->adrp_offset);
4456 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
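/* Two repairs are possible here: if the displacement fits in an ADR
   (roughly +/-1MB) and rewriting to ADR is enabled
   (htab->fix_erratum_843419_adr), patch the offending ADRP in place;
   otherwise fall through and branch to the veneer instead.  */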
4458 if ((insn & AARCH64_ADRP_OP_MASK) != AARCH64_ADRP_OP)
4461 bfd_signed_vma imm =
4462 (_bfd_aarch64_sign_extend
4463 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
4466 if (htab->fix_erratum_843419_adr
4467 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
4469 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
4470 | AARCH64_RT (insn));
4471 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
4475 bfd_vma veneered_insn_loc;
4476 bfd_vma veneer_entry_loc;
4477 bfd_signed_vma branch_offset;
4478 uint32_t branch_insn;
4480 veneered_insn_loc = stub_entry->target_section->output_section->vma
4481 + stub_entry->target_section->output_offset
4482 + stub_entry->target_value;
4483 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4484 + stub_entry->stub_sec->output_offset
4485 + stub_entry->stub_offset;
4486 branch_offset = veneer_entry_loc - veneered_insn_loc;
4488 abfd = stub_entry->target_section->owner;
4489 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4490 (*_bfd_error_handler)
4491 (_("%B: error: Erratum 843419 stub out "
4492 "of range (input file too large)"), abfd);
4494 branch_insn = 0x14000000;
4495 branch_offset >>= 2;
4496 branch_offset &= 0x3ffffff;
4497 branch_insn |= branch_offset;
4498 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
4505 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4506 struct bfd_link_info *link_info,
4511 struct elf_aarch64_link_hash_table *globals =
4512 elf_aarch64_hash_table (link_info);
4514 if (globals == NULL)
4517 /* Fix code to point to erratum 835769 stubs. */
4518 if (globals->fix_erratum_835769)
4520 struct erratum_835769_branch_to_stub_data data;
4522 data.info = link_info;
4523 data.output_section = sec;
4524 data.contents = contents;
4525 bfd_hash_traverse (&globals->stub_hash_table,
4526 make_branch_to_erratum_835769_stub, &data);
4529 if (globals->fix_erratum_843419)
4531 struct erratum_835769_branch_to_stub_data data;
4533 data.info = link_info;
4534 data.output_section = sec;
4535 data.contents = contents;
4536 bfd_hash_traverse (&globals->stub_hash_table,
4537 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
4543 /* Perform a relocation as part of a final link. */
4544 static bfd_reloc_status_type
4545 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4548 asection *input_section,
4550 Elf_Internal_Rela *rel,
4552 struct bfd_link_info *info,
4554 struct elf_link_hash_entry *h,
4555 bfd_boolean *unresolved_reloc_p,
4556 bfd_boolean save_addend,
4557 bfd_vma *saved_addend,
4558 Elf_Internal_Sym *sym)
4560 Elf_Internal_Shdr *symtab_hdr;
4561 unsigned int r_type = howto->type;
4562 bfd_reloc_code_real_type bfd_r_type
4563 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4564 bfd_reloc_code_real_type new_bfd_r_type;
4565 unsigned long r_symndx;
4566 bfd_byte *hit_data = contents + rel->r_offset;
4568 bfd_signed_vma signed_addend;
4569 struct elf_aarch64_link_hash_table *globals;
4570 bfd_boolean weak_undef_p;
4573 globals = elf_aarch64_hash_table (info);
4575 symtab_hdr = &elf_symtab_hdr (input_bfd);
4577 BFD_ASSERT (is_aarch64_elf (input_bfd));
4579 r_symndx = ELFNN_R_SYM (rel->r_info);
4581 /* It is possible to have linker relaxations on some TLS access
4582 models. Update our information here. */
4583 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4584 if (new_bfd_r_type != bfd_r_type)
4586 bfd_r_type = new_bfd_r_type;
4587 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4588 BFD_ASSERT (howto != NULL);
4589 r_type = howto->type;
4592 place = input_section->output_section->vma
4593 + input_section->output_offset + rel->r_offset;
4595 /* Get addend, accumulating the addend for consecutive relocs
4596 which refer to the same offset. */
4597 signed_addend = saved_addend ? *saved_addend : 0;
4598 signed_addend += rel->r_addend;
4600 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4601 : bfd_is_und_section (sym_sec));
4603 /* Since an STT_GNU_IFUNC symbol must go through the PLT, we handle
4604 it here if it is defined in a non-shared object. */
4606 && h->type == STT_GNU_IFUNC
4613 if ((input_section->flags & SEC_ALLOC) == 0
4614 || h->plt.offset == (bfd_vma) -1)
4617 /* STT_GNU_IFUNC symbol must go through PLT. */
4618 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4619 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4624 if (h->root.root.string)
4625 name = h->root.root.string;
4627 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4629 (*_bfd_error_handler)
4630 (_("%B: relocation %s against STT_GNU_IFUNC "
4631 "symbol `%s' isn't handled by %s"), input_bfd,
4632 howto->name, name, __FUNCTION__);
4633 bfd_set_error (bfd_error_bad_value);
4636 case BFD_RELOC_AARCH64_NN:
4637 if (rel->r_addend != 0)
4639 if (h->root.root.string)
4640 name = h->root.root.string;
4642 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4644 (*_bfd_error_handler)
4645 (_("%B: relocation %s against STT_GNU_IFUNC "
4646 "symbol `%s' has non-zero addend: %d"),
4647 input_bfd, howto->name, name, rel->r_addend);
4648 bfd_set_error (bfd_error_bad_value);
4652 /* Generate dynamic relocation only when there is a
4653 non-GOT reference in a shared object. */
4654 if (info->shared && h->non_got_ref)
4656 Elf_Internal_Rela outrel;
4659 /* Need a dynamic relocation to get the real function
4661 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4665 if (outrel.r_offset == (bfd_vma) -1
4666 || outrel.r_offset == (bfd_vma) -2)
4669 outrel.r_offset += (input_section->output_section->vma
4670 + input_section->output_offset);
4672 if (h->dynindx == -1
4674 || info->executable)
4676 /* This symbol is resolved locally. */
4677 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4678 outrel.r_addend = (h->root.u.def.value
4679 + h->root.u.def.section->output_section->vma
4680 + h->root.u.def.section->output_offset);
4684 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4685 outrel.r_addend = 0;
4688 sreloc = globals->root.irelifunc;
4689 elf_append_rela (output_bfd, sreloc, &outrel);
4691 /* If this reloc is against an external symbol, we
4692 do not want to fiddle with the addend. Otherwise,
4693 we need to include the symbol value so that it
4694 becomes an addend for the dynamic reloc. For an
4695 internal symbol, we have updated the addend. */
4696 return bfd_reloc_ok;
4699 case BFD_RELOC_AARCH64_CALL26:
4700 case BFD_RELOC_AARCH64_JUMP26:
4701 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4704 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4706 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4707 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4708 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4709 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4710 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4711 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4712 base_got = globals->root.sgot;
4713 off = h->got.offset;
4715 if (base_got == NULL)
4718 if (off == (bfd_vma) -1)
4722 /* We can't use h->got.offset here to save state, or
4723 even just remember the offset, as finish_dynamic_symbol
4724 would use that as the offset into .got. */
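/* The ifunc symbol has no GOT entry of its own, so use the .got.plt
   slot belonging to its PLT entry.  The first three .got.plt entries
   are reserved for the dynamic linker, hence the "+ 3" below.  */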
4726 if (globals->root.splt != NULL)
4728 plt_index = ((h->plt.offset - globals->plt_header_size) /
4729 globals->plt_entry_size);
4730 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4731 base_got = globals->root.sgotplt;
4735 plt_index = h->plt.offset / globals->plt_entry_size;
4736 off = plt_index * GOT_ENTRY_SIZE;
4737 base_got = globals->root.igotplt;
4740 if (h->dynindx == -1
4744 /* This references the local definition. We must
4745 initialize this entry in the global offset table.
4746 Since the offset must always be a multiple of 8,
4747 we use the least significant bit to record
4748 whether we have initialized it already.
4750 When doing a dynamic link, we create a .rela.got
4751 relocation entry to initialize the value. This
4752 is done in the finish_dynamic_symbol routine. */
4757 bfd_put_NN (output_bfd, value,
4758 base_got->contents + off);
4759 /* Note that this is harmless as -1 | 1 still is -1. */
4763 value = (base_got->output_section->vma
4764 + base_got->output_offset + off);
4767 value = aarch64_calculate_got_entry_vma (h, globals, info,
4769 unresolved_reloc_p);
4770 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4771 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4772 addend = (globals->root.sgot->output_section->vma
4773 + globals->root.sgot->output_offset);
4774 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4775 addend, weak_undef_p);
4776 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4777 case BFD_RELOC_AARCH64_ADD_LO12:
4778 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4785 case BFD_RELOC_AARCH64_NONE:
4786 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4787 *unresolved_reloc_p = FALSE;
4788 return bfd_reloc_ok;
4790 case BFD_RELOC_AARCH64_NN:
4792 /* When generating a shared object or relocatable executable, these
4793 relocations are copied into the output file to be resolved at
4795 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
4796 && (input_section->flags & SEC_ALLOC)
4798 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4799 || h->root.type != bfd_link_hash_undefweak))
4801 Elf_Internal_Rela outrel;
4803 bfd_boolean skip, relocate;
4806 *unresolved_reloc_p = FALSE;
4811 outrel.r_addend = signed_addend;
4813 _bfd_elf_section_offset (output_bfd, info, input_section,
4815 if (outrel.r_offset == (bfd_vma) - 1)
4817 else if (outrel.r_offset == (bfd_vma) - 2)
4823 outrel.r_offset += (input_section->output_section->vma
4824 + input_section->output_offset);
4827 memset (&outrel, 0, sizeof outrel);
4830 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4831 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4836 /* On SVR4-ish systems, the dynamic loader cannot
4837 relocate the text and data segments independently,
4838 so the symbol does not matter. */
4840 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4841 outrel.r_addend += value;
4844 sreloc = elf_section_data (input_section)->sreloc;
4845 if (sreloc == NULL || sreloc->contents == NULL)
4846 return bfd_reloc_notsupported;
4848 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4849 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4851 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4853 /* Sanity check that we have previously allocated
4854 sufficient space in the relocation section for the
4855 number of relocations we actually want to emit. */
4859 /* If this reloc is against an external symbol, we do not want to
4860 fiddle with the addend. Otherwise, we need to include the symbol
4861 value so that it becomes an addend for the dynamic reloc. */
4863 return bfd_reloc_ok;
4865 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4866 contents, rel->r_offset, value,
4870 value += signed_addend;
4873 case BFD_RELOC_AARCH64_CALL26:
4874 case BFD_RELOC_AARCH64_JUMP26:
4876 asection *splt = globals->root.splt;
4877 bfd_boolean via_plt_p =
4878 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4880 /* A call to an undefined weak symbol is converted to a jump to
4881 the next instruction unless a PLT entry will be created.
4882 The jump to the next instruction is optimized as a NOP.
4883 Do the same for local undefined symbols. */
4884 if (weak_undef_p && ! via_plt_p)
4886 bfd_putl32 (INSN_NOP, hit_data);
4887 return bfd_reloc_ok;
4890 /* If the call goes through a PLT entry, make sure to
4891 check distance to the right destination address. */
4894 value = (splt->output_section->vma
4895 + splt->output_offset + h->plt.offset);
4896 *unresolved_reloc_p = FALSE;
4899 /* If the target symbol is global and marked as a function the
4900 relocation applies to a function call or a tail call. In this
4901 situation we can veneer out-of-range branches. The veneers
4902 use IP0 and IP1, hence they cannot be used for arbitrary
4903 out-of-range branches that occur within the body of a function. */
4904 if (h && h->type == STT_FUNC)
4906 /* Check if a stub has to be inserted because the destination
4908 if (! aarch64_valid_branch_p (value, place))
4910 /* The target is out of reach, so redirect the branch to
4911 the local stub for this function. */
4912 struct elf_aarch64_stub_hash_entry *stub_entry;
4913 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
4916 if (stub_entry != NULL)
4917 value = (stub_entry->stub_offset
4918 + stub_entry->stub_sec->output_offset
4919 + stub_entry->stub_sec->output_section->vma);
4923 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4924 signed_addend, weak_undef_p);
4927 case BFD_RELOC_AARCH64_16_PCREL:
4928 case BFD_RELOC_AARCH64_32_PCREL:
4929 case BFD_RELOC_AARCH64_64_PCREL:
4930 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4931 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4932 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4933 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4935 && (input_section->flags & SEC_ALLOC) != 0
4936 && (input_section->flags & SEC_READONLY) != 0
4940 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4942 (*_bfd_error_handler)
4943 (_("%B: relocation %s against external symbol `%s' can not be used"
4944 " when making a shared object; recompile with -fPIC"),
4945 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
4946 h->root.root.string);
4947 bfd_set_error (bfd_error_bad_value);
4951 case BFD_RELOC_AARCH64_16:
4953 case BFD_RELOC_AARCH64_32:
4955 case BFD_RELOC_AARCH64_ADD_LO12:
4956 case BFD_RELOC_AARCH64_BRANCH19:
4957 case BFD_RELOC_AARCH64_LDST128_LO12:
4958 case BFD_RELOC_AARCH64_LDST16_LO12:
4959 case BFD_RELOC_AARCH64_LDST32_LO12:
4960 case BFD_RELOC_AARCH64_LDST64_LO12:
4961 case BFD_RELOC_AARCH64_LDST8_LO12:
4962 case BFD_RELOC_AARCH64_MOVW_G0:
4963 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4964 case BFD_RELOC_AARCH64_MOVW_G0_S:
4965 case BFD_RELOC_AARCH64_MOVW_G1:
4966 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4967 case BFD_RELOC_AARCH64_MOVW_G1_S:
4968 case BFD_RELOC_AARCH64_MOVW_G2:
4969 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4970 case BFD_RELOC_AARCH64_MOVW_G2_S:
4971 case BFD_RELOC_AARCH64_MOVW_G3:
4972 case BFD_RELOC_AARCH64_TSTBR14:
4973 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4974 signed_addend, weak_undef_p);
4977 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4978 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4979 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
4980 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4981 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
4982 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4983 if (globals->root.sgot == NULL)
4984 BFD_ASSERT (h != NULL);
4989 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4991 unresolved_reloc_p);
4992 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
4993 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
4994 addend = (globals->root.sgot->output_section->vma
4995 + globals->root.sgot->output_offset);
4996 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4997 addend, weak_undef_p);
5002 struct elf_aarch64_local_symbol *locals
5003 = elf_aarch64_locals (input_bfd);
5007 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
5008 (*_bfd_error_handler)
5009 (_("%B: Local symbol descriptor table be NULL when applying "
5010 "relocation %s against local symbol"),
5011 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
5015 off = symbol_got_offset (input_bfd, h, r_symndx);
5016 base_got = globals->root.sgot;
5017 bfd_vma got_entry_addr = (base_got->output_section->vma
5018 + base_got->output_offset + off);
5020 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5022 bfd_put_NN (output_bfd, value, base_got->contents + off);
5027 Elf_Internal_Rela outrel;
5029 /* For a local symbol, we have done the absolute relocation at static
5030 link time. For a shared library, however, we need to update the
5031 contents of the GOT entry according to the shared object's load
5032 base address, so we generate an
5033 R_AARCH64_RELATIVE reloc for the dynamic linker. */
5034 s = globals->root.srelgot;
5038 outrel.r_offset = got_entry_addr;
5039 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
5040 outrel.r_addend = value;
5041 elf_append_rela (output_bfd, s, &outrel);
5044 symbol_got_offset_mark (input_bfd, h, r_symndx);
5047 /* Update the relocation value to the GOT entry address, as we have
5048 transformed the direct data access into an indirect access through the GOT. */
5049 value = got_entry_addr;
5051 if (bfd_r_type == BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15
5052 || bfd_r_type == BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14)
5053 addend = base_got->output_section->vma + base_got->output_offset;
5055 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5056 addend, weak_undef_p);
5061 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5062 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5063 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5064 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5065 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5066 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5067 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5068 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5069 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5070 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5071 if (globals->root.sgot == NULL)
5072 return bfd_reloc_notsupported;
5074 value = (symbol_got_offset (input_bfd, h, r_symndx)
5075 + globals->root.sgot->output_section->vma
5076 + globals->root.sgot->output_offset);
5078 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5080 *unresolved_reloc_p = FALSE;
5083 case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
5084 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5085 signed_addend - dtpoff_base (info),
5089 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5090 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5091 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5092 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5093 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5094 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5095 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5096 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5097 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5098 signed_addend - tpoff_base (info),
5100 *unresolved_reloc_p = FALSE;
5103 case BFD_RELOC_AARCH64_TLSDESC_ADD:
5104 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5105 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5106 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5107 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5108 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5109 case BFD_RELOC_AARCH64_TLSDESC_LDR:
5110 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5111 if (globals->root.sgot == NULL)
5112 return bfd_reloc_notsupported;
5113 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
5114 + globals->root.sgotplt->output_section->vma
5115 + globals->root.sgotplt->output_offset
5116 + globals->sgotplt_jump_table_size);
5118 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
5120 *unresolved_reloc_p = FALSE;
5124 return bfd_reloc_notsupported;
5128 *saved_addend = value;
5130 /* Only apply the final relocation in a sequence. */
5132 return bfd_reloc_continue;
5134 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
5138 /* Handle TLS relaxations. Relaxing is possible for symbols that use
5139 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
5142 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
5143 is to then call final_link_relocate. Return other values in the
5146 static bfd_reloc_status_type
5147 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
5148 bfd *input_bfd, bfd_byte *contents,
5149 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
5151 bfd_boolean is_local = h == NULL;
5152 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
5155 BFD_ASSERT (globals && input_bfd && contents && rel);
5157 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5159 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5160 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5163 /* GD->LE relaxation:
5164 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
5166 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
5168 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5169 return bfd_reloc_continue;
5173 /* GD->IE relaxation:
5174 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
5176 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
5178 return bfd_reloc_continue;
5181 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5185 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5188 /* Tiny TLSDESC->LE relaxation:
5189 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
5190 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
5194 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5195 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5197 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5198 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
5199 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5201 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5202 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
5203 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5204 return bfd_reloc_continue;
5208 /* Tiny TLSDESC->IE relaxation:
5209 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
5210 adr x0, :tlsdesc:var => nop
5214 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5215 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5217 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5218 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5220 bfd_putl32 (0x58000000, contents + rel->r_offset);
5221 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
5222 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5223 return bfd_reloc_continue;
5226 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5229 /* Tiny GD->LE relaxation:
5230 adr x0, :tlsgd:var => mrs x1, tpidr_el0
5231 bl __tls_get_addr => add x0, x1, #:tprel_hi12:var, lsl #12
5232 nop => add x0, x0, #:tprel_lo12_nc:var
5235 /* First kill the tls_get_addr reloc on the bl instruction. */
5236 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5238 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
5239 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
5240 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
5242 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5243 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
5244 rel[1].r_offset = rel->r_offset + 8;
5246 /* Move the current relocation to the second instruction in
5249 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5250 AARCH64_R (TLSLE_ADD_TPREL_HI12));
5251 return bfd_reloc_continue;
5255 /* Tiny GD->IE relaxation:
5256 adr x0, :tlsgd:var => ldr x0, :gottprel:var
5257 bl __tls_get_addr => mrs x1, tpidr_el0
5258 nop => add x0, x0, x1
5261 /* First kill the tls_get_addr reloc on the bl instruction. */
5262 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5263 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5265 bfd_putl32 (0x58000000, contents + rel->r_offset);
5266 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5267 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5268 return bfd_reloc_continue;
5271 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5272 return bfd_reloc_continue;
5274 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5277 /* GD->LE relaxation:
5278 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
5280 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5281 return bfd_reloc_continue;
5285 /* GD->IE relaxation:
5286 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
5288 insn = bfd_getl32 (contents + rel->r_offset);
5290 bfd_putl32 (insn, contents + rel->r_offset);
5291 return bfd_reloc_continue;
5294 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5297 /* GD->LE relaxation
5298 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
5299 bl __tls_get_addr => mrs x1, tpidr_el0
5300 nop => add x0, x1, x0
5303 /* First kill the tls_get_addr reloc on the bl instruction. */
5304 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5305 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5307 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5308 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5309 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5310 return bfd_reloc_continue;
5314 /* GD->IE relaxation
5315 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
5316 BL __tls_get_addr => mrs x1, tpidr_el0
5318 NOP => add x0, x1, x0
5321 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
5323 /* Remove the relocation on the BL instruction. */
5324 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5326 bfd_putl32 (0xf9400000, contents + rel->r_offset);
5328 /* We choose to fixup the BL and NOP instructions using the
5329 offset from the second relocation to allow flexibility in
5330 scheduling instructions between the ADD and BL. */
5331 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
5332 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
5333 return bfd_reloc_continue;
5336 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5337 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5338 /* GD->IE/LE relaxation:
5339 add x0, x0, #:tlsdesc_lo12:var => nop
5342 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
5343 return bfd_reloc_ok;
5345 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5346 /* IE->LE relaxation:
5347 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
5351 insn = bfd_getl32 (contents + rel->r_offset);
5352 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
5354 return bfd_reloc_continue;
5356 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5357 /* IE->LE relaxation:
5358 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
5362 insn = bfd_getl32 (contents + rel->r_offset);
5363 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
5365 return bfd_reloc_continue;
5368 return bfd_reloc_continue;
5371 return bfd_reloc_ok;
5374 /* Relocate an AArch64 ELF section. */
5377 elfNN_aarch64_relocate_section (bfd *output_bfd,
5378 struct bfd_link_info *info,
5380 asection *input_section,
5382 Elf_Internal_Rela *relocs,
5383 Elf_Internal_Sym *local_syms,
5384 asection **local_sections)
5386 Elf_Internal_Shdr *symtab_hdr;
5387 struct elf_link_hash_entry **sym_hashes;
5388 Elf_Internal_Rela *rel;
5389 Elf_Internal_Rela *relend;
5391 struct elf_aarch64_link_hash_table *globals;
5392 bfd_boolean save_addend = FALSE;
5395 globals = elf_aarch64_hash_table (info);
5397 symtab_hdr = &elf_symtab_hdr (input_bfd);
5398 sym_hashes = elf_sym_hashes (input_bfd);
5401 relend = relocs + input_section->reloc_count;
5402 for (; rel < relend; rel++)
5404 unsigned int r_type;
5405 bfd_reloc_code_real_type bfd_r_type;
5406 bfd_reloc_code_real_type relaxed_bfd_r_type;
5407 reloc_howto_type *howto;
5408 unsigned long r_symndx;
5409 Elf_Internal_Sym *sym;
5411 struct elf_link_hash_entry *h;
5413 bfd_reloc_status_type r;
5416 bfd_boolean unresolved_reloc = FALSE;
5417 char *error_message = NULL;
5419 r_symndx = ELFNN_R_SYM (rel->r_info);
5420 r_type = ELFNN_R_TYPE (rel->r_info);
5422 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
5423 howto = bfd_reloc.howto;
5427 (*_bfd_error_handler)
5428 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
5429 input_bfd, input_section, r_type);
5432 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
5438 if (r_symndx < symtab_hdr->sh_info)
5440 sym = local_syms + r_symndx;
5441 sym_type = ELFNN_ST_TYPE (sym->st_info);
5442 sec = local_sections[r_symndx];
5444 /* An object file might have a reference to a local
5445 undefined symbol. This is a daft object file, but we
5446 should at least do something about it. */
5447 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
5448 && bfd_is_und_section (sec)
5449 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
5451 if (!info->callbacks->undefined_symbol
5452 (info, bfd_elf_string_from_elf_section
5453 (input_bfd, symtab_hdr->sh_link, sym->st_name),
5454 input_bfd, input_section, rel->r_offset, TRUE))
5458 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
5460 /* Relocate against local STT_GNU_IFUNC symbol. */
5461 if (!info->relocatable
5462 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
5464 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
5469 /* Set STT_GNU_IFUNC symbol value. */
5470 h->root.u.def.value = sym->st_value;
5471 h->root.u.def.section = sec;
5476 bfd_boolean warned, ignored;
5478 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
5479 r_symndx, symtab_hdr, sym_hashes,
5481 unresolved_reloc, warned, ignored);
5486 if (sec != NULL && discarded_section (sec))
5487 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
5488 rel, 1, relend, howto, 0, contents);
5490 if (info->relocatable)
5494 name = h->root.root.string;
5497 name = (bfd_elf_string_from_elf_section
5498 (input_bfd, symtab_hdr->sh_link, sym->st_name));
5499 if (name == NULL || *name == '\0')
5500 name = bfd_section_name (input_bfd, sec);
5504 && r_type != R_AARCH64_NONE
5505 && r_type != R_AARCH64_NULL
5507 || h->root.type == bfd_link_hash_defined
5508 || h->root.type == bfd_link_hash_defweak)
5509 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
5511 (*_bfd_error_handler)
5512 ((sym_type == STT_TLS
5513 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
5514 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
5516 input_section, (long) rel->r_offset, howto->name, name);
5519 /* We relax only if we can see that there can be a valid transition
5520 from a reloc type to another.
5521 We call elfNN_aarch64_final_link_relocate unless we're completely
5522 done, i.e., the relaxation produced the final output we want. */
5524 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
5526 if (relaxed_bfd_r_type != bfd_r_type)
5528 bfd_r_type = relaxed_bfd_r_type;
5529 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
5530 BFD_ASSERT (howto != NULL);
5531 r_type = howto->type;
5532 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
5533 unresolved_reloc = 0;
5536 r = bfd_reloc_continue;
5538 /* There may be multiple consecutive relocations for the
5539 same offset. In that case we are supposed to treat the
5540 output of each relocation as the addend for the next. */
5541 if (rel + 1 < relend
5542 && rel->r_offset == rel[1].r_offset
5543 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
5544 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
5547 save_addend = FALSE;
5549 if (r == bfd_reloc_continue)
5550 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
5551 input_section, contents, rel,
5552 relocation, info, sec,
5553 h, &unresolved_reloc,
5554 save_addend, &addend, sym);
5556 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5558 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5559 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5560 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5561 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
5562 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
5563 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
5564 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5566 bfd_boolean need_relocs = FALSE;
5571 off = symbol_got_offset (input_bfd, h, r_symndx);
5572 indx = h && h->dynindx != -1 ? h->dynindx : 0;
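/* Dynamic DTPMOD/DTPREL relocations are emitted when producing a shared
   object or when the symbol is dynamic, except for undefined weak
   symbols with non-default visibility; in all other cases the GOT slots
   are filled in statically just below.  */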
5575 (info->shared || indx != 0) &&
5577 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5578 || h->root.type != bfd_link_hash_undefweak);
5580 BFD_ASSERT (globals->root.srelgot != NULL);
5584 Elf_Internal_Rela rela;
5585 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
5587 rela.r_offset = globals->root.sgot->output_section->vma +
5588 globals->root.sgot->output_offset + off;
5591 loc = globals->root.srelgot->contents;
5592 loc += globals->root.srelgot->reloc_count++
5593 * RELOC_SIZE (htab);
5594 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5596 bfd_reloc_code_real_type real_type =
5597 elfNN_aarch64_bfd_reloc_from_type (r_type);
5599 if (real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PREL21
5600 || real_type == BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21
5601 || real_type == BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC)
5603 /* For local dynamic, don't generate DTPREL in any case.
5604 Initialize the DTPREL slot to zero, so we get the module
5605 base address when invoking the runtime TLS resolver. */
5606 bfd_put_NN (output_bfd, 0,
5607 globals->root.sgot->contents + off
5612 bfd_put_NN (output_bfd,
5613 relocation - dtpoff_base (info),
5614 globals->root.sgot->contents + off
5619 /* This TLS symbol is global. We emit a
5620 relocation to fixup the tls offset at load
5623 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
5626 (globals->root.sgot->output_section->vma
5627 + globals->root.sgot->output_offset + off
5630 loc = globals->root.srelgot->contents;
5631 loc += globals->root.srelgot->reloc_count++
5632 * RELOC_SIZE (globals);
5633 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5634 bfd_put_NN (output_bfd, (bfd_vma) 0,
5635 globals->root.sgot->contents + off
5641 bfd_put_NN (output_bfd, (bfd_vma) 1,
5642 globals->root.sgot->contents + off);
5643 bfd_put_NN (output_bfd,
5644 relocation - dtpoff_base (info),
5645 globals->root.sgot->contents + off
5649 symbol_got_offset_mark (input_bfd, h, r_symndx);
5653 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5654 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5655 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5656 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5658 bfd_boolean need_relocs = FALSE;
5663 off = symbol_got_offset (input_bfd, h, r_symndx);
5665 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5668 (info->shared || indx != 0) &&
5670 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5671 || h->root.type != bfd_link_hash_undefweak);
5673 BFD_ASSERT (globals->root.srelgot != NULL);
5677 Elf_Internal_Rela rela;
5680 rela.r_addend = relocation - dtpoff_base (info);
5684 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5685 rela.r_offset = globals->root.sgot->output_section->vma +
5686 globals->root.sgot->output_offset + off;
5688 loc = globals->root.srelgot->contents;
5689 loc += globals->root.srelgot->reloc_count++
5690 * RELOC_SIZE (htab);
5692 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5694 bfd_put_NN (output_bfd, rela.r_addend,
5695 globals->root.sgot->contents + off);
5698 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5699 globals->root.sgot->contents + off);
5701 symbol_got_offset_mark (input_bfd, h, r_symndx);
5705 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5706 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5707 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5708 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5709 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5710 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5711 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5712 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5715 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5716 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5717 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5718 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5719 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5720 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5722 bfd_boolean need_relocs = FALSE;
5723 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5724 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5726 need_relocs = (h == NULL
5727 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5728 || h->root.type != bfd_link_hash_undefweak);
5730 BFD_ASSERT (globals->root.srelgot != NULL);
5731 BFD_ASSERT (globals->root.sgot != NULL);
5736 Elf_Internal_Rela rela;
5737 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5740 rela.r_offset = (globals->root.sgotplt->output_section->vma
5741 + globals->root.sgotplt->output_offset
5742 + off + globals->sgotplt_jump_table_size);
5745 rela.r_addend = relocation - dtpoff_base (info);
5747 /* Allocate the next available slot in the PLT reloc
5748 section to hold our R_AARCH64_TLSDESC, the next
5749 available slot is determined from reloc_count,
5750 which we increment. But note, reloc_count was
5751 artificially moved down while allocating slots for
5752 real PLT relocs such that all of the PLT relocs
5753 will fit above the initial reloc_count and the
5754 extra stuff will fit below. */
5755 loc = globals->root.srelplt->contents;
5756 loc += globals->root.srelplt->reloc_count++
5757 * RELOC_SIZE (globals);
5759 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5761 bfd_put_NN (output_bfd, (bfd_vma) 0,
5762 globals->root.sgotplt->contents + off +
5763 globals->sgotplt_jump_table_size);
5764 bfd_put_NN (output_bfd, (bfd_vma) 0,
5765 globals->root.sgotplt->contents + off +
5766 globals->sgotplt_jump_table_size +
5770 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5781 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5782 because such sections are not SEC_ALLOC and thus ld.so will
5783 not process them. */
5784 if (unresolved_reloc
5785 && !((input_section->flags & SEC_DEBUGGING) != 0
5787 && _bfd_elf_section_offset (output_bfd, info, input_section,
5788 +rel->r_offset) != (bfd_vma) - 1)
5790 (*_bfd_error_handler)
5792 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5793 input_bfd, input_section, (long) rel->r_offset, howto->name,
5794 h->root.root.string);
5798 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5802 case bfd_reloc_overflow:
5803 if (!(*info->callbacks->reloc_overflow)
5804 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
5805 input_bfd, input_section, rel->r_offset))
5809 case bfd_reloc_undefined:
5810 if (!((*info->callbacks->undefined_symbol)
5811 (info, name, input_bfd, input_section,
5812 rel->r_offset, TRUE)))
5816 case bfd_reloc_outofrange:
5817 error_message = _("out of range");
5820 case bfd_reloc_notsupported:
5821 error_message = _("unsupported relocation");
5824 case bfd_reloc_dangerous:
5825 /* error_message should already be set. */
5829 error_message = _("unknown error");
5833 BFD_ASSERT (error_message != NULL);
5834 if (!((*info->callbacks->reloc_dangerous)
5835 (info, error_message, input_bfd, input_section,
5846 /* Set the right machine number. */
5849 elfNN_aarch64_object_p (bfd *abfd)
5852 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5854 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5859 /* Function to keep AArch64 specific flags in the ELF header. */
5862 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5864 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5869 elf_elfheader (abfd)->e_flags = flags;
5870 elf_flags_init (abfd) = TRUE;
5876 /* Merge backend specific data from an object file to the output
5877 object file when linking. */
5880 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5884 bfd_boolean flags_compatible = TRUE;
5887 /* Check if we have the same endianness. */
5888 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5891 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5894 /* The input BFD must have had its flags initialised. */
5895 /* The following seems bogus to me -- The flags are initialized in
5896 the assembler but I don't think an elf_flags_init field is
5897 written into the object. */
5898 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5900 in_flags = elf_elfheader (ibfd)->e_flags;
5901 out_flags = elf_elfheader (obfd)->e_flags;
5903 if (!elf_flags_init (obfd))
5905 /* If the input is the default architecture and had the default
5906 flags then do not bother setting the flags for the output
5907 architecture, instead allow future merges to do this. If no
5908 future merges ever set these flags then they will retain their
5909 uninitialised values which, surprise surprise, correspond
5910 to the default values. */
5911 if (bfd_get_arch_info (ibfd)->the_default
5912 && elf_elfheader (ibfd)->e_flags == 0)
5915 elf_flags_init (obfd) = TRUE;
5916 elf_elfheader (obfd)->e_flags = in_flags;
5918 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5919 && bfd_get_arch_info (obfd)->the_default)
5920 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5921 bfd_get_mach (ibfd));
5926 /* Identical flags must be compatible. */
5927 if (in_flags == out_flags)
5930 /* Check to see if the input BFD actually contains any sections. If
5931 not, its flags may not have been initialised either, but it
5932 cannot actually cause any incompatibility. Do not short-circuit
5933 dynamic objects; their section list may be emptied by
5934 elf_link_add_object_symbols.
5936 Also check to see if there are no code sections in the input.
5937 In this case there is no need to check for code specific flags.
5938 XXX - do we need to worry about floating-point format compatibility
5939 in data sections? */
5940 if (!(ibfd->flags & DYNAMIC))
5942 bfd_boolean null_input_bfd = TRUE;
5943 bfd_boolean only_data_sections = TRUE;
5945 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5947 if ((bfd_get_section_flags (ibfd, sec)
5948 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5949 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5950 only_data_sections = FALSE;
5952 null_input_bfd = FALSE;
5956 if (null_input_bfd || only_data_sections)
5960 return flags_compatible;
5963 /* Display the flags field. */
5966 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5968 FILE *file = (FILE *) ptr;
5969 unsigned long flags;
5971 BFD_ASSERT (abfd != NULL && ptr != NULL);
5973 /* Print normal ELF private data. */
5974 _bfd_elf_print_private_bfd_data (abfd, ptr);
5976 flags = elf_elfheader (abfd)->e_flags;
5977 /* Ignore init flag - it may not be set, despite the flags field
5978 containing valid data. */
5980 /* xgettext:c-format */
5981 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5984 fprintf (file, _("<Unrecognised flag bits set>"));
5991 /* Update the got entry reference counts for the section being removed. */
5994 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5995 struct bfd_link_info *info,
5997 const Elf_Internal_Rela * relocs)
5999 struct elf_aarch64_link_hash_table *htab;
6000 Elf_Internal_Shdr *symtab_hdr;
6001 struct elf_link_hash_entry **sym_hashes;
6002 struct elf_aarch64_local_symbol *locals;
6003 const Elf_Internal_Rela *rel, *relend;
6005 if (info->relocatable)
6008 htab = elf_aarch64_hash_table (info);
6013 elf_section_data (sec)->local_dynrel = NULL;
6015 symtab_hdr = &elf_symtab_hdr (abfd);
6016 sym_hashes = elf_sym_hashes (abfd);
6018 locals = elf_aarch64_locals (abfd);
6020 relend = relocs + sec->reloc_count;
6021 for (rel = relocs; rel < relend; rel++)
6023 unsigned long r_symndx;
6024 unsigned int r_type;
6025 struct elf_link_hash_entry *h = NULL;
6027 r_symndx = ELFNN_R_SYM (rel->r_info);
6029 if (r_symndx >= symtab_hdr->sh_info)
6032 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6033 while (h->root.type == bfd_link_hash_indirect
6034 || h->root.type == bfd_link_hash_warning)
6035 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6039 Elf_Internal_Sym *isym;
6041 /* A local symbol. */
6042 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6045 /* Check relocation against local STT_GNU_IFUNC symbol. */
6047 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6049 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
6057 struct elf_aarch64_link_hash_entry *eh;
6058 struct elf_dyn_relocs **pp;
6059 struct elf_dyn_relocs *p;
6061 eh = (struct elf_aarch64_link_hash_entry *) h;
6063 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
6066 /* Everything must go for SEC. */
6072 r_type = ELFNN_R_TYPE (rel->r_info);
6073 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
6075 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6076 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6077 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6078 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6079 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6080 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6081 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6082 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6083 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6084 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6085 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6086 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6087 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6088 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6089 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6090 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6091 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6092 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6093 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6094 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6095 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6096 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6097 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6098 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6099 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6100 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6101 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6102 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6103 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6104 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6107 if (h->got.refcount > 0)
6108 h->got.refcount -= 1;
6110 if (h->type == STT_GNU_IFUNC)
6112 if (h->plt.refcount > 0)
6113 h->plt.refcount -= 1;
6116 else if (locals != NULL)
6118 if (locals[r_symndx].got_refcount > 0)
6119 locals[r_symndx].got_refcount -= 1;
6123 case BFD_RELOC_AARCH64_CALL26:
6124 case BFD_RELOC_AARCH64_JUMP26:
6125 /* If this is a local symbol then we resolve it
6126 directly without creating a PLT entry. */
6130 if (h->plt.refcount > 0)
6131 h->plt.refcount -= 1;
6134 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6135 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6136 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6137 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6138 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6139 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6140 case BFD_RELOC_AARCH64_MOVW_G3:
6141 case BFD_RELOC_AARCH64_NN:
6142 if (h != NULL && info->executable)
6144 if (h->plt.refcount > 0)
6145 h->plt.refcount -= 1;
6157 /* Adjust a symbol defined by a dynamic object and referenced by a
6158 regular object. The current definition is in some section of the
6159 dynamic object, but we're not including those sections. We have to
6160 change the definition to something the rest of the link can
6164 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
6165 struct elf_link_hash_entry *h)
6167 struct elf_aarch64_link_hash_table *htab;
6170 /* If this is a function, put it in the procedure linkage table. We
6171 will fill in the contents of the procedure linkage table later,
6172 when we know the address of the .got section. */
6173 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
6175 if (h->plt.refcount <= 0
6176 || (h->type != STT_GNU_IFUNC
6177 && (SYMBOL_CALLS_LOCAL (info, h)
6178 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
6179 && h->root.type == bfd_link_hash_undefweak))))
6181 /* This case can occur if we saw a CALL26 reloc in
6182 an input file, but the symbol wasn't referred to
6183 by a dynamic object or all references were
6184 garbage collected. In which case we can end up
6186 h->plt.offset = (bfd_vma) - 1;
6193 /* Otherwise, reset to -1. */
6194 h->plt.offset = (bfd_vma) - 1;
6197 /* If this is a weak symbol, and there is a real definition, the
6198 processor independent code will have arranged for us to see the
6199 real definition first, and we can just use the same value. */
6200 if (h->u.weakdef != NULL)
6202 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
6203 || h->u.weakdef->root.type == bfd_link_hash_defweak);
6204 h->root.u.def.section = h->u.weakdef->root.u.def.section;
6205 h->root.u.def.value = h->u.weakdef->root.u.def.value;
6206 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
6207 h->non_got_ref = h->u.weakdef->non_got_ref;
6211 /* If we are creating a shared library, we must presume that the
6212 only references to the symbol are via the global offset table.
6213 For such cases we need not do anything here; the relocations will
6214 be handled correctly by relocate_section. */
6218 /* If there are no references to this symbol that do not use the
6219 GOT, we don't need to generate a copy reloc. */
6220 if (!h->non_got_ref)
6223 /* If -z nocopyreloc was given, we won't generate them either. */
6224 if (info->nocopyreloc)
6230 /* We must allocate the symbol in our .dynbss section, which will
6231 become part of the .bss section of the executable. There will be
6232 an entry for this symbol in the .dynsym section. The dynamic
6233 object will contain position independent code, so all references
6234 from the dynamic object to this symbol will go through the global
6235 offset table. The dynamic linker will use the .dynsym entry to
6236 determine the address it must put in the global offset table, so
6237 both the dynamic object and the regular object will refer to the
6238 same memory location for the variable. */
6240 htab = elf_aarch64_hash_table (info);
6242 /* We must generate an R_AARCH64_COPY reloc to tell the dynamic linker
6243 to copy the initial value out of the dynamic object and into the
6244 runtime process image. */
6245 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
6247 htab->srelbss->size += RELOC_SIZE (htab);
6253 return _bfd_elf_adjust_dynamic_copy (info, h, s);
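/* Concretely (an illustrative scenario, with hypothetical names): a
   non-PIC executable that reads a variable `bar' defined only in
   libfoo.so gets a `bar' slot reserved in .dynbss plus the
   R_AARCH64_COPY reloc sized above; at load time the dynamic linker
   copies bar's initial value into that slot, and both the executable
   and libfoo.so then use that copy.  */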
6258 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
6260 struct elf_aarch64_local_symbol *locals;
6261 locals = elf_aarch64_locals (abfd);
6264 locals = (struct elf_aarch64_local_symbol *)
6265 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
6268 elf_aarch64_locals (abfd) = locals;
6273 /* Create the .got section to hold the global offset table. */
6276 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
6278 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6281 struct elf_link_hash_entry *h;
6282 struct elf_link_hash_table *htab = elf_hash_table (info);
6284 /* This function may be called more than once. */
6285 s = bfd_get_linker_section (abfd, ".got");
6289 flags = bed->dynamic_sec_flags;
6291 s = bfd_make_section_anyway_with_flags (abfd,
6292 (bed->rela_plts_and_copies_p
6293 ? ".rela.got" : ".rel.got"),
6294 (bed->dynamic_sec_flags
6297 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6301 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
6303 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6306 htab->sgot->size += GOT_ENTRY_SIZE;
6308 if (bed->want_got_sym)
6310 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
6311 (or .got.plt) section. We don't do this in the linker script
6312 because we don't want to define the symbol if we are not creating
6313 a global offset table. */
6314 h = _bfd_elf_define_linkage_sym (abfd, info, s,
6315 "_GLOBAL_OFFSET_TABLE_");
6316 elf_hash_table (info)->hgot = h;
6321 if (bed->want_got_plt)
6323 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
6325 || !bfd_set_section_alignment (abfd, s,
6326 bed->s->log_file_align))
6331 /* The first bit of the global offset table is the header. */
6332 s->size += bed->got_header_size;
6337 /* Look through the relocs for a section during the first phase. */
6340 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
6341 asection *sec, const Elf_Internal_Rela *relocs)
6343 Elf_Internal_Shdr *symtab_hdr;
6344 struct elf_link_hash_entry **sym_hashes;
6345 const Elf_Internal_Rela *rel;
6346 const Elf_Internal_Rela *rel_end;
6349 struct elf_aarch64_link_hash_table *htab;
6351 if (info->relocatable)
6354 BFD_ASSERT (is_aarch64_elf (abfd));
6356 htab = elf_aarch64_hash_table (info);
6359 symtab_hdr = &elf_symtab_hdr (abfd);
6360 sym_hashes = elf_sym_hashes (abfd);
6362 rel_end = relocs + sec->reloc_count;
6363 for (rel = relocs; rel < rel_end; rel++)
6365 struct elf_link_hash_entry *h;
6366 unsigned long r_symndx;
6367 unsigned int r_type;
6368 bfd_reloc_code_real_type bfd_r_type;
6369 Elf_Internal_Sym *isym;
6371 r_symndx = ELFNN_R_SYM (rel->r_info);
6372 r_type = ELFNN_R_TYPE (rel->r_info);
6374 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
6376 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
6381 if (r_symndx < symtab_hdr->sh_info)
6383 /* A local symbol. */
6384 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6389 /* Check relocation against local STT_GNU_IFUNC symbol. */
6390 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6392 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
6397 /* Fake a STT_GNU_IFUNC symbol. */
6398 h->type = STT_GNU_IFUNC;
6401 h->forced_local = 1;
6402 h->root.type = bfd_link_hash_defined;
6409 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6410 while (h->root.type == bfd_link_hash_indirect
6411 || h->root.type == bfd_link_hash_warning)
6412 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6414 /* PR15323, ref flags aren't set for references in the same
6415 object.  */
6416 h->root.non_ir_ref = 1;
6419 /* Could be done earlier, if h were already available. */
6420 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
6424 /* Create the ifunc sections for static executables. If we
6425 never see an indirect function symbol and are not building
6426 a static executable, those sections will be empty and
6427 won't appear in output. */
6433 case BFD_RELOC_AARCH64_ADD_LO12:
6434 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6435 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6436 case BFD_RELOC_AARCH64_CALL26:
6437 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6438 case BFD_RELOC_AARCH64_JUMP26:
6439 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6440 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6441 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6442 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6443 case BFD_RELOC_AARCH64_NN:
6444 if (htab->root.dynobj == NULL)
6445 htab->root.dynobj = abfd;
6446 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
6451 /* It is referenced by a non-shared object. */
6453 h->root.non_ir_ref = 1;
6458 case BFD_RELOC_AARCH64_NN:
6460 /* We don't need to handle relocs into sections not going into
6461 the "real" output. */
6462 if ((sec->flags & SEC_ALLOC) == 0)
6470 h->plt.refcount += 1;
6471 h->pointer_equality_needed = 1;
6474 /* No need to do anything if we're not creating a shared
6475 object.  */
6480 struct elf_dyn_relocs *p;
6481 struct elf_dyn_relocs **head;
6483 /* We must copy these reloc types into the output file.
6484 Create a reloc section in dynobj and make room for
6485 this reloc.  */
6488 if (htab->root.dynobj == NULL)
6489 htab->root.dynobj = abfd;
6491 sreloc = _bfd_elf_make_dynamic_reloc_section
6492 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
6498 /* If this is a global symbol, we count the number of
6499 relocations we need for this symbol. */
6502 struct elf_aarch64_link_hash_entry *eh;
6503 eh = (struct elf_aarch64_link_hash_entry *) h;
6504 head = &eh->dyn_relocs;
6508 /* Track dynamic relocs needed for local syms too.
6509 We really need local syms available to do this
6510 easily.  Oh well.  */
6515 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6520 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
6524 /* Beware of type punned pointers vs strict aliasing
6525 rules.  */
6526 vpp = &(elf_section_data (s)->local_dynrel);
6527 head = (struct elf_dyn_relocs **) vpp;
6531 if (p == NULL || p->sec != sec)
6533 bfd_size_type amt = sizeof *p;
6534 p = ((struct elf_dyn_relocs *)
6535 bfd_zalloc (htab->root.dynobj, amt));
6548 /* RR: We probably want to keep a consistency check that
6549 there are no dangling GOT_PAGE relocs. */
6550 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6551 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6552 case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
6553 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6554 case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
6555 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6556 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6557 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6558 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6559 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6560 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6561 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6562 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6563 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6564 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6565 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6566 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6567 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6568 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6569 case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
6570 case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
6571 case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
6572 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6573 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6574 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6575 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6576 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6577 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6578 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6579 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6582 unsigned old_got_type;
6584 got_type = aarch64_reloc_got_type (bfd_r_type);
6588 h->got.refcount += 1;
6589 old_got_type = elf_aarch64_hash_entry (h)->got_type;
6593 struct elf_aarch64_local_symbol *locals;
6595 if (!elfNN_aarch64_allocate_local_symbols
6596 (abfd, symtab_hdr->sh_info))
6599 locals = elf_aarch64_locals (abfd);
6600 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6601 locals[r_symndx].got_refcount += 1;
6602 old_got_type = locals[r_symndx].got_type;
6605 /* If a variable is accessed with both general dynamic TLS
6606 methods, two slots may be created. */
6607 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
6608 got_type |= old_got_type;
6610 /* We will already have issued an error message if there
6611 is a TLS/non-TLS mismatch, based on the symbol type.
6612 So just combine any TLS types needed. */
6613 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
6614 && got_type != GOT_NORMAL)
6615 got_type |= old_got_type;
6617 /* If the symbol is accessed by both IE and GD methods, we
6618 are able to relax. Turn off the GD flag, without
6619 messing up with any other kind of TLS types that may be
6620 involved.  */
6621 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
6622 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
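/* Concretely (illustrative): a symbol referenced through both
   R_AARCH64_TLSGD_* and R_AARCH64_TLSIE_* sequences ends up here with
   only GOT_TLS_IE set, so a single IE GOT slot is allocated and the
   GD access sequences can later be relaxed to the IE form.  */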
6624 if (old_got_type != got_type)
6627 elf_aarch64_hash_entry (h)->got_type = got_type;
6630 struct elf_aarch64_local_symbol *locals;
6631 locals = elf_aarch64_locals (abfd);
6632 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6633 locals[r_symndx].got_type = got_type;
6637 if (htab->root.dynobj == NULL)
6638 htab->root.dynobj = abfd;
6639 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
6644 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6645 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6646 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6647 case BFD_RELOC_AARCH64_MOVW_G3:
6650 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6651 (*_bfd_error_handler)
6652 (_("%B: relocation %s against `%s' can not be used when making "
6653 "a shared object; recompile with -fPIC"),
6654 abfd, elfNN_aarch64_howto_table[howto_index].name,
6655 (h) ? h->root.root.string : "a local symbol");
6656 bfd_set_error (bfd_error_bad_value);
6660 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6661 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6662 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6663 if (h != NULL && info->executable)
6665 /* If this reloc is in a read-only section, we might
6666 need a copy reloc. We can't check reliably at this
6667 stage whether the section is read-only, as input
6668 sections have not yet been mapped to output sections.
6669 Tentatively set the flag for now, and correct in
6670 adjust_dynamic_symbol. */
6672 h->plt.refcount += 1;
6673 h->pointer_equality_needed = 1;
6675 /* FIXME: RR we need to handle these in shared libraries,
6676 and essentially bomb out as these are non-PIC
6677 relocations in shared libraries. */
6680 case BFD_RELOC_AARCH64_CALL26:
6681 case BFD_RELOC_AARCH64_JUMP26:
6682 /* If this is a local symbol then we resolve it
6683 directly without creating a PLT entry. */
6688 if (h->plt.refcount <= 0)
6689 h->plt.refcount = 1;
6691 h->plt.refcount += 1;
6702 /* Treat mapping symbols as special target symbols. */
6705 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6708 return bfd_is_aarch64_special_symbol_name (sym->name,
6709 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6712 /* This is a copy of elf_find_function () from elf.c except that
6713 AArch64 mapping symbols are ignored when looking for function names. */
6716 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6720 const char **filename_ptr,
6721 const char **functionname_ptr)
6723 const char *filename = NULL;
6724 asymbol *func = NULL;
6725 bfd_vma low_func = 0;
6728 for (p = symbols; *p != NULL; p++)
6732 q = (elf_symbol_type *) * p;
6734 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6739 filename = bfd_asymbol_name (&q->symbol);
6743 /* Skip mapping symbols. */
6744 if ((q->symbol.flags & BSF_LOCAL)
6745 && (bfd_is_aarch64_special_symbol_name
6746 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6749 if (bfd_get_section (&q->symbol) == section
6750 && q->symbol.value >= low_func && q->symbol.value <= offset)
6752 func = (asymbol *) q;
6753 low_func = q->symbol.value;
6763 *filename_ptr = filename;
6764 if (functionname_ptr)
6765 *functionname_ptr = bfd_asymbol_name (func);
6771 /* Find the nearest line to a particular section and offset, for error
6772 reporting. This code is a duplicate of the code in elf.c, except
6773 that it uses aarch64_elf_find_function. */
6776 elfNN_aarch64_find_nearest_line (bfd *abfd,
6780 const char **filename_ptr,
6781 const char **functionname_ptr,
6782 unsigned int *line_ptr,
6783 unsigned int *discriminator_ptr)
6785 bfd_boolean found = FALSE;
6787 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6788 filename_ptr, functionname_ptr,
6789 line_ptr, discriminator_ptr,
6790 dwarf_debug_sections, 0,
6791 &elf_tdata (abfd)->dwarf2_find_line_info))
6793 if (!*functionname_ptr)
6794 aarch64_elf_find_function (abfd, symbols, section, offset,
6795 *filename_ptr ? NULL : filename_ptr,
6801 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6802 toolchain uses DWARF1. */
6804 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6805 &found, filename_ptr,
6806 functionname_ptr, line_ptr,
6807 &elf_tdata (abfd)->line_info))
6810 if (found && (*functionname_ptr || *line_ptr))
6813 if (symbols == NULL)
6816 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6817 filename_ptr, functionname_ptr))
6825 elfNN_aarch64_find_inliner_info (bfd *abfd,
6826 const char **filename_ptr,
6827 const char **functionname_ptr,
6828 unsigned int *line_ptr)
6831 found = _bfd_dwarf2_find_inliner_info
6832 (abfd, filename_ptr,
6833 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6839 elfNN_aarch64_post_process_headers (bfd *abfd,
6840 struct bfd_link_info *link_info)
6842 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6844 i_ehdrp = elf_elfheader (abfd);
6845 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6847 _bfd_elf_post_process_headers (abfd, link_info);
6850 static enum elf_reloc_type_class
6851 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6852 const asection *rel_sec ATTRIBUTE_UNUSED,
6853 const Elf_Internal_Rela *rela)
6855 switch ((int) ELFNN_R_TYPE (rela->r_info))
6857 case AARCH64_R (RELATIVE):
6858 return reloc_class_relative;
6859 case AARCH64_R (JUMP_SLOT):
6860 return reloc_class_plt;
6861 case AARCH64_R (COPY):
6862 return reloc_class_copy;
6864 return reloc_class_normal;
6868 /* Handle an AArch64 specific section when reading an object file. This is
6869 called when bfd_section_from_shdr finds a section with an unknown
6870 name.  */
6873 elfNN_aarch64_section_from_shdr (bfd *abfd,
6874 Elf_Internal_Shdr *hdr,
6875 const char *name, int shindex)
6877 /* There ought to be a place to keep ELF backend specific flags, but
6878 at the moment there isn't one. We just keep track of the
6879 sections by their name, instead. Fortunately, the ABI gives
6880 names for all the AArch64 specific sections, so we will probably get
6881 away with this.  */
6882 switch (hdr->sh_type)
6884 case SHT_AARCH64_ATTRIBUTES:
6891 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6897 /* A structure used to record a list of sections, independently
6898 of the next and prev fields in the asection structure. */
6899 typedef struct section_list
6902 struct section_list *next;
6903 struct section_list *prev;
6907 /* Unfortunately we need to keep a list of sections for which
6908 an _aarch64_elf_section_data structure has been allocated. This
6909 is because it is possible for functions like elfNN_aarch64_write_section
6910 to be called on a section which has had an elf_data_structure
6911 allocated for it (and so the used_by_bfd field is valid) but
6912 for which the AArch64 extended version of this structure - the
6913 _aarch64_elf_section_data structure - has not been allocated. */
6914 static section_list *sections_with_aarch64_elf_section_data = NULL;
6917 record_section_with_aarch64_elf_section_data (asection *sec)
6919 struct section_list *entry;
6921 entry = bfd_malloc (sizeof (*entry));
6925 entry->next = sections_with_aarch64_elf_section_data;
6927 if (entry->next != NULL)
6928 entry->next->prev = entry;
6929 sections_with_aarch64_elf_section_data = entry;
6932 static struct section_list *
6933 find_aarch64_elf_section_entry (asection *sec)
6935 struct section_list *entry;
6936 static struct section_list *last_entry = NULL;
6938 /* This is a short cut for the typical case where the sections are added
6939 to the sections_with_aarch64_elf_section_data list in forward order and
6940 then looked up here in backwards order. This makes a real difference
6941 to the ld-srec/sec64k.exp linker test. */
6942 entry = sections_with_aarch64_elf_section_data;
6943 if (last_entry != NULL)
6945 if (last_entry->sec == sec)
6947 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6948 entry = last_entry->next;
6951 for (; entry; entry = entry->next)
6952 if (entry->sec == sec)
6956 /* Record the entry prior to this one - it is the entry we are
6957 most likely to want to locate next time. Also this way if we
6958 have been called from
6959 unrecord_section_with_aarch64_elf_section_data () we will not
6960 be caching a pointer that is about to be freed. */
6961 last_entry = entry->prev;
6967 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6969 struct section_list *entry;
6971 entry = find_aarch64_elf_section_entry (sec);
6975 if (entry->prev != NULL)
6976 entry->prev->next = entry->next;
6977 if (entry->next != NULL)
6978 entry->next->prev = entry->prev;
6979 if (entry == sections_with_aarch64_elf_section_data)
6980 sections_with_aarch64_elf_section_data = entry->next;
6989 struct bfd_link_info *info;
6992 int (*func) (void *, const char *, Elf_Internal_Sym *,
6993 asection *, struct elf_link_hash_entry *);
6994 } output_arch_syminfo;
6996 enum map_symbol_type
7003 /* Output a single mapping symbol. */
7006 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
7007 enum map_symbol_type type, bfd_vma offset)
7009 static const char *names[2] = { "$x", "$d" };
7010 Elf_Internal_Sym sym;
7012 sym.st_value = (osi->sec->output_section->vma
7013 + osi->sec->output_offset + offset);
7016 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7017 sym.st_shndx = osi->sec_shndx;
7018 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
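/* For instance, the "$x" emitted at the start of a long-branch stub
   marks the stub's instructions as A64 code, while the "$d" emitted at
   offset 16 (see aarch64_map_one_stub below) marks the literal holding
   the branch target as data, so disassembly and the erratum scans can
   tell the two regions apart.  */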
7023 /* Output mapping symbols for PLT entries associated with H. */
7026 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
7028 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
7031 if (h->root.type == bfd_link_hash_indirect)
7034 if (h->root.type == bfd_link_hash_warning)
7035 /* When warning symbols are created, they **replace** the "real"
7036 entry in the hash table, thus we never get to see the real
7037 symbol in a hash traversal. So look at it now. */
7038 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7040 if (h->plt.offset == (bfd_vma) - 1)
7043 addr = h->plt.offset;
7046 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7053 /* Output a single local symbol for a generated stub. */
7056 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
7057 bfd_vma offset, bfd_vma size)
7059 Elf_Internal_Sym sym;
7061 sym.st_value = (osi->sec->output_section->vma
7062 + osi->sec->output_offset + offset);
7065 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7066 sym.st_shndx = osi->sec_shndx;
7067 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
7071 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
7073 struct elf_aarch64_stub_hash_entry *stub_entry;
7077 output_arch_syminfo *osi;
7079 /* Massage our args to the form they really have. */
7080 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
7081 osi = (output_arch_syminfo *) in_arg;
7083 stub_sec = stub_entry->stub_sec;
7085 /* Ensure this stub is attached to the current section being
7086 processed.  */
7087 if (stub_sec != osi->sec)
7090 addr = (bfd_vma) stub_entry->stub_offset;
7092 stub_name = stub_entry->output_name;
7094 switch (stub_entry->stub_type)
7096 case aarch64_stub_adrp_branch:
7097 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7098 sizeof (aarch64_adrp_branch_stub)))
7100 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7103 case aarch64_stub_long_branch:
7104 if (!elfNN_aarch64_output_stub_sym
7105 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
7107 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7109 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
7112 case aarch64_stub_erratum_835769_veneer:
7113 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7114 sizeof (aarch64_erratum_835769_stub)))
7116 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7119 case aarch64_stub_erratum_843419_veneer:
7120 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
7121 sizeof (aarch64_erratum_843419_stub)))
7123 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
7134 /* Output mapping symbols for linker generated sections. */
7137 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
7138 struct bfd_link_info *info,
7140 int (*func) (void *, const char *,
7143 struct elf_link_hash_entry
7146 output_arch_syminfo osi;
7147 struct elf_aarch64_link_hash_table *htab;
7149 htab = elf_aarch64_hash_table (info);
7155 /* Long calls stubs. */
7156 if (htab->stub_bfd && htab->stub_bfd->sections)
7160 for (stub_sec = htab->stub_bfd->sections;
7161 stub_sec != NULL; stub_sec = stub_sec->next)
7163 /* Ignore non-stub sections. */
7164 if (!strstr (stub_sec->name, STUB_SUFFIX))
7169 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7170 (output_bfd, osi.sec->output_section);
7172 /* The first instruction in a stub is always a branch. */
7173 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
7176 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
7181 /* Finally, output mapping symbols for the PLT. */
7182 if (!htab->root.splt || htab->root.splt->size == 0)
7185 /* For now live without mapping symbols for the plt. */
7186 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7187 (output_bfd, htab->root.splt->output_section);
7188 osi.sec = htab->root.splt;
7190 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
7197 /* Allocate target specific section data. */
7200 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
7202 if (!sec->used_by_bfd)
7204 _aarch64_elf_section_data *sdata;
7205 bfd_size_type amt = sizeof (*sdata);
7207 sdata = bfd_zalloc (abfd, amt);
7210 sec->used_by_bfd = sdata;
7213 record_section_with_aarch64_elf_section_data (sec);
7215 return _bfd_elf_new_section_hook (abfd, sec);
7220 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
7222 void *ignore ATTRIBUTE_UNUSED)
7224 unrecord_section_with_aarch64_elf_section_data (sec);
7228 elfNN_aarch64_close_and_cleanup (bfd *abfd)
7231 bfd_map_over_sections (abfd,
7232 unrecord_section_via_map_over_sections, NULL);
7234 return _bfd_elf_close_and_cleanup (abfd);
7238 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
7241 bfd_map_over_sections (abfd,
7242 unrecord_section_via_map_over_sections, NULL);
7244 return _bfd_free_cached_info (abfd);
7247 /* Create dynamic sections. This is different from the ARM backend in that
7248 the got, plt, gotplt and their relocation sections are all created in the
7249 standard part of the bfd elf backend. */
7252 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
7253 struct bfd_link_info *info)
7255 struct elf_aarch64_link_hash_table *htab;
7257 /* We need to create .got section. */
7258 if (!aarch64_elf_create_got_section (dynobj, info))
7261 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
7264 htab = elf_aarch64_hash_table (info);
7265 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
7267 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
7269 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
7276 /* Allocate space in .plt, .got and associated reloc sections for
7277 dynamic relocs.  */
7280 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
7282 struct bfd_link_info *info;
7283 struct elf_aarch64_link_hash_table *htab;
7284 struct elf_aarch64_link_hash_entry *eh;
7285 struct elf_dyn_relocs *p;
7287 /* An example of a bfd_link_hash_indirect symbol is a versioned
7288 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7289 -> __gxx_personality_v0(bfd_link_hash_defined)
7291 There is no need to process bfd_link_hash_indirect symbols here
7292 because we will also be presented with the concrete instance of
7293 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7294 called to copy all relevant data from the generic to the concrete
7297 if (h->root.type == bfd_link_hash_indirect)
7300 if (h->root.type == bfd_link_hash_warning)
7301 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7303 info = (struct bfd_link_info *) inf;
7304 htab = elf_aarch64_hash_table (info);
7306 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
7307 here if it is defined and referenced in a non-shared object. */
7308 if (h->type == STT_GNU_IFUNC
7311 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
7313 /* Make sure this symbol is output as a dynamic symbol.
7314 Undefined weak syms won't yet be marked as dynamic. */
7315 if (h->dynindx == -1 && !h->forced_local)
7317 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7321 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
7323 asection *s = htab->root.splt;
7325 /* If this is the first .plt entry, make room for the special
7326 first entry.  */
7327 if (s->size == 0)
7328 s->size += htab->plt_header_size;
7330 h->plt.offset = s->size;
7332 /* If this symbol is not defined in a regular file, and we are
7333 not generating a shared library, then set the symbol to this
7334 location in the .plt. This is required to make function
7335 pointers compare as equal between the normal executable and
7336 the shared library. */
7337 if (!info->shared && !h->def_regular)
7339 h->root.u.def.section = s;
7340 h->root.u.def.value = h->plt.offset;
7343 /* Make room for this entry. For now we only create the
7344 small model PLT entries. We later need to find a way
7345 of relaxing into these from the large model PLT entries. */
7346 s->size += PLT_SMALL_ENTRY_SIZE;
7348 /* We also need to make an entry in the .got.plt section, which
7349 will be placed in the .got section by the linker script. */
7350 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
7352 /* We also need to make an entry in the .rela.plt section. */
7353 htab->root.srelplt->size += RELOC_SIZE (htab);
7355 /* We need to ensure that all GOT entries that serve the PLT
7356 are consecutive with the special GOT slots [0] [1] and
7357 [2]. Any additional relocations, such as
7358 R_AARCH64_TLSDESC, must be placed after the PLT related
7359 entries. We abuse the reloc_count such that during
7360 sizing we adjust reloc_count to indicate the number of
7361 PLT related reserved entries. In subsequent phases when
7362 filling in the contents of the reloc entries, PLT related
7363 entries are placed by computing their PLT index (0
7364 .. reloc_count), while other non-PLT relocs are placed
7365 at the slot indicated by reloc_count, and reloc_count is
7366 then updated.  */
7368 htab->root.srelplt->reloc_count++;
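/* Illustration of the bookkeeping described above (illustrative
   counts): with three PLT entries and one TLSDESC user, sizing leaves
   srelplt->reloc_count == 3; the JUMP_SLOT relocs are later written at
   slots 0..2 (their PLT index), while the R_AARCH64_TLSDESC reloc is
   appended at the slot given by reloc_count, which is only then
   bumped to 4.  */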
7372 h->plt.offset = (bfd_vma) - 1;
7378 h->plt.offset = (bfd_vma) - 1;
7382 eh = (struct elf_aarch64_link_hash_entry *) h;
7383 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7385 if (h->got.refcount > 0)
7388 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
7390 h->got.offset = (bfd_vma) - 1;
7392 dyn = htab->root.dynamic_sections_created;
7394 /* Make sure this symbol is output as a dynamic symbol.
7395 Undefined weak syms won't yet be marked as dynamic. */
7396 if (dyn && h->dynindx == -1 && !h->forced_local)
7398 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7402 if (got_type == GOT_UNKNOWN)
7405 else if (got_type == GOT_NORMAL)
7407 h->got.offset = htab->root.sgot->size;
7408 htab->root.sgot->size += GOT_ENTRY_SIZE;
7409 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7410 || h->root.type != bfd_link_hash_undefweak)
7412 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7414 htab->root.srelgot->size += RELOC_SIZE (htab);
7420 if (got_type & GOT_TLSDESC_GD)
7422 eh->tlsdesc_got_jump_table_offset =
7423 (htab->root.sgotplt->size
7424 - aarch64_compute_jump_table_size (htab));
7425 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7426 h->got.offset = (bfd_vma) - 2;
7429 if (got_type & GOT_TLS_GD)
7431 h->got.offset = htab->root.sgot->size;
7432 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7435 if (got_type & GOT_TLS_IE)
7437 h->got.offset = htab->root.sgot->size;
7438 htab->root.sgot->size += GOT_ENTRY_SIZE;
7441 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7442 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7443 || h->root.type != bfd_link_hash_undefweak)
7446 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7448 if (got_type & GOT_TLSDESC_GD)
7450 htab->root.srelplt->size += RELOC_SIZE (htab);
7451 /* Note reloc_count not incremented here! We have
7452 already adjusted reloc_count for this relocation
7453 type.  */
7455 /* TLSDESC PLT is now needed, but not yet determined. */
7456 htab->tlsdesc_plt = (bfd_vma) - 1;
7459 if (got_type & GOT_TLS_GD)
7460 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7462 if (got_type & GOT_TLS_IE)
7463 htab->root.srelgot->size += RELOC_SIZE (htab);
7469 h->got.offset = (bfd_vma) - 1;
7472 if (eh->dyn_relocs == NULL)
7475 /* In the shared -Bsymbolic case, discard space allocated for
7476 dynamic pc-relative relocs against symbols which turn out to be
7477 defined in regular objects. For the normal shared case, discard
7478 space for pc-relative relocs that have become local due to symbol
7479 visibility changes. */
7483 /* Relocs that use pc_count are those that appear on a call
7484 insn, or certain REL relocs that can be generated via assembly.
7485 We want calls to protected symbols to resolve directly to the
7486 function rather than going via the plt. If people want
7487 function pointer comparisons to work as expected then they
7488 should avoid writing weird assembly. */
7489 if (SYMBOL_CALLS_LOCAL (info, h))
7491 struct elf_dyn_relocs **pp;
7493 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
7495 p->count -= p->pc_count;
7504 /* Also discard relocs on undefined weak syms with non-default
7505 visibility.  */
7506 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
7508 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7509 eh->dyn_relocs = NULL;
7511 /* Make sure undefined weak symbols are output as a dynamic
7512 symbol in PIEs.  */
7513 else if (h->dynindx == -1
7515 && !bfd_elf_link_record_dynamic_symbol (info, h))
7520 else if (ELIMINATE_COPY_RELOCS)
7522 /* For the non-shared case, discard space for relocs against
7523 symbols which turn out to need copy relocs or are not
7524 dynamic.  */
7529 || (htab->root.dynamic_sections_created
7530 && (h->root.type == bfd_link_hash_undefweak
7531 || h->root.type == bfd_link_hash_undefined))))
7533 /* Make sure this symbol is output as a dynamic symbol.
7534 Undefined weak syms won't yet be marked as dynamic. */
7535 if (h->dynindx == -1
7537 && !bfd_elf_link_record_dynamic_symbol (info, h))
7540 /* If that succeeded, we know we'll be keeping all the
7541 relocs.  */
7542 if (h->dynindx != -1)
7546 eh->dyn_relocs = NULL;
7551 /* Finally, allocate space. */
7552 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7556 sreloc = elf_section_data (p->sec)->sreloc;
7558 BFD_ASSERT (sreloc != NULL);
7560 sreloc->size += p->count * RELOC_SIZE (htab);
7566 /* Allocate space in .plt, .got and associated reloc sections for
7567 ifunc dynamic relocs. */
7570 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
7573 struct bfd_link_info *info;
7574 struct elf_aarch64_link_hash_table *htab;
7575 struct elf_aarch64_link_hash_entry *eh;
7577 /* An example of a bfd_link_hash_indirect symbol is a versioned
7578 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7579 -> __gxx_personality_v0(bfd_link_hash_defined)
7581 There is no need to process bfd_link_hash_indirect symbols here
7582 because we will also be presented with the concrete instance of
7583 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7584 called to copy all relevant data from the generic to the concrete
7587 if (h->root.type == bfd_link_hash_indirect)
7590 if (h->root.type == bfd_link_hash_warning)
7591 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7593 info = (struct bfd_link_info *) inf;
7594 htab = elf_aarch64_hash_table (info);
7596 eh = (struct elf_aarch64_link_hash_entry *) h;
7598 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
7599 here if it is defined and referenced in a non-shared object. */
7600 if (h->type == STT_GNU_IFUNC
7602 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
7604 htab->plt_entry_size,
7605 htab->plt_header_size,
7610 /* Allocate space in .plt, .got and associated reloc sections for
7611 local dynamic relocs. */
7614 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
7616 struct elf_link_hash_entry *h
7617 = (struct elf_link_hash_entry *) *slot;
7619 if (h->type != STT_GNU_IFUNC
7623 || h->root.type != bfd_link_hash_defined)
7626 return elfNN_aarch64_allocate_dynrelocs (h, inf);
7629 /* Allocate space in .plt, .got and associated reloc sections for
7630 local ifunc dynamic relocs. */
7633 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
7635 struct elf_link_hash_entry *h
7636 = (struct elf_link_hash_entry *) *slot;
7638 if (h->type != STT_GNU_IFUNC
7642 || h->root.type != bfd_link_hash_defined)
7645 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
7648 /* Find any dynamic relocs that apply to read-only sections. */
7651 aarch64_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
7653 struct elf_aarch64_link_hash_entry * eh;
7654 struct elf_dyn_relocs * p;
7656 eh = (struct elf_aarch64_link_hash_entry *) h;
7657 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7659 asection *s = p->sec;
7661 if (s != NULL && (s->flags & SEC_READONLY) != 0)
7663 struct bfd_link_info *info = (struct bfd_link_info *) inf;
7665 info->flags |= DF_TEXTREL;
7667 /* Not an error, just cut short the traversal. */
7674 /* This is the most important function of all.  Innocuously named
7675 elfNN_aarch64_size_dynamic_sections.  */
7677 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
7678 struct bfd_link_info *info)
7680 struct elf_aarch64_link_hash_table *htab;
7686 htab = elf_aarch64_hash_table ((info));
7687 dynobj = htab->root.dynobj;
7689 BFD_ASSERT (dynobj != NULL);
7691 if (htab->root.dynamic_sections_created)
7693 if (info->executable)
7695 s = bfd_get_linker_section (dynobj, ".interp");
7698 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7699 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7703 /* Set up .got offsets for local syms, and space for local dynamic
7704 relocs.  */
7705 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7707 struct elf_aarch64_local_symbol *locals = NULL;
7708 Elf_Internal_Shdr *symtab_hdr;
7712 if (!is_aarch64_elf (ibfd))
7715 for (s = ibfd->sections; s != NULL; s = s->next)
7717 struct elf_dyn_relocs *p;
7719 for (p = (struct elf_dyn_relocs *)
7720 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7722 if (!bfd_is_abs_section (p->sec)
7723 && bfd_is_abs_section (p->sec->output_section))
7725 /* Input section has been discarded, either because
7726 it is a copy of a linkonce section or due to
7727 linker script /DISCARD/, so we'll be discarding
7728 the relocs too.  */
7730 else if (p->count != 0)
7732 srel = elf_section_data (p->sec)->sreloc;
7733 srel->size += p->count * RELOC_SIZE (htab);
7734 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7735 info->flags |= DF_TEXTREL;
7740 locals = elf_aarch64_locals (ibfd);
7744 symtab_hdr = &elf_symtab_hdr (ibfd);
7745 srel = htab->root.srelgot;
7746 for (i = 0; i < symtab_hdr->sh_info; i++)
7748 locals[i].got_offset = (bfd_vma) - 1;
7749 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7750 if (locals[i].got_refcount > 0)
7752 unsigned got_type = locals[i].got_type;
7753 if (got_type & GOT_TLSDESC_GD)
7755 locals[i].tlsdesc_got_jump_table_offset =
7756 (htab->root.sgotplt->size
7757 - aarch64_compute_jump_table_size (htab));
7758 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7759 locals[i].got_offset = (bfd_vma) - 2;
7762 if (got_type & GOT_TLS_GD)
7764 locals[i].got_offset = htab->root.sgot->size;
7765 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7768 if (got_type & GOT_TLS_IE
7769 || got_type & GOT_NORMAL)
7771 locals[i].got_offset = htab->root.sgot->size;
7772 htab->root.sgot->size += GOT_ENTRY_SIZE;
7775 if (got_type == GOT_UNKNOWN)
7781 if (got_type & GOT_TLSDESC_GD)
7783 htab->root.srelplt->size += RELOC_SIZE (htab);
7784 /* Note RELOC_COUNT not incremented here! */
7785 htab->tlsdesc_plt = (bfd_vma) - 1;
7788 if (got_type & GOT_TLS_GD)
7789 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7791 if (got_type & GOT_TLS_IE
7792 || got_type & GOT_NORMAL)
7793 htab->root.srelgot->size += RELOC_SIZE (htab);
7798 locals[i].got_refcount = (bfd_vma) - 1;
7804 /* Allocate global sym .plt and .got entries, and space for global
7805 sym dynamic relocs. */
7806 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7809 /* Allocate global ifunc sym .plt and .got entries, and space for global
7810 ifunc sym dynamic relocs. */
7811 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7814 /* Allocate .plt and .got entries, and space for local symbols. */
7815 htab_traverse (htab->loc_hash_table,
7816 elfNN_aarch64_allocate_local_dynrelocs,
7819 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7820 htab_traverse (htab->loc_hash_table,
7821 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7824 /* For every jump slot reserved in the sgotplt, reloc_count is
7825 incremented. However, when we reserve space for TLS descriptors,
7826 it's not incremented, so in order to compute the space reserved
7827 for them, it suffices to multiply the reloc count by the jump
7828 slot size.  */
7830 if (htab->root.srelplt)
7831 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
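/* For example (illustrative, ELF64 GOT_ENTRY_SIZE == 8 assumed): four
   reserved jump slots give reloc_count == 4, so the space set aside
   for them in .got.plt is 4 * 8 == 32 bytes.  */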
7833 if (htab->tlsdesc_plt)
7835 if (htab->root.splt->size == 0)
7836 htab->root.splt->size += PLT_ENTRY_SIZE;
7838 htab->tlsdesc_plt = htab->root.splt->size;
7839 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7841 /* If we're not using lazy TLS relocations, don't generate the
7842 GOT entry required. */
7843 if (!(info->flags & DF_BIND_NOW))
7845 htab->dt_tlsdesc_got = htab->root.sgot->size;
7846 htab->root.sgot->size += GOT_ENTRY_SIZE;
7850 /* Init mapping symbol information to use later to distinguish between
7851 code and data while scanning for errata. */
7852 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
7853 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7855 if (!is_aarch64_elf (ibfd))
7857 bfd_elfNN_aarch64_init_maps (ibfd);
7860 /* We now have determined the sizes of the various dynamic sections.
7861 Allocate memory for them. */
7863 for (s = dynobj->sections; s != NULL; s = s->next)
7865 if ((s->flags & SEC_LINKER_CREATED) == 0)
7868 if (s == htab->root.splt
7869 || s == htab->root.sgot
7870 || s == htab->root.sgotplt
7871 || s == htab->root.iplt
7872 || s == htab->root.igotplt || s == htab->sdynbss)
7874 /* Strip this section if we don't need it; see the
7875 comment below.  */
7877 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7879 if (s->size != 0 && s != htab->root.srelplt)
7882 /* We use the reloc_count field as a counter if we need
7883 to copy relocs into the output file. */
7884 if (s != htab->root.srelplt)
7889 /* It's not one of our sections, so don't allocate space. */
7895 /* If we don't need this section, strip it from the
7896 output file. This is mostly to handle .rela.bss and
7897 .rela.plt. We must create both sections in
7898 create_dynamic_sections, because they must be created
7899 before the linker maps input sections to output
7900 sections. The linker does that before
7901 adjust_dynamic_symbol is called, and it is that
7902 function which decides whether anything needs to go
7903 into these sections. */
7905 s->flags |= SEC_EXCLUDE;
7909 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7912 /* Allocate memory for the section contents. We use bfd_zalloc
7913 here in case unused entries are not reclaimed before the
7914 section's contents are written out. This should not happen,
7915 but this way if it does, we get a R_AARCH64_NONE reloc instead
7916 of garbage.  */
7917 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7918 if (s->contents == NULL)
7922 if (htab->root.dynamic_sections_created)
7924 /* Add some entries to the .dynamic section. We fill in the
7925 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7926 must add the entries now so that we get the correct size for
7927 the .dynamic section. The DT_DEBUG entry is filled in by the
7928 dynamic linker and used by the debugger. */
7929 #define add_dynamic_entry(TAG, VAL) \
7930 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7932 if (info->executable)
7934 if (!add_dynamic_entry (DT_DEBUG, 0))
7938 if (htab->root.splt->size != 0)
7940 if (!add_dynamic_entry (DT_PLTGOT, 0)
7941 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7942 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7943 || !add_dynamic_entry (DT_JMPREL, 0))
7946 if (htab->tlsdesc_plt
7947 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7948 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7954 if (!add_dynamic_entry (DT_RELA, 0)
7955 || !add_dynamic_entry (DT_RELASZ, 0)
7956 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7959 /* If any dynamic relocs apply to a read-only section,
7960 then we need a DT_TEXTREL entry. */
7961 if ((info->flags & DF_TEXTREL) == 0)
7962 elf_link_hash_traverse (& htab->root, aarch64_readonly_dynrelocs,
7965 if ((info->flags & DF_TEXTREL) != 0)
7967 if (!add_dynamic_entry (DT_TEXTREL, 0))
7972 #undef add_dynamic_entry
7978 elf_aarch64_update_plt_entry (bfd *output_bfd,
7979 bfd_reloc_code_real_type r_type,
7980 bfd_byte *plt_entry, bfd_vma value)
7982 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7984 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
7988 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7989 struct elf_aarch64_link_hash_table
7990 *htab, bfd *output_bfd,
7991 struct bfd_link_info *info)
7993 bfd_byte *plt_entry;
7996 bfd_vma gotplt_entry_address;
7997 bfd_vma plt_entry_address;
7998 Elf_Internal_Rela rela;
8000 asection *plt, *gotplt, *relplt;
8002 /* When building a static executable, use .iplt, .igot.plt and
8003 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8004 if (htab->root.splt != NULL)
8006 plt = htab->root.splt;
8007 gotplt = htab->root.sgotplt;
8008 relplt = htab->root.srelplt;
8012 plt = htab->root.iplt;
8013 gotplt = htab->root.igotplt;
8014 relplt = htab->root.irelplt;
8017 /* Get the index in the procedure linkage table which
8018 corresponds to this symbol. This is the index of this symbol
8019 in all the symbols for which we are making plt entries. The
8020 first entry in the procedure linkage table is reserved.
8022 Get the offset into the .got table of the entry that
8023 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
8024 bytes. The first three are reserved for the dynamic linker.
8026 For static executables, we don't reserve anything. */
8028 if (plt == htab->root.splt)
8030 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
8031 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
8035 plt_index = h->plt.offset / htab->plt_entry_size;
8036 got_offset = plt_index * GOT_ENTRY_SIZE;
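/* Worked example for the small PLT model (illustrative ELF64 sizes
   assumed: plt_header_size == 32, plt_entry_size == 16,
   GOT_ENTRY_SIZE == 8): a symbol with h->plt.offset == 64 is the third
   PLT entry, so plt_index == (64 - 32) / 16 == 2 and
   got_offset == (2 + 3) * 8 == 40, i.e. GOT.PLT[5], leaving
   GOT.PLT[0..2] reserved for the dynamic linker.  */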
8039 plt_entry = plt->contents + h->plt.offset;
8040 plt_entry_address = plt->output_section->vma
8041 + plt->output_offset + h->plt.offset;
8042 gotplt_entry_address = gotplt->output_section->vma +
8043 gotplt->output_offset + got_offset;
8045 /* Copy in the boiler-plate for the PLTn entry. */
8046 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
8048 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8049 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8050 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8052 PG (gotplt_entry_address) -
8053 PG (plt_entry_address));
8055 /* Fill in the lo12 bits for the load from the pltgot. */
8056 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8058 PG_OFFSET (gotplt_entry_address));
8060 /* Fill in the lo12 bits for the add from the pltgot entry. */
8061 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8063 PG_OFFSET (gotplt_entry_address));
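/* Worked page arithmetic for the three fixups above (illustrative
   addresses only): with gotplt_entry_address == 0x412018 and
   plt_entry_address == 0x400050, PG () clears the low 12 bits, giving
   pages 0x412000 and 0x400000, so the ADRP immediate encodes
   (0x412000 - 0x400000) >> 12 == 0x12 pages, and the LDR/ADD low parts
   use PG_OFFSET (0x412018) == 0x18.  */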
8065 /* All the GOTPLT Entries are essentially initialized to PLT0. */
8066 bfd_put_NN (output_bfd,
8067 plt->output_section->vma + plt->output_offset,
8068 gotplt->contents + got_offset);
8070 rela.r_offset = gotplt_entry_address;
8072 if (h->dynindx == -1
8073 || ((info->executable
8074 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
8076 && h->type == STT_GNU_IFUNC))
8078 /* If an STT_GNU_IFUNC symbol is locally defined, generate
8079 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
8080 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
8081 rela.r_addend = (h->root.u.def.value
8082 + h->root.u.def.section->output_section->vma
8083 + h->root.u.def.section->output_offset);
8087 /* Fill in the entry in the .rela.plt section. */
8088 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
8092 /* Compute the relocation entry to use based on PLT index and do
8093 not adjust reloc_count. The reloc_count has already been adjusted
8094 to account for this entry. */
8095 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
8096 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8099 /* Size sections even though they're not dynamic. We use it to setup
8100 _TLS_MODULE_BASE_, if needed. */
8103 elfNN_aarch64_always_size_sections (bfd *output_bfd,
8104 struct bfd_link_info *info)
8108 if (info->relocatable)
8111 tls_sec = elf_hash_table (info)->tls_sec;
8115 struct elf_link_hash_entry *tlsbase;
8117 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
8118 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
8122 struct bfd_link_hash_entry *h = NULL;
8123 const struct elf_backend_data *bed =
8124 get_elf_backend_data (output_bfd);
8126 if (!(_bfd_generic_link_add_one_symbol
8127 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
8128 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
8131 tlsbase->type = STT_TLS;
8132 tlsbase = (struct elf_link_hash_entry *) h;
8133 tlsbase->def_regular = 1;
8134 tlsbase->other = STV_HIDDEN;
8135 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
8142 /* Finish up dynamic symbol handling. We set the contents of various
8143 dynamic sections here. */
8145 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
8146 struct bfd_link_info *info,
8147 struct elf_link_hash_entry *h,
8148 Elf_Internal_Sym *sym)
8150 struct elf_aarch64_link_hash_table *htab;
8151 htab = elf_aarch64_hash_table (info);
8153 if (h->plt.offset != (bfd_vma) - 1)
8155 asection *plt, *gotplt, *relplt;
8157 /* This symbol has an entry in the procedure linkage table. Set
8158 it up.  */
8160 /* When building a static executable, use .iplt, .igot.plt and
8161 .rela.iplt sections for STT_GNU_IFUNC symbols. */
8162 if (htab->root.splt != NULL)
8164 plt = htab->root.splt;
8165 gotplt = htab->root.sgotplt;
8166 relplt = htab->root.srelplt;
8170 plt = htab->root.iplt;
8171 gotplt = htab->root.igotplt;
8172 relplt = htab->root.irelplt;
8175 /* This symbol has an entry in the procedure linkage table. Set
8176 it up.  */
8177 if ((h->dynindx == -1
8178 && !((h->forced_local || info->executable)
8180 && h->type == STT_GNU_IFUNC))
8186 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
8187 if (!h->def_regular)
8189 /* Mark the symbol as undefined, rather than as defined in
8190 the .plt section. */
8191 sym->st_shndx = SHN_UNDEF;
8192 /* If the symbol is weak we need to clear the value.
8193 Otherwise, the PLT entry would provide a definition for
8194 the symbol even if the symbol wasn't defined anywhere,
8195 and so the symbol would never be NULL. Leave the value if
8196 there were any relocations where pointer equality matters
8197 (this is a clue for the dynamic linker, to make function
8198 pointer comparisons work between an application and shared
8199 library).  */
8200 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
8205 if (h->got.offset != (bfd_vma) - 1
8206 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
8208 Elf_Internal_Rela rela;
8211 /* This symbol has an entry in the global offset table. Set it
8212 up.  */
8213 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
8216 rela.r_offset = (htab->root.sgot->output_section->vma
8217 + htab->root.sgot->output_offset
8218 + (h->got.offset & ~(bfd_vma) 1));
8221 && h->type == STT_GNU_IFUNC)
8225 /* Generate R_AARCH64_GLOB_DAT. */
8232 if (!h->pointer_equality_needed)
8235 /* For a non-shared object, we can't use .got.plt, which
8236 contains the real function address if we need pointer
8237 equality. We load the GOT entry with the PLT entry. */
8238 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
8239 bfd_put_NN (output_bfd, (plt->output_section->vma
8240 + plt->output_offset
8242 htab->root.sgot->contents
8243 + (h->got.offset & ~(bfd_vma) 1));
8247 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
8249 if (!h->def_regular)
8252 BFD_ASSERT ((h->got.offset & 1) != 0);
8253 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
8254 rela.r_addend = (h->root.u.def.value
8255 + h->root.u.def.section->output_section->vma
8256 + h->root.u.def.section->output_offset);
8261 BFD_ASSERT ((h->got.offset & 1) == 0);
8262 bfd_put_NN (output_bfd, (bfd_vma) 0,
8263 htab->root.sgot->contents + h->got.offset);
8264 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
8268 loc = htab->root.srelgot->contents;
8269 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
8270 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8275 Elf_Internal_Rela rela;
8278 /* This symbol needs a copy reloc. Set it up. */
8280 if (h->dynindx == -1
8281 || (h->root.type != bfd_link_hash_defined
8282 && h->root.type != bfd_link_hash_defweak)
8283 || htab->srelbss == NULL)
8286 rela.r_offset = (h->root.u.def.value
8287 + h->root.u.def.section->output_section->vma
8288 + h->root.u.def.section->output_offset);
8289 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
8291 loc = htab->srelbss->contents;
8292 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
8293 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8296 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
8297 be NULL for local symbols. */
8299 && (h == elf_hash_table (info)->hdynamic
8300 || h == elf_hash_table (info)->hgot))
8301 sym->st_shndx = SHN_ABS;
8306 /* Finish up local dynamic symbol handling. We set the contents of
8307 various dynamic sections here. */
8310 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
8312 struct elf_link_hash_entry *h
8313 = (struct elf_link_hash_entry *) *slot;
8314 struct bfd_link_info *info
8315 = (struct bfd_link_info *) inf;
8317 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
8322 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
8323 struct elf_aarch64_link_hash_table
8326 /* Fill in PLT0.  FIXME: RR Note this doesn't distinguish between
8327 small and large plts and at the minute just generates
8328 the small PLT.  */
8330 /* PLT0 of the small PLT looks like this in ELF64 -
8331 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
8332 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
8333 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
8334 // symbol resolver.
8335 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
8336 // GOTPLT entry for this.
8337 br x17
8338 PLT0 will be slightly different in ELF32 due to different got entry
8339 size.  */
8341 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
8345 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
8347 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
8350 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
8351 + htab->root.sgotplt->output_offset
8352 + GOT_ENTRY_SIZE * 2);
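/* Illustrative note (ELF64 sizes assumed): GOT_ENTRY_SIZE * 2 == 16,
   so plt_got_2nd_ent is the address of GOT.PLT[2], the slot the
   dynamic linker later fills with its resolver address, matching the
   "PLT_GOT + 16" references in the PLT0 comment above.  */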
8354 plt_base = htab->root.splt->output_section->vma +
8355 htab->root.splt->output_offset;
8357 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8358 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8359 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8360 htab->root.splt->contents + 4,
8361 PG (plt_got_2nd_ent) - PG (plt_base + 4));
8363 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8364 htab->root.splt->contents + 8,
8365 PG_OFFSET (plt_got_2nd_ent));
8367 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8368 htab->root.splt->contents + 12,
8369 PG_OFFSET (plt_got_2nd_ent));
8373 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
8374 struct bfd_link_info *info)
8376 struct elf_aarch64_link_hash_table *htab;
8380 htab = elf_aarch64_hash_table (info);
8381 dynobj = htab->root.dynobj;
8382 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
8384 if (htab->root.dynamic_sections_created)
8386 ElfNN_External_Dyn *dyncon, *dynconend;
8388 if (sdyn == NULL || htab->root.sgot == NULL)
8391 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
8392 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
8393 for (; dyncon < dynconend; dyncon++)
8395 Elf_Internal_Dyn dyn;
8398 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
8406 s = htab->root.sgotplt;
8407 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
8411 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
8415 s = htab->root.srelplt;
8416 dyn.d_un.d_val = s->size;
8420 /* The procedure linkage table relocs (DT_JMPREL) should
8421 not be included in the overall relocs (DT_RELA).
8422 Therefore, we override the DT_RELASZ entry here to
8423 make it not include the JMPREL relocs. Since the
8424 linker script arranges for .rela.plt to follow all
8425 other relocation sections, we don't have to worry
8426 about changing the DT_RELA entry. */
8427 if (htab->root.srelplt != NULL)
8429 s = htab->root.srelplt;
8430 dyn.d_un.d_val -= s->size;
8434 case DT_TLSDESC_PLT:
8435 s = htab->root.splt;
8436 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8437 + htab->tlsdesc_plt;
8440 case DT_TLSDESC_GOT:
8441 s = htab->root.sgot;
8442 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8443 + htab->dt_tlsdesc_got;
8447 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
8452 /* Fill in the special first entry in the procedure linkage table. */
8453 if (htab->root.splt && htab->root.splt->size > 0)
8455 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
8457 elf_section_data (htab->root.splt->output_section)->
8458 this_hdr.sh_entsize = htab->plt_entry_size;
8461 if (htab->tlsdesc_plt)
8463 bfd_put_NN (output_bfd, (bfd_vma) 0,
8464 htab->root.sgot->contents + htab->dt_tlsdesc_got);
8466 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
8467 elfNN_aarch64_tlsdesc_small_plt_entry,
8468 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
8471 bfd_vma adrp1_addr =
8472 htab->root.splt->output_section->vma
8473 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
8475 bfd_vma adrp2_addr = adrp1_addr + 4;
8478 htab->root.sgot->output_section->vma
8479 + htab->root.sgot->output_offset;
8481 bfd_vma pltgot_addr =
8482 htab->root.sgotplt->output_section->vma
8483 + htab->root.sgotplt->output_offset;
8485 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
8487 bfd_byte *plt_entry =
8488 htab->root.splt->contents + htab->tlsdesc_plt;
8490 /* adrp x2, DT_TLSDESC_GOT */
8491 elf_aarch64_update_plt_entry (output_bfd,
8492 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8494 (PG (dt_tlsdesc_got)
8495 - PG (adrp1_addr)));
8497 /* adrp x3, 0 */
8498 elf_aarch64_update_plt_entry (output_bfd,
8499 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8502 - PG (adrp2_addr)));
8504 /* ldr x2, [x2, #0] */
8505 elf_aarch64_update_plt_entry (output_bfd,
8506 BFD_RELOC_AARCH64_LDSTNN_LO12,
8508 PG_OFFSET (dt_tlsdesc_got));
8510 /* add x3, x3, 0 */
8511 elf_aarch64_update_plt_entry (output_bfd,
8512 BFD_RELOC_AARCH64_ADD_LO12,
8514 PG_OFFSET (pltgot_addr));
8519 if (htab->root.sgotplt)
8521 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
8523 (*_bfd_error_handler)
8524 (_("discarded output section: `%A'"), htab->root.sgotplt);
8528 /* Fill in the first three entries in the global offset table. */
8529 if (htab->root.sgotplt->size > 0)
8531 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
8533 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
          bfd_put_NN (output_bfd,
                      (bfd_vma) 0,
                      htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
          bfd_put_NN (output_bfd,
                      (bfd_vma) 0,
                      htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
        }
      if (htab->root.sgot)
        {
          if (htab->root.sgot->size > 0)
            {
              bfd_vma addr =
                sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
              bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
            }
        }

      elf_section_data (htab->root.sgotplt->output_section)->
        this_hdr.sh_entsize = GOT_ENTRY_SIZE;
    }

  if (htab->root.sgot && htab->root.sgot->size > 0)
    elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
      = GOT_ENTRY_SIZE;

  /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols.  */
  htab_traverse (htab->loc_hash_table,
                 elfNN_aarch64_finish_local_dynamic_symbol,
                 info);
  return TRUE;
}
/* Return address for Ith PLT stub in section PLT, for relocation REL
   or (bfd_vma) -1 if it should not be included.  */

static bfd_vma
elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
                           const arelent *rel ATTRIBUTE_UNUSED)
{
  return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
}
/* We use this so we can override certain functions
   (though currently we don't).  */

const struct elf_size_info elfNN_aarch64_size_info =
{
  sizeof (ElfNN_External_Ehdr),
  sizeof (ElfNN_External_Phdr),
  sizeof (ElfNN_External_Shdr),
  sizeof (ElfNN_External_Rel),
  sizeof (ElfNN_External_Rela),
  sizeof (ElfNN_External_Sym),
  sizeof (ElfNN_External_Dyn),
  sizeof (Elf_External_Note),
  4,                    /* Hash table entry size.  */
  1,                    /* Internal relocs per external relocs.  */
  ARCH_SIZE,            /* Arch size.  */
  LOG_FILE_ALIGN,       /* Log_file_align.  */
  ELFCLASSNN, EV_CURRENT,
  bfd_elfNN_write_out_phdrs,
  bfd_elfNN_write_shdrs_and_ehdr,
  bfd_elfNN_checksum_contents,
  bfd_elfNN_write_relocs,
  bfd_elfNN_swap_symbol_in,
  bfd_elfNN_swap_symbol_out,
  bfd_elfNN_slurp_reloc_table,
  bfd_elfNN_slurp_symbol_table,
  bfd_elfNN_swap_dyn_in,
  bfd_elfNN_swap_dyn_out,
  bfd_elfNN_swap_reloc_in,
  bfd_elfNN_swap_reloc_out,
  bfd_elfNN_swap_reloca_in,
  bfd_elfNN_swap_reloca_out
};
#define ELF_ARCH bfd_arch_aarch64
#define ELF_MACHINE_CODE EM_AARCH64
#define ELF_MAXPAGESIZE 0x10000
#define ELF_MINPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000

#define bfd_elfNN_close_and_cleanup \
  elfNN_aarch64_close_and_cleanup

#define bfd_elfNN_bfd_free_cached_info \
  elfNN_aarch64_bfd_free_cached_info

#define bfd_elfNN_bfd_is_target_special_symbol \
  elfNN_aarch64_is_target_special_symbol

#define bfd_elfNN_bfd_link_hash_table_create \
  elfNN_aarch64_link_hash_table_create

#define bfd_elfNN_bfd_merge_private_bfd_data \
  elfNN_aarch64_merge_private_bfd_data

#define bfd_elfNN_bfd_print_private_bfd_data \
  elfNN_aarch64_print_private_bfd_data

#define bfd_elfNN_bfd_reloc_type_lookup \
  elfNN_aarch64_reloc_type_lookup

#define bfd_elfNN_bfd_reloc_name_lookup \
  elfNN_aarch64_reloc_name_lookup

#define bfd_elfNN_bfd_set_private_flags \
  elfNN_aarch64_set_private_flags

#define bfd_elfNN_find_inliner_info \
  elfNN_aarch64_find_inliner_info

#define bfd_elfNN_find_nearest_line \
  elfNN_aarch64_find_nearest_line

#define bfd_elfNN_mkobject \
  elfNN_aarch64_mkobject

#define bfd_elfNN_new_section_hook \
  elfNN_aarch64_new_section_hook

#define elf_backend_adjust_dynamic_symbol \
  elfNN_aarch64_adjust_dynamic_symbol

#define elf_backend_always_size_sections \
  elfNN_aarch64_always_size_sections

#define elf_backend_check_relocs \
  elfNN_aarch64_check_relocs

#define elf_backend_copy_indirect_symbol \
  elfNN_aarch64_copy_indirect_symbol

/* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
   to them in our hash table.  */
#define elf_backend_create_dynamic_sections \
  elfNN_aarch64_create_dynamic_sections

#define elf_backend_init_index_section \
  _bfd_elf_init_2_index_sections

#define elf_backend_finish_dynamic_sections \
  elfNN_aarch64_finish_dynamic_sections

#define elf_backend_finish_dynamic_symbol \
  elfNN_aarch64_finish_dynamic_symbol

#define elf_backend_gc_sweep_hook \
  elfNN_aarch64_gc_sweep_hook

#define elf_backend_object_p \
  elfNN_aarch64_object_p

#define elf_backend_output_arch_local_syms \
  elfNN_aarch64_output_arch_local_syms

#define elf_backend_plt_sym_val \
  elfNN_aarch64_plt_sym_val

#define elf_backend_post_process_headers \
  elfNN_aarch64_post_process_headers

#define elf_backend_relocate_section \
  elfNN_aarch64_relocate_section

#define elf_backend_reloc_type_class \
  elfNN_aarch64_reloc_type_class

#define elf_backend_section_from_shdr \
  elfNN_aarch64_section_from_shdr

#define elf_backend_size_dynamic_sections \
  elfNN_aarch64_size_dynamic_sections

#define elf_backend_size_info \
  elfNN_aarch64_size_info

#define elf_backend_write_section \
  elfNN_aarch64_write_section

#define elf_backend_can_refcount 1
#define elf_backend_can_gc_sections 1
#define elf_backend_plt_readonly 1
#define elf_backend_want_got_plt 1
#define elf_backend_want_plt_sym 0
#define elf_backend_may_use_rel_p 0
#define elf_backend_may_use_rela_p 1
#define elf_backend_default_use_rela_p 1
#define elf_backend_rela_normal 1
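
/* Room for the three reserved entries at the start of .got.plt,
   which elfNN_aarch64_finish_dynamic_sections initialises above.  */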
#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
#define elf_backend_default_execstack 0
#define elf_backend_extern_protected_data 1

#undef elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section ".ARM.attributes"

#include "elfNN-target.h"