1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 /* Notes on implementation:
23 Thread Local Store (TLS)
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
34 R_AARCH64_TLSGD_ADR_PAGE21(foo)
35 add x0, :tlsgd_lo12:foo
36 R_AARCH64_TLSGD_ADD_LO12_NC(foo)
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation which is currently not implemented.
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different from non-
63 TLS local objects, which do not need a GOT entry.
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fixup this
69 relocation with the module identity.
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fixup the offset. For local TLS symbols the static
74 linker fixes up the offset.
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
95 elfNN_aarch64_check_relocs()
97 This function is invoked for each relocation.
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. One time creation of local symbol data structures are
102 created when the first local symbol is seen.
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
107 elfNN_aarch64_allocate_dynrelocs ()
109 For each global with positive reference count we allocate a double
110 GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT, for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset
115 elfNN_aarch64_size_dynamic_sections ()
117 Iterate over all input BFDs, look in the local symbol data structure
118 constructed earlier for local TLS symbols and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
122 elfNN_aarch64_relocate_section ()
124 Calls elfNN_aarch64_final_link_relocate ()
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted once the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
134 elfNN_aarch64_final_link_relocate ()
136 Fixup the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
140 #include "libiberty.h"
142 #include "bfd_stdint.h"
145 #include "objalloc.h"
146 #include "elf/aarch64.h"
147 #include "elfxx-aarch64.h"
/* Reloc-naming and HOWTO-selection helpers for the 64-bit (ELF64) build:
   AARCH64_R maps a bare name to its R_AARCH64_* enumerator, and HOWTO64
   expands to a real HOWTO entry while HOWTO32 becomes an empty placeholder,
   so the shared howto table below only materialises entries valid for this
   word size.  LOG_FILE_ALIGN is log2 of the 8-byte address unit.
   NOTE(review): presumably guarded by an ARCH_SIZE == 64 conditional that
   is not visible in this listing -- confirm against the full file.  */
152 #define AARCH64_R(NAME) R_AARCH64_ ## NAME
153 #define AARCH64_R_STR(NAME) "R_AARCH64_" #NAME
154 #define HOWTO64(...) HOWTO (__VA_ARGS__)
155 #define HOWTO32(...) EMPTY_HOWTO (0)
156 #define LOG_FILE_ALIGN 3
/* ILP32 (ELF32, "P32") counterparts of the helpers above: names map to
   R_AARCH64_P32_* enumerators, HOWTO32 produces real entries and HOWTO64
   empty placeholders, and LOG_FILE_ALIGN is log2 of the 4-byte address
   unit.  NOTE(review): presumably the #else arm of an ARCH_SIZE
   conditional not visible in this listing -- confirm against the full
   file.  */
160 #define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
161 #define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
162 #define HOWTO64(...) EMPTY_HOWTO (0)
163 #define HOWTO32(...) HOWTO (__VA_ARGS__)
164 #define LOG_FILE_ALIGN 2
/* Nonzero if R_TYPE (a BFD reloc code, not an ELF r_type) is any of the
   AArch64 TLS relocations this backend handles: general-dynamic (TLSGD),
   initial-exec (TLSIE), local-exec (TLSLE), the dynamic TLS data relocs
   (DTPMOD/DTPREL/TPREL), plus everything matched by
   IS_AARCH64_TLSDESC_RELOC.  */
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
188 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
/* Nonzero if R_TYPE (a BFD reloc code) is one of the TLS-descriptor
   relocations: the code-sequence relocs used to build the descriptor
   address and call it, plus the dynamic R_AARCH64_TLSDESC reloc the
   static linker places on the GOT slot.  */
190 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
191 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC)
/* This backend never converts dynamic relocs into copy relocs.  */
204 #define ELIMINATE_COPY_RELOCS 0
206 /* Return size of a relocation entry. HTAB is the bfd's
207 elf_aarch64_link_hash_entry.  (The argument is unused: the size of an
   external RELA entry is fixed at compile time for a given NN.)  */
208 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
210 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
211 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
/* Byte sizes of the PLT pieces emitted by the arrays below: the PLT0
   header, a per-symbol small PLT entry, and the TLSDESC trampoline.  */
212 #define PLT_ENTRY_SIZE (32)
213 #define PLT_SMALL_ENTRY_SIZE (16)
214 #define PLT_TLSDESC_ENTRY_SIZE (32)
216 /* Encoding of the AArch64 nop instruction.  */
217 #define INSN_NOP 0xd503201f
/* Bytes of GOT space needed for the PLT: one GOT_ENTRY_SIZE slot per
   entry in .rela.plt, or 0 when no PLT relocation section exists.  */
219 #define aarch64_compute_jump_table_size(htab) \
220 (((htab)->root.srelplt == NULL) ? 0 \
221 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
223 /* The first entry in a procedure linkage table looks like this
224 if the distance between the PLTGOT and the PLT is < 4GB use
225 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
226 in x16 and needs to work out PLTGOT[1] by using an address of
227 [x16,#-GOT_ENTRY_SIZE].
   Instruction encodings are stored little-endian, four bytes each.
   NOTE(review): both x-register (64-bit GOT slots) and w-register
   (32-bit GOT slots) ldr/add variants appear below; the ARCH_SIZE
   preprocessor guards selecting between them are not visible in this
   listing -- confirm against the full file.  */
228 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
230 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
231 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
233 0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
234 0x10, 0x42, 0x00, 0x91, /* add x16, x16,#PLT_GOT+0x10 */
236 0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
237 0x10, 0x22, 0x00, 0x11, /* add w16, w16,#PLT_GOT+0x8 */
239 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
240 0x1f, 0x20, 0x03, 0xd5, /* nop */
241 0x1f, 0x20, 0x03, 0xd5, /* nop */
242 0x1f, 0x20, 0x03, 0xd5, /* nop */
245 /* Per function entry in a procedure linkage table looks like this
246 if the distance between the PLTGOT and the PLT is < 4GB use
247 these PLT entries.
   Each entry loads the target address from this symbol's PLTGOT slot and
   branches to it, leaving the slot address in x16/w16 for the resolver.
   NOTE(review): the x-register and w-register load/add pairs below are
   the ELF64/ELF32 alternatives; their selecting preprocessor guards are
   not visible in this listing -- confirm against the full file.  */
248 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
250 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
252 0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
253 0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
255 0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
256 0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
258 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
/* Trampoline emitted for TLS-descriptor resolution.  The zero immediates
   in the adrp/ldr/add instructions are placeholders patched when the PLT
   is laid out.  x2/x3 are saved and used as scratch; the entry finishes
   with an indirect branch through x2.
   NOTE(review): the x-register and w-register load/add pairs appear to be
   ELF64/ELF32 alternatives, presumably selected by preprocessor guards
   not visible in this listing -- confirm against the full file.  */
261 static const bfd_byte
262 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
264 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
265 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
266 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
268 0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
269 0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
271 0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
272 0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
274 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
275 0x1f, 0x20, 0x03, 0xd5, /* nop */
276 0x1f, 0x20, 0x03, 0xd5, /* nop */
/* Hook both the RELA and REL info-to-howto callbacks of the generic ELF
   backend to this backend's single implementation.  */
279 #define elf_info_to_howto elfNN_aarch64_info_to_howto
280 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
/* NOTE(review): presumably the EI_ABIVERSION value emitted in the ELF
   header -- confirm where this is consumed in the full file.  */
282 #define AARCH64_ELF_ABI_VERSION 0
284 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
285 #define ALL_ONES (~ (bfd_vma) 0)
287 /* Indexed by the bfd internal reloc enumerators.
288 Therefore, the table needs to be synced with BFD_RELOC_AARCH64_*
291 static reloc_howto_type elfNN_aarch64_howto_table[] =
295 /* Basic data relocations. */
298 HOWTO (R_AARCH64_NULL, /* type */
300 3, /* size (0 = byte, 1 = short, 2 = long) */
302 FALSE, /* pc_relative */
304 complain_overflow_dont, /* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_AARCH64_NULL", /* name */
307 FALSE, /* partial_inplace */
310 FALSE), /* pcrel_offset */
312 HOWTO (R_AARCH64_NONE, /* type */
314 3, /* size (0 = byte, 1 = short, 2 = long) */
316 FALSE, /* pc_relative */
318 complain_overflow_dont, /* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_AARCH64_NONE", /* name */
321 FALSE, /* partial_inplace */
324 FALSE), /* pcrel_offset */
328 HOWTO64 (AARCH64_R (ABS64), /* type */
330 4, /* size (4 = long long) */
332 FALSE, /* pc_relative */
334 complain_overflow_unsigned, /* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 AARCH64_R_STR (ABS64), /* name */
337 FALSE, /* partial_inplace */
338 ALL_ONES, /* src_mask */
339 ALL_ONES, /* dst_mask */
340 FALSE), /* pcrel_offset */
343 HOWTO (AARCH64_R (ABS32), /* type */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
347 FALSE, /* pc_relative */
349 complain_overflow_unsigned, /* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 AARCH64_R_STR (ABS32), /* name */
352 FALSE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
358 HOWTO (AARCH64_R (ABS16), /* type */
360 1, /* size (0 = byte, 1 = short, 2 = long) */
362 FALSE, /* pc_relative */
364 complain_overflow_unsigned, /* complain_on_overflow */
365 bfd_elf_generic_reloc, /* special_function */
366 AARCH64_R_STR (ABS16), /* name */
367 FALSE, /* partial_inplace */
368 0xffff, /* src_mask */
369 0xffff, /* dst_mask */
370 FALSE), /* pcrel_offset */
372 /* .xword: (S+A-P) */
373 HOWTO64 (AARCH64_R (PREL64), /* type */
375 4, /* size (4 = long long) */
377 TRUE, /* pc_relative */
379 complain_overflow_signed, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 AARCH64_R_STR (PREL64), /* name */
382 FALSE, /* partial_inplace */
383 ALL_ONES, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 TRUE), /* pcrel_offset */
388 HOWTO (AARCH64_R (PREL32), /* type */
390 2, /* size (0 = byte, 1 = short, 2 = long) */
392 TRUE, /* pc_relative */
394 complain_overflow_signed, /* complain_on_overflow */
395 bfd_elf_generic_reloc, /* special_function */
396 AARCH64_R_STR (PREL32), /* name */
397 FALSE, /* partial_inplace */
398 0xffffffff, /* src_mask */
399 0xffffffff, /* dst_mask */
400 TRUE), /* pcrel_offset */
403 HOWTO (AARCH64_R (PREL16), /* type */
405 1, /* size (0 = byte, 1 = short, 2 = long) */
407 TRUE, /* pc_relative */
409 complain_overflow_signed, /* complain_on_overflow */
410 bfd_elf_generic_reloc, /* special_function */
411 AARCH64_R_STR (PREL16), /* name */
412 FALSE, /* partial_inplace */
413 0xffff, /* src_mask */
414 0xffff, /* dst_mask */
415 TRUE), /* pcrel_offset */
417 /* Group relocations to create a 16, 32, 48 or 64 bit
418 unsigned data or abs address inline. */
420 /* MOVZ: ((S+A) >> 0) & 0xffff */
421 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
423 2, /* size (0 = byte, 1 = short, 2 = long) */
425 FALSE, /* pc_relative */
427 complain_overflow_unsigned, /* complain_on_overflow */
428 bfd_elf_generic_reloc, /* special_function */
429 AARCH64_R_STR (MOVW_UABS_G0), /* name */
430 FALSE, /* partial_inplace */
431 0xffff, /* src_mask */
432 0xffff, /* dst_mask */
433 FALSE), /* pcrel_offset */
435 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
436 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
438 2, /* size (0 = byte, 1 = short, 2 = long) */
440 FALSE, /* pc_relative */
442 complain_overflow_dont, /* complain_on_overflow */
443 bfd_elf_generic_reloc, /* special_function */
444 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
445 FALSE, /* partial_inplace */
446 0xffff, /* src_mask */
447 0xffff, /* dst_mask */
448 FALSE), /* pcrel_offset */
450 /* MOVZ: ((S+A) >> 16) & 0xffff */
451 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
453 2, /* size (0 = byte, 1 = short, 2 = long) */
455 FALSE, /* pc_relative */
457 complain_overflow_unsigned, /* complain_on_overflow */
458 bfd_elf_generic_reloc, /* special_function */
459 AARCH64_R_STR (MOVW_UABS_G1), /* name */
460 FALSE, /* partial_inplace */
461 0xffff, /* src_mask */
462 0xffff, /* dst_mask */
463 FALSE), /* pcrel_offset */
465 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
466 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
470 FALSE, /* pc_relative */
472 complain_overflow_dont, /* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
475 FALSE, /* partial_inplace */
476 0xffff, /* src_mask */
477 0xffff, /* dst_mask */
478 FALSE), /* pcrel_offset */
480 /* MOVZ: ((S+A) >> 32) & 0xffff */
481 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
483 2, /* size (0 = byte, 1 = short, 2 = long) */
485 FALSE, /* pc_relative */
487 complain_overflow_unsigned, /* complain_on_overflow */
488 bfd_elf_generic_reloc, /* special_function */
489 AARCH64_R_STR (MOVW_UABS_G2), /* name */
490 FALSE, /* partial_inplace */
491 0xffff, /* src_mask */
492 0xffff, /* dst_mask */
493 FALSE), /* pcrel_offset */
495 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
496 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
500 FALSE, /* pc_relative */
502 complain_overflow_dont, /* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
505 FALSE, /* partial_inplace */
506 0xffff, /* src_mask */
507 0xffff, /* dst_mask */
508 FALSE), /* pcrel_offset */
510 /* MOVZ: ((S+A) >> 48) & 0xffff */
511 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
513 2, /* size (0 = byte, 1 = short, 2 = long) */
515 FALSE, /* pc_relative */
517 complain_overflow_unsigned, /* complain_on_overflow */
518 bfd_elf_generic_reloc, /* special_function */
519 AARCH64_R_STR (MOVW_UABS_G3), /* name */
520 FALSE, /* partial_inplace */
521 0xffff, /* src_mask */
522 0xffff, /* dst_mask */
523 FALSE), /* pcrel_offset */
525 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
526 signed data or abs address inline. Will change instruction
527 to MOVN or MOVZ depending on sign of calculated value. */
529 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
530 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
532 2, /* size (0 = byte, 1 = short, 2 = long) */
534 FALSE, /* pc_relative */
536 complain_overflow_signed, /* complain_on_overflow */
537 bfd_elf_generic_reloc, /* special_function */
538 AARCH64_R_STR (MOVW_SABS_G0), /* name */
539 FALSE, /* partial_inplace */
540 0xffff, /* src_mask */
541 0xffff, /* dst_mask */
542 FALSE), /* pcrel_offset */
544 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
545 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
547 2, /* size (0 = byte, 1 = short, 2 = long) */
549 FALSE, /* pc_relative */
551 complain_overflow_signed, /* complain_on_overflow */
552 bfd_elf_generic_reloc, /* special_function */
553 AARCH64_R_STR (MOVW_SABS_G1), /* name */
554 FALSE, /* partial_inplace */
555 0xffff, /* src_mask */
556 0xffff, /* dst_mask */
557 FALSE), /* pcrel_offset */
559 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
560 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
562 2, /* size (0 = byte, 1 = short, 2 = long) */
564 FALSE, /* pc_relative */
566 complain_overflow_signed, /* complain_on_overflow */
567 bfd_elf_generic_reloc, /* special_function */
568 AARCH64_R_STR (MOVW_SABS_G2), /* name */
569 FALSE, /* partial_inplace */
570 0xffff, /* src_mask */
571 0xffff, /* dst_mask */
572 FALSE), /* pcrel_offset */
574 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
575 addresses: PG(x) is (x & ~0xfff). */
577 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
578 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
582 TRUE, /* pc_relative */
584 complain_overflow_signed, /* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 AARCH64_R_STR (LD_PREL_LO19), /* name */
587 FALSE, /* partial_inplace */
588 0x7ffff, /* src_mask */
589 0x7ffff, /* dst_mask */
590 TRUE), /* pcrel_offset */
592 /* ADR: (S+A-P) & 0x1fffff */
593 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
595 2, /* size (0 = byte, 1 = short, 2 = long) */
597 TRUE, /* pc_relative */
599 complain_overflow_signed, /* complain_on_overflow */
600 bfd_elf_generic_reloc, /* special_function */
601 AARCH64_R_STR (ADR_PREL_LO21), /* name */
602 FALSE, /* partial_inplace */
603 0x1fffff, /* src_mask */
604 0x1fffff, /* dst_mask */
605 TRUE), /* pcrel_offset */
607 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
608 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
612 TRUE, /* pc_relative */
614 complain_overflow_signed, /* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
617 FALSE, /* partial_inplace */
618 0x1fffff, /* src_mask */
619 0x1fffff, /* dst_mask */
620 TRUE), /* pcrel_offset */
622 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
623 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
625 2, /* size (0 = byte, 1 = short, 2 = long) */
627 TRUE, /* pc_relative */
629 complain_overflow_dont, /* complain_on_overflow */
630 bfd_elf_generic_reloc, /* special_function */
631 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
632 FALSE, /* partial_inplace */
633 0x1fffff, /* src_mask */
634 0x1fffff, /* dst_mask */
635 TRUE), /* pcrel_offset */
637 /* ADD: (S+A) & 0xfff [no overflow check] */
638 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
642 FALSE, /* pc_relative */
644 complain_overflow_dont, /* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
647 FALSE, /* partial_inplace */
648 0x3ffc00, /* src_mask */
649 0x3ffc00, /* dst_mask */
650 FALSE), /* pcrel_offset */
652 /* LD/ST8: (S+A) & 0xfff */
653 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
657 FALSE, /* pc_relative */
659 complain_overflow_dont, /* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
662 FALSE, /* partial_inplace */
663 0xfff, /* src_mask */
664 0xfff, /* dst_mask */
665 FALSE), /* pcrel_offset */
667 /* Relocations for control-flow instructions. */
669 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
670 HOWTO (AARCH64_R (TSTBR14), /* type */
672 2, /* size (0 = byte, 1 = short, 2 = long) */
674 TRUE, /* pc_relative */
676 complain_overflow_signed, /* complain_on_overflow */
677 bfd_elf_generic_reloc, /* special_function */
678 AARCH64_R_STR (TSTBR14), /* name */
679 FALSE, /* partial_inplace */
680 0x3fff, /* src_mask */
681 0x3fff, /* dst_mask */
682 TRUE), /* pcrel_offset */
684 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
685 HOWTO (AARCH64_R (CONDBR19), /* type */
687 2, /* size (0 = byte, 1 = short, 2 = long) */
689 TRUE, /* pc_relative */
691 complain_overflow_signed, /* complain_on_overflow */
692 bfd_elf_generic_reloc, /* special_function */
693 AARCH64_R_STR (CONDBR19), /* name */
694 FALSE, /* partial_inplace */
695 0x7ffff, /* src_mask */
696 0x7ffff, /* dst_mask */
697 TRUE), /* pcrel_offset */
699 /* B: ((S+A-P) >> 2) & 0x3ffffff */
700 HOWTO (AARCH64_R (JUMP26), /* type */
702 2, /* size (0 = byte, 1 = short, 2 = long) */
704 TRUE, /* pc_relative */
706 complain_overflow_signed, /* complain_on_overflow */
707 bfd_elf_generic_reloc, /* special_function */
708 AARCH64_R_STR (JUMP26), /* name */
709 FALSE, /* partial_inplace */
710 0x3ffffff, /* src_mask */
711 0x3ffffff, /* dst_mask */
712 TRUE), /* pcrel_offset */
714 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
715 HOWTO (AARCH64_R (CALL26), /* type */
717 2, /* size (0 = byte, 1 = short, 2 = long) */
719 TRUE, /* pc_relative */
721 complain_overflow_signed, /* complain_on_overflow */
722 bfd_elf_generic_reloc, /* special_function */
723 AARCH64_R_STR (CALL26), /* name */
724 FALSE, /* partial_inplace */
725 0x3ffffff, /* src_mask */
726 0x3ffffff, /* dst_mask */
727 TRUE), /* pcrel_offset */
729 /* LD/ST16: (S+A) & 0xffe */
730 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
732 2, /* size (0 = byte, 1 = short, 2 = long) */
734 FALSE, /* pc_relative */
736 complain_overflow_dont, /* complain_on_overflow */
737 bfd_elf_generic_reloc, /* special_function */
738 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
739 FALSE, /* partial_inplace */
740 0xffe, /* src_mask */
741 0xffe, /* dst_mask */
742 FALSE), /* pcrel_offset */
744 /* LD/ST32: (S+A) & 0xffc */
745 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
747 2, /* size (0 = byte, 1 = short, 2 = long) */
749 FALSE, /* pc_relative */
751 complain_overflow_dont, /* complain_on_overflow */
752 bfd_elf_generic_reloc, /* special_function */
753 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
754 FALSE, /* partial_inplace */
755 0xffc, /* src_mask */
756 0xffc, /* dst_mask */
757 FALSE), /* pcrel_offset */
759 /* LD/ST64: (S+A) & 0xff8 */
760 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
764 FALSE, /* pc_relative */
766 complain_overflow_dont, /* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
769 FALSE, /* partial_inplace */
770 0xff8, /* src_mask */
771 0xff8, /* dst_mask */
772 FALSE), /* pcrel_offset */
774 /* LD/ST128: (S+A) & 0xff0 */
775 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
777 2, /* size (0 = byte, 1 = short, 2 = long) */
779 FALSE, /* pc_relative */
781 complain_overflow_dont, /* complain_on_overflow */
782 bfd_elf_generic_reloc, /* special_function */
783 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
784 FALSE, /* partial_inplace */
785 0xff0, /* src_mask */
786 0xff0, /* dst_mask */
787 FALSE), /* pcrel_offset */
789 /* Set a load-literal immediate field to bits
790 0x1FFFFC of G(S)-P */
791 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
793 2, /* size (0 = byte,1 = short,2 = long) */
795 TRUE, /* pc_relative */
797 complain_overflow_signed, /* complain_on_overflow */
798 bfd_elf_generic_reloc, /* special_function */
799 AARCH64_R_STR (GOT_LD_PREL19), /* name */
800 FALSE, /* partial_inplace */
801 0xffffe0, /* src_mask */
802 0xffffe0, /* dst_mask */
803 TRUE), /* pcrel_offset */
805 /* Get to the page for the GOT entry for the symbol
806 (G(S) - P) using an ADRP instruction. */
807 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
811 TRUE, /* pc_relative */
813 complain_overflow_dont, /* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
816 FALSE, /* partial_inplace */
817 0x1fffff, /* src_mask */
818 0x1fffff, /* dst_mask */
819 TRUE), /* pcrel_offset */
821 /* LD64: GOT offset G(S) & 0xff8 */
822 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
824 2, /* size (0 = byte, 1 = short, 2 = long) */
826 FALSE, /* pc_relative */
828 complain_overflow_dont, /* complain_on_overflow */
829 bfd_elf_generic_reloc, /* special_function */
830 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
831 FALSE, /* partial_inplace */
832 0xff8, /* src_mask */
833 0xff8, /* dst_mask */
834 FALSE), /* pcrel_offset */
836 /* LD32: GOT offset G(S) & 0xffc */
837 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
841 FALSE, /* pc_relative */
843 complain_overflow_dont, /* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
846 FALSE, /* partial_inplace */
847 0xffc, /* src_mask */
848 0xffc, /* dst_mask */
849 FALSE), /* pcrel_offset */
851 /* Get to the page for the GOT entry for the symbol
852 (G(S) - P) using an ADRP instruction. */
853 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
855 2, /* size (0 = byte, 1 = short, 2 = long) */
857 TRUE, /* pc_relative */
859 complain_overflow_dont, /* complain_on_overflow */
860 bfd_elf_generic_reloc, /* special_function */
861 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
862 FALSE, /* partial_inplace */
863 0x1fffff, /* src_mask */
864 0x1fffff, /* dst_mask */
865 TRUE), /* pcrel_offset */
867 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
869 2, /* size (0 = byte, 1 = short, 2 = long) */
871 TRUE, /* pc_relative */
873 complain_overflow_dont, /* complain_on_overflow */
874 bfd_elf_generic_reloc, /* special_function */
875 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
876 FALSE, /* partial_inplace */
877 0x1fffff, /* src_mask */
878 0x1fffff, /* dst_mask */
879 TRUE), /* pcrel_offset */
881 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
882 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
884 2, /* size (0 = byte, 1 = short, 2 = long) */
886 FALSE, /* pc_relative */
888 complain_overflow_dont, /* complain_on_overflow */
889 bfd_elf_generic_reloc, /* special_function */
890 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
891 FALSE, /* partial_inplace */
892 0xfff, /* src_mask */
893 0xfff, /* dst_mask */
894 FALSE), /* pcrel_offset */
896 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
900 FALSE, /* pc_relative */
902 complain_overflow_dont, /* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
905 FALSE, /* partial_inplace */
906 0xffff, /* src_mask */
907 0xffff, /* dst_mask */
908 FALSE), /* pcrel_offset */
910 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
914 FALSE, /* pc_relative */
916 complain_overflow_dont, /* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
919 FALSE, /* partial_inplace */
920 0xffff, /* src_mask */
921 0xffff, /* dst_mask */
922 FALSE), /* pcrel_offset */
924 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
928 FALSE, /* pc_relative */
930 complain_overflow_dont, /* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
933 FALSE, /* partial_inplace */
934 0x1fffff, /* src_mask */
935 0x1fffff, /* dst_mask */
936 FALSE), /* pcrel_offset */
938 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
942 FALSE, /* pc_relative */
944 complain_overflow_dont, /* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
947 FALSE, /* partial_inplace */
948 0xff8, /* src_mask */
949 0xff8, /* dst_mask */
950 FALSE), /* pcrel_offset */
952 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
956 FALSE, /* pc_relative */
958 complain_overflow_dont, /* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
961 FALSE, /* partial_inplace */
962 0xffc, /* src_mask */
963 0xffc, /* dst_mask */
964 FALSE), /* pcrel_offset */
966 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
970 FALSE, /* pc_relative */
972 complain_overflow_dont, /* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
975 FALSE, /* partial_inplace */
976 0x1ffffc, /* src_mask */
977 0x1ffffc, /* dst_mask */
978 FALSE), /* pcrel_offset */
980 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
984 FALSE, /* pc_relative */
986 complain_overflow_unsigned, /* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
989 FALSE, /* partial_inplace */
990 0xffff, /* src_mask */
991 0xffff, /* dst_mask */
992 FALSE), /* pcrel_offset */
994 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
998 FALSE, /* pc_relative */
1000 complain_overflow_dont, /* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1003 FALSE, /* partial_inplace */
1004 0xffff, /* src_mask */
1005 0xffff, /* dst_mask */
1006 FALSE), /* pcrel_offset */
1008 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1009 16, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1012 FALSE, /* pc_relative */
1014 complain_overflow_dont, /* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1017 FALSE, /* partial_inplace */
1018 0xffff, /* src_mask */
1019 0xffff, /* dst_mask */
1020 FALSE), /* pcrel_offset */
1022 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1026 FALSE, /* pc_relative */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1036 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1040 FALSE, /* pc_relative */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1045 FALSE, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1050 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1054 FALSE, /* pc_relative */
1056 complain_overflow_unsigned, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1059 FALSE, /* partial_inplace */
1060 0xfff, /* src_mask */
1061 0xfff, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1064 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1068 FALSE, /* pc_relative */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1073 FALSE, /* partial_inplace */
1074 0xfff, /* src_mask */
1075 0xfff, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1078 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1082 FALSE, /* pc_relative */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1087 FALSE, /* partial_inplace */
1088 0xfff, /* src_mask */
1089 0xfff, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1092 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1096 TRUE, /* pc_relative */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1101 FALSE, /* partial_inplace */
1102 0x0ffffe0, /* src_mask */
1103 0x0ffffe0, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1106 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1110 TRUE, /* pc_relative */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1115 FALSE, /* partial_inplace */
1116 0x1fffff, /* src_mask */
1117 0x1fffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1120 /* Get to the page for the GOT entry for the symbol
1121 (G(S) - P) using an ADRP instruction. */
1122 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1123 12, /* rightshift */
1124 2, /* size (0 = byte, 1 = short, 2 = long) */
1126 TRUE, /* pc_relative */
1128 complain_overflow_dont, /* complain_on_overflow */
1129 bfd_elf_generic_reloc, /* special_function */
1130 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1131 FALSE, /* partial_inplace */
1132 0x1fffff, /* src_mask */
1133 0x1fffff, /* dst_mask */
1134 TRUE), /* pcrel_offset */
1136 /* LD64: GOT offset G(S) & 0xff8. */
1137 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1139 2, /* size (0 = byte, 1 = short, 2 = long) */
1141 FALSE, /* pc_relative */
1143 complain_overflow_dont, /* complain_on_overflow */
1144 bfd_elf_generic_reloc, /* special_function */
1145 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1146 FALSE, /* partial_inplace */
1147 0xff8, /* src_mask */
1148 0xff8, /* dst_mask */
1149 FALSE), /* pcrel_offset */
1151 /* LD32: GOT offset G(S) & 0xffc. */
1152 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1154 2, /* size (0 = byte, 1 = short, 2 = long) */
1156 FALSE, /* pc_relative */
1158 complain_overflow_dont, /* complain_on_overflow */
1159 bfd_elf_generic_reloc, /* special_function */
1160 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1161 FALSE, /* partial_inplace */
1162 0xffc, /* src_mask */
1163 0xffc, /* dst_mask */
1164 FALSE), /* pcrel_offset */
1166 /* ADD: GOT offset G(S) & 0xfff. */
1167 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1169 2, /* size (0 = byte, 1 = short, 2 = long) */
1171 FALSE, /* pc_relative */
1173 complain_overflow_dont, /* complain_on_overflow */
1174 bfd_elf_generic_reloc, /* special_function */
1175 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1176 FALSE, /* partial_inplace */
1177 0xfff, /* src_mask */
1178 0xfff, /* dst_mask */
1179 FALSE), /* pcrel_offset */
1181 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1182 16, /* rightshift */
1183 2, /* size (0 = byte, 1 = short, 2 = long) */
1185 FALSE, /* pc_relative */
1187 complain_overflow_dont, /* complain_on_overflow */
1188 bfd_elf_generic_reloc, /* special_function */
1189 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1190 FALSE, /* partial_inplace */
1191 0xffff, /* src_mask */
1192 0xffff, /* dst_mask */
1193 FALSE), /* pcrel_offset */
1195 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1197 2, /* size (0 = byte, 1 = short, 2 = long) */
1199 FALSE, /* pc_relative */
1201 complain_overflow_dont, /* complain_on_overflow */
1202 bfd_elf_generic_reloc, /* special_function */
1203 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1204 FALSE, /* partial_inplace */
1205 0xffff, /* src_mask */
1206 0xffff, /* dst_mask */
1207 FALSE), /* pcrel_offset */
1209 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1211 2, /* size (0 = byte, 1 = short, 2 = long) */
1213 FALSE, /* pc_relative */
1215 complain_overflow_dont, /* complain_on_overflow */
1216 bfd_elf_generic_reloc, /* special_function */
1217 AARCH64_R_STR (TLSDESC_LDR), /* name */
1218 FALSE, /* partial_inplace */
1221 FALSE), /* pcrel_offset */
1223 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1225 2, /* size (0 = byte, 1 = short, 2 = long) */
1227 FALSE, /* pc_relative */
1229 complain_overflow_dont, /* complain_on_overflow */
1230 bfd_elf_generic_reloc, /* special_function */
1231 AARCH64_R_STR (TLSDESC_ADD), /* name */
1232 FALSE, /* partial_inplace */
1235 FALSE), /* pcrel_offset */
1237 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1239 2, /* size (0 = byte, 1 = short, 2 = long) */
1241 FALSE, /* pc_relative */
1243 complain_overflow_dont, /* complain_on_overflow */
1244 bfd_elf_generic_reloc, /* special_function */
1245 AARCH64_R_STR (TLSDESC_CALL), /* name */
1246 FALSE, /* partial_inplace */
1249 FALSE), /* pcrel_offset */
1251 HOWTO (AARCH64_R (COPY), /* type */
1253 2, /* size (0 = byte, 1 = short, 2 = long) */
1255 FALSE, /* pc_relative */
1257 complain_overflow_bitfield, /* complain_on_overflow */
1258 bfd_elf_generic_reloc, /* special_function */
1259 AARCH64_R_STR (COPY), /* name */
1260 TRUE, /* partial_inplace */
1261 0xffffffff, /* src_mask */
1262 0xffffffff, /* dst_mask */
1263 FALSE), /* pcrel_offset */
1265 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1267 2, /* size (0 = byte, 1 = short, 2 = long) */
1269 FALSE, /* pc_relative */
1271 complain_overflow_bitfield, /* complain_on_overflow */
1272 bfd_elf_generic_reloc, /* special_function */
1273 AARCH64_R_STR (GLOB_DAT), /* name */
1274 TRUE, /* partial_inplace */
1275 0xffffffff, /* src_mask */
1276 0xffffffff, /* dst_mask */
1277 FALSE), /* pcrel_offset */
1279 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1281 2, /* size (0 = byte, 1 = short, 2 = long) */
1283 FALSE, /* pc_relative */
1285 complain_overflow_bitfield, /* complain_on_overflow */
1286 bfd_elf_generic_reloc, /* special_function */
1287 AARCH64_R_STR (JUMP_SLOT), /* name */
1288 TRUE, /* partial_inplace */
1289 0xffffffff, /* src_mask */
1290 0xffffffff, /* dst_mask */
1291 FALSE), /* pcrel_offset */
1293 HOWTO (AARCH64_R (RELATIVE), /* type */
1295 2, /* size (0 = byte, 1 = short, 2 = long) */
1297 FALSE, /* pc_relative */
1299 complain_overflow_bitfield, /* complain_on_overflow */
1300 bfd_elf_generic_reloc, /* special_function */
1301 AARCH64_R_STR (RELATIVE), /* name */
1302 TRUE, /* partial_inplace */
1303 ALL_ONES, /* src_mask */
1304 ALL_ONES, /* dst_mask */
1305 FALSE), /* pcrel_offset */
1307 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1309 2, /* size (0 = byte, 1 = short, 2 = long) */
1311 FALSE, /* pc_relative */
1313 complain_overflow_dont, /* complain_on_overflow */
1314 bfd_elf_generic_reloc, /* special_function */
1316 AARCH64_R_STR (TLS_DTPMOD64), /* name */
1318 AARCH64_R_STR (TLS_DTPMOD), /* name */
1320 FALSE, /* partial_inplace */
1322 ALL_ONES, /* dst_mask */
1323 FALSE), /* pcrel_offset */
1325 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1327 2, /* size (0 = byte, 1 = short, 2 = long) */
1329 FALSE, /* pc_relative */
1331 complain_overflow_dont, /* complain_on_overflow */
1332 bfd_elf_generic_reloc, /* special_function */
1334 AARCH64_R_STR (TLS_DTPREL64), /* name */
1336 AARCH64_R_STR (TLS_DTPREL), /* name */
1338 FALSE, /* partial_inplace */
1340 ALL_ONES, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1343 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1347 FALSE, /* pc_relative */
1349 complain_overflow_dont, /* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1352 AARCH64_R_STR (TLS_TPREL64), /* name */
1354 AARCH64_R_STR (TLS_TPREL), /* name */
1356 FALSE, /* partial_inplace */
1358 ALL_ONES, /* dst_mask */
1359 FALSE), /* pcrel_offset */
1361 HOWTO (AARCH64_R (TLSDESC), /* type */
1363 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 FALSE, /* pc_relative */
1367 complain_overflow_dont, /* complain_on_overflow */
1368 bfd_elf_generic_reloc, /* special_function */
1369 AARCH64_R_STR (TLSDESC), /* name */
1370 FALSE, /* partial_inplace */
1372 ALL_ONES, /* dst_mask */
1373 FALSE), /* pcrel_offset */
1375 HOWTO (AARCH64_R (IRELATIVE), /* type */
1377 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 FALSE, /* pc_relative */
1381 complain_overflow_bitfield, /* complain_on_overflow */
1382 bfd_elf_generic_reloc, /* special_function */
1383 AARCH64_R_STR (IRELATIVE), /* name */
1384 FALSE, /* partial_inplace */
1386 ALL_ONES, /* dst_mask */
1387 FALSE), /* pcrel_offset */
1392 static reloc_howto_type elfNN_aarch64_howto_none =
1393 HOWTO (R_AARCH64_NONE, /* type */
1395 3, /* size (0 = byte, 1 = short, 2 = long) */
1397 FALSE, /* pc_relative */
1399 complain_overflow_dont,/* complain_on_overflow */
1400 bfd_elf_generic_reloc, /* special_function */
1401 "R_AARCH64_NONE", /* name */
1402 FALSE, /* partial_inplace */
1405 FALSE); /* pcrel_offset */
1407 /* Given HOWTO, return the bfd internal relocation enumerator. */
1409 static bfd_reloc_code_real_type
1410 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
1413 = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1414 const ptrdiff_t offset
1415 = howto - elfNN_aarch64_howto_table;
1417 if (offset > 0 && offset < size - 1)
1418 return BFD_RELOC_AARCH64_RELOC_START + offset;
1420 if (howto == &elfNN_aarch64_howto_none)
1421 return BFD_RELOC_AARCH64_NONE;
1423 return BFD_RELOC_AARCH64_RELOC_START;
1426 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1428 static bfd_reloc_code_real_type
1429 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1431 static bfd_boolean initialized_p = FALSE;
1432 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1433 static unsigned int offsets[R_AARCH64_end];
1435 if (initialized_p == FALSE)
1439 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1440 if (elfNN_aarch64_howto_table[i].type != 0)
1441 offsets[elfNN_aarch64_howto_table[i].type] = i;
1443 initialized_p = TRUE;
1446 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1447 return BFD_RELOC_AARCH64_NONE;
1449 /* PR 17512: file: b371e70a. */
1450 if (r_type >= R_AARCH64_end)
1452 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1453 bfd_set_error (bfd_error_bad_value);
1454 return BFD_RELOC_AARCH64_NONE;
1457 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1460 struct elf_aarch64_reloc_map
1462 bfd_reloc_code_real_type from;
1463 bfd_reloc_code_real_type to;
1466 /* Map bfd generic reloc to AArch64-specific reloc. */
1467 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1469 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1471 /* Basic data relocations. */
1472 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1473 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1474 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1475 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1476 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1477 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1478 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1481 /* Given the bfd internal relocation enumerator in CODE, return the
1482 corresponding howto entry. */
1484 static reloc_howto_type *
1485 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1489 /* Convert bfd generic reloc to AArch64-specific reloc. */
1490 if (code < BFD_RELOC_AARCH64_RELOC_START
1491 || code > BFD_RELOC_AARCH64_RELOC_END)
1492 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1493 if (elf_aarch64_reloc_map[i].from == code)
1495 code = elf_aarch64_reloc_map[i].to;
1499 if (code > BFD_RELOC_AARCH64_RELOC_START
1500 && code < BFD_RELOC_AARCH64_RELOC_END)
1501 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1502 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1504 if (code == BFD_RELOC_AARCH64_NONE)
1505 return &elfNN_aarch64_howto_none;
1510 static reloc_howto_type *
1511 elfNN_aarch64_howto_from_type (unsigned int r_type)
1513 bfd_reloc_code_real_type val;
1514 reloc_howto_type *howto;
1519 bfd_set_error (bfd_error_bad_value);
1524 if (r_type == R_AARCH64_NONE)
1525 return &elfNN_aarch64_howto_none;
1527 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1528 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1533 bfd_set_error (bfd_error_bad_value);
1538 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1539 Elf_Internal_Rela *elf_reloc)
1541 unsigned int r_type;
1543 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1544 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1547 static reloc_howto_type *
1548 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1549 bfd_reloc_code_real_type code)
1551 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1556 bfd_set_error (bfd_error_bad_value);
1560 static reloc_howto_type *
1561 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1566 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1567 if (elfNN_aarch64_howto_table[i].name != NULL
1568 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1569 return &elfNN_aarch64_howto_table[i];
1574 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1575 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1576 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1577 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1579 /* The linker script knows the section names for placement.
1580 The entry_names are used to do simple name mangling on the stubs.
1581 Given a function name, and its type, the stub can be found. The
1582 name can be changed. The only requirement is the %s be present. */
1583 #define STUB_ENTRY_NAME "__%s_veneer"
1585 /* The name of the dynamic interpreter. This is put in the .interp
1587 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1589 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1590 (((1 << 25) - 1) << 2)
1591 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
1594 #define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
1595 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1598 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1600 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1601 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1605 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1607 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1608 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1609 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
1612 static const uint32_t aarch64_adrp_branch_stub [] =
1614 0x90000010, /* adrp ip0, X */
1615 /* R_AARCH64_ADR_HI21_PCREL(X) */
1616 0x91000210, /* add ip0, ip0, :lo12:X */
1617 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1618 0xd61f0200, /* br ip0 */
1621 static const uint32_t aarch64_long_branch_stub[] =
1624 0x58000090, /* ldr ip0, 1f */
1626 0x18000090, /* ldr wip0, 1f */
1628 0x10000011, /* adr ip1, #0 */
1629 0x8b110210, /* add ip0, ip0, ip1 */
1630 0xd61f0200, /* br ip0 */
1631 0x00000000, /* 1: .xword or .word
1632 R_AARCH64_PRELNN(X) + 12
1637 static const uint32_t aarch64_erratum_835769_stub[] =
1639 0x00000000, /* Placeholder for multiply accumulate. */
1640 0x14000000, /* b <label> */
1643 /* Section name for stubs is the associated section name plus this
1645 #define STUB_SUFFIX ".stub"
1647 enum elf_aarch64_stub_type
1650 aarch64_stub_adrp_branch,
1651 aarch64_stub_long_branch,
1652 aarch64_stub_erratum_835769_veneer,
1655 struct elf_aarch64_stub_hash_entry
1657 /* Base hash table entry structure. */
1658 struct bfd_hash_entry root;
1660 /* The stub section. */
1663 /* Offset within stub_sec of the beginning of this stub. */
1664 bfd_vma stub_offset;
1666 /* Given the symbol's value and its section we can determine its final
1667 value when building the stubs (so the stub knows where to jump). */
1668 bfd_vma target_value;
1669 asection *target_section;
1671 enum elf_aarch64_stub_type stub_type;
1673 /* The symbol table entry, if any, that this was derived from. */
1674 struct elf_aarch64_link_hash_entry *h;
1676 /* Destination symbol type */
1677 unsigned char st_type;
1679 /* Where this stub is being called from, or, in the case of combined
1680 stub sections, the first input section in the group. */
1683 /* The name for the local symbol at the start of this stub. The
1684 stub name in the hash table has to be unique; this does not, so
1685 it can be friendlier. */
1688 /* The instruction which caused this stub to be generated (only valid for
1689 erratum 835769 workaround stubs at present). */
1690 uint32_t veneered_insn;
1693 /* Used to build a map of a section. This is required for mixed-endian
1696 typedef struct elf_elf_section_map
1701 elf_aarch64_section_map;
1704 typedef struct _aarch64_elf_section_data
1706 struct bfd_elf_section_data elf;
1707 unsigned int mapcount;
1708 unsigned int mapsize;
1709 elf_aarch64_section_map *map;
1711 _aarch64_elf_section_data;
1713 #define elf_aarch64_section_data(sec) \
1714 ((_aarch64_elf_section_data *) elf_section_data (sec))
1716 /* A fix-descriptor for erratum 835769. */
1717 struct aarch64_erratum_835769_fix
1722 uint32_t veneered_insn;
1724 enum elf_aarch64_stub_type stub_type;
1727 /* The size of the thread control block which is defined to be two pointers. */
1728 #define TCB_SIZE (ARCH_SIZE/8)*2
1730 struct elf_aarch64_local_symbol
1732 unsigned int got_type;
1733 bfd_signed_vma got_refcount;
1736 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The
1737 offset is from the end of the jump table and reserved entries
1740 The magic value (bfd_vma) -1 indicates that an offset has not been
1742 bfd_vma tlsdesc_got_jump_table_offset;
1745 struct elf_aarch64_obj_tdata
1747 struct elf_obj_tdata root;
1749 /* local symbol descriptors */
1750 struct elf_aarch64_local_symbol *locals;
1752 /* Zero to warn when linking objects with incompatible enum sizes. */
1753 int no_enum_size_warning;
1755 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1756 int no_wchar_size_warning;
1759 #define elf_aarch64_tdata(bfd) \
1760 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1762 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1764 #define is_aarch64_elf(bfd) \
1765 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1766 && elf_tdata (bfd) != NULL \
1767 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1770 elfNN_aarch64_mkobject (bfd *abfd)
1772 return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
1776 #define elf_aarch64_hash_entry(ent) \
1777 ((struct elf_aarch64_link_hash_entry *)(ent))
1779 #define GOT_UNKNOWN 0
1780 #define GOT_NORMAL 1
1781 #define GOT_TLS_GD 2
1782 #define GOT_TLS_IE 4
1783 #define GOT_TLSDESC_GD 8
1785 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
1787 /* AArch64 ELF linker hash entry. */
1788 struct elf_aarch64_link_hash_entry
1790 struct elf_link_hash_entry root;
1792 /* Track dynamic relocs copied for this symbol. */
1793 struct elf_dyn_relocs *dyn_relocs;
1795 /* Since PLT entries have variable size, we need to record the
1796 index into .got.plt instead of recomputing it from the PLT
1798 bfd_signed_vma plt_got_offset;
1800 /* Bit mask representing the type of GOT entry(s) if any required by
1802 unsigned int got_type;
1804 /* A pointer to the most recently used stub hash entry against this
1806 struct elf_aarch64_stub_hash_entry *stub_cache;
1808 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1809 is from the end of the jump table and reserved entries within the PLTGOT.
1811 The magic value (bfd_vma) -1 indicates that an offset has not
1813 bfd_vma tlsdesc_got_jump_table_offset;
1817 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
1819 unsigned long r_symndx)
1822 return elf_aarch64_hash_entry (h)->got_type;
1824 if (! elf_aarch64_locals (abfd))
1827 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1830 /* Get the AArch64 elf linker hash table from a link_info structure. */
1831 #define elf_aarch64_hash_table(info) \
1832 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1834 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1835 ((struct elf_aarch64_stub_hash_entry *) \
1836 bfd_hash_lookup ((table), (string), (create), (copy)))
1838 /* AArch64 ELF linker hash table. */
1839 struct elf_aarch64_link_hash_table
1841 /* The main hash table. */
1842 struct elf_link_hash_table root;
1844 /* Nonzero to force PIC branch veneers. */
1847 /* Fix erratum 835769. */
1848 int fix_erratum_835769;
1850 /* The number of bytes in the initial entry in the PLT. */
1851 bfd_size_type plt_header_size;
1853 /* The number of bytes in the subsequent PLT entries. */
1854 bfd_size_type plt_entry_size;
1856 /* Short-cuts to get to dynamic linker sections. */
1860 /* Small local sym cache. */
1861 struct sym_cache sym_cache;
1863 /* For convenience in allocate_dynrelocs. */
1866 /* The amount of space used by the reserved portion of the sgotplt
1867 section, plus whatever space is used by the jump slots. */
1868 bfd_vma sgotplt_jump_table_size;
1870 /* The stub hash table. */
1871 struct bfd_hash_table stub_hash_table;
1873 /* Linker stub bfd. */
1876 /* Linker call-backs. */
1877 asection *(*add_stub_section) (const char *, asection *);
1878 void (*layout_sections_again) (void);
1880 /* Array to keep track of which stub sections have been created, and
1881 information on stub grouping. */
1884 /* This is the section to which stubs in the group will be
1887 /* The stub section. */
1891 /* Assorted information used by elfNN_aarch64_size_stubs. */
1892 unsigned int bfd_count;
1894 asection **input_list;
1896 /* The offset into splt of the PLT entry for the TLS descriptor
1897 resolver. Special values are 0, if not necessary (or not found
1898 to be necessary yet), and -1 if needed but not determined
1900 bfd_vma tlsdesc_plt;
1902 /* The GOT offset for the lazy trampoline. Communicated to the
1903 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1904 indicates an offset is not allocated. */
1905 bfd_vma dt_tlsdesc_got;
1907 /* Used by local STT_GNU_IFUNC symbols. */
1908 htab_t loc_hash_table;
1909 void * loc_hash_memory;
1912 /* Create an entry in an AArch64 ELF linker hash table. */
1914 static struct bfd_hash_entry *
1915 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1916 struct bfd_hash_table *table,
1919 struct elf_aarch64_link_hash_entry *ret =
1920 (struct elf_aarch64_link_hash_entry *) entry;
1922 /* Allocate the structure if it has not already been allocated by a
1925 ret = bfd_hash_allocate (table,
1926 sizeof (struct elf_aarch64_link_hash_entry));
1928 return (struct bfd_hash_entry *) ret;
1930 /* Call the allocation method of the superclass. */
1931 ret = ((struct elf_aarch64_link_hash_entry *)
1932 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1936 ret->dyn_relocs = NULL;
1937 ret->got_type = GOT_UNKNOWN;
1938 ret->plt_got_offset = (bfd_vma) - 1;
1939 ret->stub_cache = NULL;
1940 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1943 return (struct bfd_hash_entry *) ret;
1946 /* Initialize an entry in the stub hash table. */
1948 static struct bfd_hash_entry *
1949 stub_hash_newfunc (struct bfd_hash_entry *entry,
1950 struct bfd_hash_table *table, const char *string)
1952 /* Allocate the structure if it has not already been allocated by a
1956 entry = bfd_hash_allocate (table,
1958 elf_aarch64_stub_hash_entry));
1963 /* Call the allocation method of the superclass. */
1964 entry = bfd_hash_newfunc (entry, table, string);
1967 struct elf_aarch64_stub_hash_entry *eh;
1969 /* Initialize the local fields. */
1970 eh = (struct elf_aarch64_stub_hash_entry *) entry;
1971 eh->stub_sec = NULL;
1972 eh->stub_offset = 0;
1973 eh->target_value = 0;
1974 eh->target_section = NULL;
1975 eh->stub_type = aarch64_stub_none;
1983 /* Compute a hash of a local hash entry. We use elf_link_hash_entry
1984 for local symbol so that we can handle local STT_GNU_IFUNC symbols
1985 as global symbol. We reuse indx and dynstr_index for local symbol
1986 hash since they aren't used by global symbols in this backend. */
1989 elfNN_aarch64_local_htab_hash (const void *ptr)
1991 struct elf_link_hash_entry *h
1992 = (struct elf_link_hash_entry *) ptr;
1993 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
1996 /* Compare local hash entries. */
1999 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2001 struct elf_link_hash_entry *h1
2002 = (struct elf_link_hash_entry *) ptr1;
2003 struct elf_link_hash_entry *h2
2004 = (struct elf_link_hash_entry *) ptr2;
2006 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2009 /* Find and/or create a hash entry for local symbol. */
2011 static struct elf_link_hash_entry *
2012 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2013 bfd *abfd, const Elf_Internal_Rela *rel,
2016 struct elf_aarch64_link_hash_entry e, *ret;
2017 asection *sec = abfd->sections;
2018 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2019 ELFNN_R_SYM (rel->r_info));
2022 e.root.indx = sec->id;
2023 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2024 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2025 create ? INSERT : NO_INSERT);
2032 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2036 ret = (struct elf_aarch64_link_hash_entry *)
2037 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2038 sizeof (struct elf_aarch64_link_hash_entry));
2041 memset (ret, 0, sizeof (*ret));
2042 ret->root.indx = sec->id;
2043 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2044 ret->root.dynindx = -1;
2050 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2053 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2054 struct elf_link_hash_entry *dir,
2055 struct elf_link_hash_entry *ind)
2057 struct elf_aarch64_link_hash_entry *edir, *eind;
2059 edir = (struct elf_aarch64_link_hash_entry *) dir;
2060 eind = (struct elf_aarch64_link_hash_entry *) ind;
2062 if (eind->dyn_relocs != NULL)
2064 if (edir->dyn_relocs != NULL)
2066 struct elf_dyn_relocs **pp;
2067 struct elf_dyn_relocs *p;
2069 /* Add reloc counts against the indirect sym to the direct sym
2070 list. Merge any entries against the same section. */
2071 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2073 struct elf_dyn_relocs *q;
2075 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2076 if (q->sec == p->sec)
2078 q->pc_count += p->pc_count;
2079 q->count += p->count;
2086 *pp = edir->dyn_relocs;
2089 edir->dyn_relocs = eind->dyn_relocs;
2090 eind->dyn_relocs = NULL;
2093 if (ind->root.type == bfd_link_hash_indirect)
2095 /* Copy over PLT info. */
2096 if (dir->got.refcount <= 0)
2098 edir->got_type = eind->got_type;
2099 eind->got_type = GOT_UNKNOWN;
2103 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2106 /* Destroy an AArch64 elf linker hash table. */
2109 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2111 struct elf_aarch64_link_hash_table *ret
2112 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2114 if (ret->loc_hash_table)
2115 htab_delete (ret->loc_hash_table);
2116 if (ret->loc_hash_memory)
2117 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2119 bfd_hash_table_free (&ret->stub_hash_table);
2120 _bfd_elf_link_hash_table_free (obfd);
2123 /* Create an AArch64 elf linker hash table. */
2125 static struct bfd_link_hash_table *
2126 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2128 struct elf_aarch64_link_hash_table *ret;
2129 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2131 ret = bfd_zmalloc (amt);
2135 if (!_bfd_elf_link_hash_table_init
2136 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2137 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2143 ret->plt_header_size = PLT_ENTRY_SIZE;
2144 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2146 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2148 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2149 sizeof (struct elf_aarch64_stub_hash_entry)))
2151 _bfd_elf_link_hash_table_free (abfd);
2155 ret->loc_hash_table = htab_try_create (1024,
2156 elfNN_aarch64_local_htab_hash,
2157 elfNN_aarch64_local_htab_eq,
2159 ret->loc_hash_memory = objalloc_create ();
2160 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2162 elfNN_aarch64_link_hash_table_free (abfd);
2165 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2167 return &ret->root.root;
2171 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2172 bfd_vma offset, bfd_vma value)
2174 reloc_howto_type *howto;
2177 howto = elfNN_aarch64_howto_from_type (r_type);
2178 place = (input_section->output_section->vma + input_section->output_offset
2181 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2182 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2183 return _bfd_aarch64_elf_put_addend (input_bfd,
2184 input_section->contents + offset, r_type,
2188 static enum elf_aarch64_stub_type
2189 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2191 if (aarch64_valid_for_adrp_p (value, place))
2192 return aarch64_stub_adrp_branch;
2193 return aarch64_stub_long_branch;
2196 /* Determine the type of stub needed, if any, for a call. */
/* Decide whether the branch relocation REL in INPUT_SEC, targeting
   DESTINATION (symbol type ST_TYPE, hash entry HASH or NULL), needs a
   linker stub, and of which kind.  Returns aarch64_stub_none when the
   branch reaches directly.  */
2198 static enum elf_aarch64_stub_type
2199 aarch64_type_of_stub (struct bfd_link_info *info,
2200 asection *input_sec,
2201 const Elf_Internal_Rela *rel,
2202 unsigned char st_type,
2203 struct elf_aarch64_link_hash_entry *hash,
2204 bfd_vma destination)
2207 bfd_signed_vma branch_offset;
2208 unsigned int r_type;
2209 struct elf_aarch64_link_hash_table *globals;
2210 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2211 bfd_boolean via_plt_p;
/* Only function symbols get branch stubs.  */
2213 if (st_type != STT_FUNC)
2216 globals = elf_aarch64_hash_table (info);
/* TRUE when the call will be redirected through a PLT entry.
   NOTE(review): the use of via_plt_p falls in lines missing from
   this excerpt.  */
2217 via_plt_p = (globals->root.splt != NULL && hash != NULL
2218 && hash->root.plt.offset != (bfd_vma) - 1);
2223 /* Determine where the call point is. */
2224 location = (input_sec->output_offset
2225 + input_sec->output_section->vma + rel->r_offset);
2227 branch_offset = (bfd_signed_vma) (destination - location);
2229 r_type = ELFNN_R_TYPE (rel->r_info);
2231 /* We don't want to redirect any old unconditional jump in this way,
2232 only one which is being used for a sibcall, where it is
2233 acceptable for the IP0 and IP1 registers to be clobbered. */
/* A stub is required only when the +-128MB B/BL range is exceeded.  */
2234 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2235 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2236 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2238 stub_type = aarch64_stub_long_branch;
2244 /* Build a name for an entry in the stub hash table. */
/* Build a malloc'd, unique name for a stub entry: for a global symbol
   "<input-section-id>_<symbol-name>+<addend>", for a local symbol
   "<input-section-id>_<sym-sec-id>:<sym-index>+<addend>".  Returns
   NULL on allocation failure.  */
2247 elfNN_aarch64_stub_name (const asection *input_section,
2248 const asection *sym_sec,
2249 const struct elf_aarch64_link_hash_entry *hash,
2250 const Elf_Internal_Rela *rel)
/* Global symbol: name the stub after the hash-table entry.  */
2257 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2258 stub_name = bfd_malloc (len);
2259 if (stub_name != NULL)
2260 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2261 (unsigned int) input_section->id,
2262 hash->root.root.root.string,
/* Local symbol: identify it by section id and symbol index instead.  */
2267 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2268 stub_name = bfd_malloc (len);
2269 if (stub_name != NULL)
2270 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2271 (unsigned int) input_section->id,
2272 (unsigned int) sym_sec->id,
2273 (unsigned int) ELFNN_R_SYM (rel->r_info),
2280 /* Look up an entry in the stub hash. Stub entries are cached because
2281 creating the stub name takes a bit of time. */
2283 static struct elf_aarch64_stub_hash_entry *
2284 elfNN_aarch64_get_stub_entry (const asection *input_section,
2285 const asection *sym_sec,
2286 struct elf_link_hash_entry *hash,
2287 const Elf_Internal_Rela *rel,
2288 struct elf_aarch64_link_hash_table *htab)
2290 struct elf_aarch64_stub_hash_entry *stub_entry;
2291 struct elf_aarch64_link_hash_entry *h =
2292 (struct elf_aarch64_link_hash_entry *) hash;
2293 const asection *id_sec;
/* Only code sections can contain branches needing stubs.  */
2295 if ((input_section->flags & SEC_CODE) == 0)
2298 /* If this input section is part of a group of sections sharing one
2299 stub section, then use the id of the first section in the group.
2300 Stub names need to include a section id, as there may well be
2301 more than one stub used to reach say, printf, and we need to
2302 distinguish between them. */
2303 id_sec = htab->stub_group[input_section->id].link_sec;
/* Fast path: reuse the per-symbol one-entry cache when it matches
   this symbol and stub group.  */
2305 if (h != NULL && h->stub_cache != NULL
2306 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2308 stub_entry = h->stub_cache;
/* Slow path: rebuild the stub name and look it up (no creation).  */
2314 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2315 if (stub_name == NULL)
2318 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2319 stub_name, FALSE, FALSE);
/* Cache the result for the next query against the same symbol.  */
2321 h->stub_cache = stub_entry;
2330 /* Create a stub section. */
/* Create a stub section named "<SECTION-name>.stub" (STUB_SUFFIX) in
   the stub BFD, via the linker-supplied add_stub_section callback.
   The name is allocated on the stub bfd's objalloc.  */
2333 _bfd_aarch64_create_stub_section (asection *section,
2334 struct elf_aarch64_link_hash_table *htab)
2340 namelen = strlen (section->name);
2341 len = namelen + sizeof (STUB_SUFFIX);
2342 s_name = bfd_alloc (htab->stub_bfd, len);
/* sizeof (STUB_SUFFIX) includes the NUL, terminating s_name.  */
2346 memcpy (s_name, section->name, namelen);
2347 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2348 return (*htab->add_stub_section) (s_name, section);
2352 /* Find or create a stub section in the stub group for an input
2356 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2357 struct elf_aarch64_link_hash_table *htab)
2362 link_sec = htab->stub_group[section->id].link_sec;
2363 BFD_ASSERT (link_sec != NULL);
2364 stub_sec = htab->stub_group[section->id].stub_sec;
2366 if (stub_sec == NULL)
2368 stub_sec = htab->stub_group[link_sec->id].stub_sec;
2369 if (stub_sec == NULL)
2371 stub_sec = _bfd_aarch64_create_stub_section (link_sec, htab)
2372 if (stub_sec == NULL)
2374 htab->stub_group[link_sec->id].stub_sec = stub_sec;
2376 htab->stub_group[section->id].stub_sec = stub_sec;
2383 /* Add a new stub entry in the stub group associated with an input
2384 section to the stub hash. Not all fields of the new stub entry are
2387 static struct elf_aarch64_stub_hash_entry *
2388 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2390 struct elf_aarch64_link_hash_table *htab)
2394 struct elf_aarch64_stub_hash_entry *stub_entry;
/* Locate (or create) the stub section shared by SECTION's group.  */
2396 link_sec = htab->stub_group[section->id].link_sec;
2397 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2399 /* Enter this entry into the linker stub hash table. */
2400 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2402 if (stub_entry == NULL)
2404 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2405 section->owner, stub_name);
/* Partially initialise the entry; the caller fills in target_value,
   target_section, stub_type etc.  stub_offset is assigned later when
   the stub is actually built.  */
2409 stub_entry->stub_sec = stub_sec;
2410 stub_entry->stub_offset = 0;
2411 stub_entry->id_sec = link_sec;
/* bfd_hash_traverse callback: emit the instructions for one stub into
   its stub section and apply the stub-internal relocations.  Assumes
   stub section contents have been allocated and stub sections have
   been sized (their sizes are rebuilt here as stubs are placed).  */
2417 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2418 void *in_arg ATTRIBUTE_UNUSED)
2420 struct elf_aarch64_stub_hash_entry *stub_entry;
2425 bfd_vma veneered_insn_loc;
2426 bfd_vma veneer_entry_loc;
2427 bfd_signed_vma branch_offset = 0;
2428 unsigned int template_size;
2429 const uint32_t *template;
2432 /* Massage our args to the form they really have. */
2433 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2435 stub_sec = stub_entry->stub_sec;
2437 /* Make a note of the offset within the stubs for this entry. */
2438 stub_entry->stub_offset = stub_sec->size;
2439 loc = stub_sec->contents + stub_entry->stub_offset;
2441 stub_bfd = stub_sec->owner;
2443 /* This is the address of the stub destination. */
2444 sym_value = (stub_entry->target_value
2445 + stub_entry->target_section->output_offset
2446 + stub_entry->target_section->output_section->vma)
2448 if (stub_entry->stub_type == aarch64_stub_long_branch)
2450 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2451 + stub_sec->output_offset);
2453 /* See if we can relax the stub. */
2454 if (aarch64_valid_for_adrp_p (sym_value, place))
2455 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
/* Pick the instruction template matching the stub kind.  */
2458 switch (stub_entry->stub_type)
2460 case aarch64_stub_adrp_branch:
2461 template = aarch64_adrp_branch_stub;
2462 template_size = sizeof (aarch64_adrp_branch_stub);
2464 case aarch64_stub_long_branch:
2465 template = aarch64_long_branch_stub;
2466 template_size = sizeof (aarch64_long_branch_stub);
2468 case aarch64_stub_erratum_835769_veneer:
2469 template = aarch64_erratum_835769_stub;
2470 template_size = sizeof (aarch64_erratum_835769_stub);
/* Copy the template instructions into place.  */
2476 for (i = 0; i < (template_size / sizeof template[0]); i++)
2478 bfd_putl32 (template[i], loc);
/* Round the occupied size up to 8 bytes, matching the sizing pass.  */
2482 template_size = (template_size + 7) & ~7;
2483 stub_sec->size += template_size;
/* Now resolve the stub-internal relocations for each kind.  */
2485 switch (stub_entry->stub_type)
2487 case aarch64_stub_adrp_branch:
2488 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2489 stub_entry->stub_offset, sym_value))
2490 /* The stub would not have been relaxed if the offset was out
2494 _bfd_final_link_relocate
2495 (elfNN_aarch64_howto_from_type (AARCH64_R (ADD_ABS_LO12_NC)),
2499 stub_entry->stub_offset + 4,
2504 case aarch64_stub_long_branch:
2505 /* We want the value relative to the address 12 bytes back from the
2507 _bfd_final_link_relocate (elfNN_aarch64_howto_from_type
2508 (AARCH64_R (PRELNN)), stub_bfd, stub_sec,
2510 stub_entry->stub_offset + 16,
2514 case aarch64_stub_erratum_835769_veneer:
/* Compute the B/BL immediate (26 bits, word-scaled) back to the
   instruction following the veneered one.  */
2515 veneered_insn_loc = stub_entry->target_section->output_section->vma
2516 + stub_entry->target_section->output_offset
2517 + stub_entry->target_value;
2518 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2519 + stub_entry->stub_sec->output_offset
2520 + stub_entry->stub_offset;
2521 branch_offset = veneered_insn_loc - veneer_entry_loc;
2522 branch_offset >>= 2;
2523 branch_offset &= 0x3ffffff;
/* Place the moved multiply-accumulate, then the branch back.  */
2524 bfd_putl32 (stub_entry->veneered_insn,
2525 stub_sec->contents + stub_entry->stub_offset);
2526 bfd_putl32 (template[1] | branch_offset,
2527 stub_sec->contents + stub_entry->stub_offset + 4);
2537 /* As above, but don't actually build the stub. Just bump offset so
2538 we know stub section sizes. */
/* bfd_hash_traverse callback: account one stub's size (rounded up to
   8 bytes, matching aarch64_build_one_stub) into its stub section.  */
2541 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2542 void *in_arg ATTRIBUTE_UNUSED)
2544 struct elf_aarch64_stub_hash_entry *stub_entry;
2547 /* Massage our args to the form they really have. */
2548 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2550 switch (stub_entry->stub_type)
2552 case aarch64_stub_adrp_branch:
2553 size = sizeof (aarch64_adrp_branch_stub);
2555 case aarch64_stub_long_branch:
2556 size = sizeof (aarch64_long_branch_stub);
2558 case aarch64_stub_erratum_835769_veneer:
2559 size = sizeof (aarch64_erratum_835769_stub);
/* Keep 8-byte rounding in sync with the build pass.  */
2565 size = (size + 7) & ~7;
2566 stub_entry->stub_sec->size += size;
2570 /* External entry points for sizing and building linker stubs. */
2572 /* Set up various things so that we can make a list of input sections
2573 for each output section included in the link. Returns -1 on error,
2574 0 when no stubs will be needed, and 1 on success. */
2577 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2578 struct bfd_link_info *info)
2581 unsigned int bfd_count;
2582 int top_id, top_index;
2584 asection **input_list, **list;
2586 struct elf_aarch64_link_hash_table *htab =
2587 elf_aarch64_hash_table (info);
2589 if (!is_elf_hash_table (htab))
2592 /* Count the number of input BFDs and find the top input section id. */
2593 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2594 input_bfd != NULL; input_bfd = input_bfd->link.next)
2597 for (section = input_bfd->sections;
2598 section != NULL; section = section->next)
2600 if (top_id < section->id)
2601 top_id = section->id;
2604 htab->bfd_count = bfd_count;
/* One stub_group slot per possible input section id, zero-filled.  */
2606 amt = sizeof (struct map_stub) * (top_id + 1);
2607 htab->stub_group = bfd_zmalloc (amt);
2608 if (htab->stub_group == NULL)
2611 /* We can't use output_bfd->section_count here to find the top output
2612 section index as some sections may have been removed, and
2613 _bfd_strip_section_from_output doesn't renumber the indices. */
2614 for (section = output_bfd->sections, top_index = 0;
2615 section != NULL; section = section->next)
2617 if (top_index < section->index)
2618 top_index = section->index;
2621 htab->top_index = top_index;
2622 amt = sizeof (asection *) * (top_index + 1);
2623 input_list = bfd_malloc (amt);
2624 htab->input_list = input_list;
2625 if (input_list == NULL)
2628 /* For sections we aren't interested in, mark their entries with a
2629 value we can check later. */
2630 list = input_list + top_index;
2632 *list = bfd_abs_section_ptr;
2633 while (list-- != input_list);
/* Code sections get a real (initially empty) list instead of the
   bfd_abs_section_ptr sentinel.  */
2635 for (section = output_bfd->sections;
2636 section != NULL; section = section->next)
2638 if ((section->flags & SEC_CODE) != 0)
2639 input_list[section->index] = NULL;
2645 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2646 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2648 /* The linker repeatedly calls this function for each input section,
2649 in the order that input sections are linked into output sections.
2650 Build lists of input sections to determine groupings between which
2651 we may insert linker stubs. */
/* Called by the linker once per input section, in link order; chains
   ISEC onto the per-output-section list built by
   elfNN_aarch64_setup_section_lists, reusing the stub_group link_sec
   field (via PREV_SEC) as the list's next pointer.  */
2654 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2656 struct elf_aarch64_link_hash_table *htab =
2657 elf_aarch64_hash_table (info);
2659 if (isec->output_section->index <= htab->top_index)
2661 asection **list = htab->input_list + isec->output_section->index;
/* bfd_abs_section_ptr marks output sections we ignore (non-code).  */
2663 if (*list != bfd_abs_section_ptr)
2665 /* Steal the link_sec pointer for our list. */
2666 /* This happens to make the list in reverse order,
2667 which is what we want. */
2668 PREV_SEC (isec) = *list;
2674 /* See whether we can group stub sections together. Grouping stub
2675 sections may result in fewer stubs. More importantly, we need to
2676 put all .init* and .fini* stubs at the beginning of the .init or
2677 .fini output sections respectively, because glibc splits the
2678 _init and _fini functions into multiple parts. Putting a stub in
2679 the middle of a function is not a good idea. */
/* Walk each output section's (reverse-ordered) input-section list and
   partition it into stub groups of at most STUB_GROUP_SIZE bytes,
   recording each group's leader in stub_group[].link_sec.  Frees
   htab->input_list when done.  */
2682 group_sections (struct elf_aarch64_link_hash_table *htab,
2683 bfd_size_type stub_group_size,
2684 bfd_boolean stubs_always_before_branch)
2686 asection **list = htab->input_list + htab->top_index;
2690 asection *tail = *list;
/* Skip output sections marked uninteresting by setup_section_lists.  */
2692 if (tail == bfd_abs_section_ptr)
2695 while (tail != NULL)
2699 bfd_size_type total;
/* Extend the group backwards while it stays under the size limit.  */
2703 while ((prev = PREV_SEC (curr)) != NULL
2704 && ((total += curr->output_offset - prev->output_offset)
2708 /* OK, the size from the start of CURR to the end is less
2709 than stub_group_size and thus can be handled by one stub
2710 section. (Or the tail section is itself larger than
2711 stub_group_size, in which case we may be toast.)
2712 We should really be keeping track of the total size of
2713 stubs added here, as stubs contribute to the final output
2717 prev = PREV_SEC (tail);
2718 /* Set up this stub group. */
2719 htab->stub_group[tail->id].link_sec = curr;
2721 while (tail != curr && (tail = prev) != NULL);
2723 /* But wait, there's more! Input sections up to stub_group_size
2724 bytes before the stub section can be handled by it too. */
2725 if (!stubs_always_before_branch)
2729 && ((total += tail->output_offset - prev->output_offset)
2733 prev = PREV_SEC (tail);
2734 htab->stub_group[tail->id].link_sec = curr;
2740 while (list-- != htab->input_list);
2742 free (htab->input_list);
/* Field extractors and instruction classifiers used by the Cortex-A53
   erratum 835769 scanner below.  AARCH64_BITS extracts N bits of X
   starting at bit POS.  */
2747 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2749 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2750 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2751 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2752 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2753 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2754 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
/* AARCH64_MAC matches the multiply-accumulate opcode group.  */
2756 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2757 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2758 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2759 #define AARCH64_ZR 0x1f
2761 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2762 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. */
2764 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2765 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2766 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2767 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2768 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2769 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2770 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2771 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2772 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2773 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2774 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2775 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2776 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2777 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2778 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2779 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2780 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2781 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
2783 /* Classify an INSN if it is indeed a load/store.
2785 Return TRUE if INSN is a LD/ST instruction otherwise return FALSE.
2787 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2
2790 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.
2795 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2796 bfd_boolean *pair, bfd_boolean *load)
2804 /* Bail out quickly if INSN doesn't fall into the load-store
2806 if (!AARCH64_LDST (insn))
/* Load/store exclusive: may be single or pair (bit 21).  */
2811 if (AARCH64_LDST_EX (insn))
2813 *rt = AARCH64_RT (insn);
2815 if (AARCH64_BIT (insn, 21) == 1)
2818 *rt2 = AARCH64_RT2 (insn);
2820 *load = AARCH64_LD (insn);
/* Load/store pair (no-allocate, post-index, offset, pre-index).  */
2823 else if (AARCH64_LDST_NAP (insn)
2824 || AARCH64_LDSTP_PI (insn)
2825 || AARCH64_LDSTP_O (insn)
2826 || AARCH64_LDSTP_PRE (insn))
2829 *rt = AARCH64_RT (insn);
2830 *rt2 = AARCH64_RT2 (insn);
2831 *load = AARCH64_LD (insn);
/* Single-register scalar loads/stores (incl. PC-relative and
   prefetch encodings).  */
2834 else if (AARCH64_LDST_PCREL (insn)
2835 || AARCH64_LDST_UI (insn)
2836 || AARCH64_LDST_PIIMM (insn)
2837 || AARCH64_LDST_U (insn)
2838 || AARCH64_LDST_PREIMM (insn)
2839 || AARCH64_LDST_RO (insn)
2840 || AARCH64_LDST_UIMM (insn))
2842 *rt = AARCH64_RT (insn);
2844 if (AARCH64_LDST_PCREL (insn))
/* opc/V distinguish LDR literal variants from PRFM.  */
2846 opc = AARCH64_BITS (insn, 22, 2);
2847 v = AARCH64_BIT (insn, 26);
2848 opc_v = opc | (v << 2);
2849 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
2850 || opc_v == 5 || opc_v == 7);
/* SIMD multiple-structure loads/stores (LD1..LD4/ST1..ST4).  */
2853 else if (AARCH64_LDST_SIMD_M (insn)
2854 || AARCH64_LDST_SIMD_M_PI (insn))
2856 *rt = AARCH64_RT (insn);
2857 *load = AARCH64_BIT (insn, 22);
2858 opcode = (insn >> 12) & 0xf;
/* SIMD single-structure loads/stores.  */
2885 else if (AARCH64_LDST_SIMD_S (insn)
2886 || AARCH64_LDST_SIMD_S_PI (insn))
2888 *rt = AARCH64_RT (insn);
2889 r = (insn >> 21) & 1;
2890 *load = AARCH64_BIT (insn, 22);
2891 opcode = (insn >> 13) & 0x7;
2903 *rt2 = *rt + (r == 0 ? 2 : 3);
2911 *rt2 = *rt + (r == 0 ? 2 : 3);
2923 /* Return TRUE if INSN is multiply-accumulate. */
/* Return TRUE if INSN is a 64-bit multiply-accumulate of the kinds
   affected by erratum 835769 (MADD/MSUB family, op31 0/1/5), while
   excluding plain MUL (which is MADD with Ra == XZR).  */
2926 aarch64_mlxl_p (uint32_t insn)
2928 uint32_t op31 = AARCH64_OP31 (insn);
2930 if (AARCH64_MAC (insn)
2931 && (op31 == 0 || op31 == 1 || op31 == 5)
2932 /* Exclude MUL instructions which are encoded as a multiple accumulate
2934 && AARCH64_RA (insn) != AARCH64_ZR)
2940 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
2941 it is possible for a 64-bit multiply-accumulate instruction to generate an
2942 incorrect result. The details are quite complex and hard to
2943 determine statically, since branches in the code may exist in some
2944 circumstances, but all cases end with a memory (load, store, or
2945 prefetch) instruction followed immediately by the multiply-accumulate
2946 operation. We employ a linker patching technique, by moving the potentially
2947 affected multiply-accumulate instruction into a patch region and replacing
2948 the original instruction with a branch to the patch. This function checks
2949 if INSN_1 is the memory operation followed by a multiply-accumulate
2950 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
2951 if INSN_1 and INSN_2 are safe. */
2954 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
/* Candidate sequence: a memory op (INSN_1) immediately followed by a
   multiply-accumulate (INSN_2).  */
2964 if (aarch64_mlxl_p (insn_2)
2965 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
2967 /* Any SIMD memory op is independent of the subsequent MLA
2968 by definition of the erratum. */
2969 if (AARCH64_BIT (insn_1, 26))
2972 /* If not SIMD, check for integer memory ops and MLA relationship. */
2973 rn = AARCH64_RN (insn_2);
2974 ra = AARCH64_RA (insn_2);
2975 rm = AARCH64_RM (insn_2);
2977 /* If this is a load and there's a true(RAW) dependency, we are safe
2978 and this is not an erratum sequence. */
2980 (rt == rn || rt == rm || rt == ra
2981 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
2984 /* We conservatively put out stubs for all other cases (including
2992 /* Used to order a list of mapping symbols by address. */
/* qsort comparator ordering mapping symbols by address, then by type,
   so the sort result is host-independent.  */
2995 elf_aarch64_compare_mapping (const void *a, const void *b)
2997 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
2998 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3000 if (amap->vma > bmap->vma)
3002 else if (amap->vma < bmap->vma)
3004 else if (amap->type > bmap->type)
3005 /* Ensure results do not depend on the host qsort for objects with
3006 multiple mapping symbols at the same address by sorting on type
3009 else if (amap->type < bmap->type)
3016 /* Scan for cortex-a53 erratum 835769 sequence.
3018 Return TRUE on success, FALSE on abnormal termination. */
3021 erratum_835769_scan (bfd *input_bfd,
3022 struct bfd_link_info *info,
3023 struct aarch64_erratum_835769_fix **fixes_p,
3024 unsigned int *num_fixes_p,
3025 unsigned int *fix_table_size_p)
3028 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3029 struct aarch64_erratum_835769_fix *fixes = *fixes_p;
3030 unsigned int num_fixes = *num_fixes_p;
3031 unsigned int fix_table_size = *fix_table_size_p;
3036 for (section = input_bfd->sections;
3038 section = section->next)
3040 bfd_byte *contents = NULL;
3041 struct _aarch64_elf_section_data *sec_data;
/* Only scan executable, kept, allocated PROGBITS sections.  */
3044 if (elf_section_type (section) != SHT_PROGBITS
3045 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3046 || (section->flags & SEC_EXCLUDE) != 0
3047 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3048 || (section->output_section == bfd_abs_section_ptr))
/* Reuse cached contents when available; otherwise read them in.  */
3051 if (elf_section_data (section)->this_hdr.contents != NULL)
3052 contents = elf_section_data (section)->this_hdr.contents;
3053 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3056 sec_data = elf_aarch64_section_data (section);
/* Sort mapping symbols so code/data spans can be walked in order.  */
3058 qsort (sec_data->map, sec_data->mapcount,
3059 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3061 for (span = 0; span < sec_data->mapcount; span++)
3063 unsigned int span_start = sec_data->map[span].vma;
/* NOTE(review): the last span's end is computed from map[0].vma +
   section->size — verify this is intended rather than plain
   section->size.  */
3064 unsigned int span_end = ((span == sec_data->mapcount - 1)
3065 ? sec_data->map[0].vma + section->size
3066 : sec_data->map[span + 1].vma);
3068 char span_type = sec_data->map[span].type;
/* Skip data-in-code spans; only scan instruction spans.  */
3070 if (span_type == 'd')
/* Examine each adjacent instruction pair in the span.  */
3073 for (i = span_start; i + 4 < span_end; i += 4)
3075 uint32_t insn_1 = bfd_getl32 (contents + i);
3076 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3078 if (aarch64_erratum_sequence (insn_1, insn_2))
3080 char *stub_name = NULL;
3081 stub_name = (char *) bfd_malloc
3082 (strlen ("__erratum_835769_veneer_") + 16);
3083 if (stub_name != NULL)
3085 (stub_name,"__erratum_835769_veneer_%d", num_fixes);
/* Grow the fix table geometrically when it fills up.  */
3089 if (num_fixes == fix_table_size)
3091 fix_table_size *= 2;
3093 (struct aarch64_erratum_835769_fix *)
3095 sizeof (struct aarch64_erratum_835769_fix)
/* Record the fix: the MLA (insn_2) at offset i + 4 will be moved
   into a veneer stub.  */
3101 fixes[num_fixes].input_bfd = input_bfd;
3102 fixes[num_fixes].section = section;
3103 fixes[num_fixes].offset = i + 4;
3104 fixes[num_fixes].veneered_insn = insn_2;
3105 fixes[num_fixes].stub_name = stub_name;
3106 fixes[num_fixes].stub_type = aarch64_stub_erratum_835769_veneer;
/* Free contents only if we read them ourselves above.  */
3111 if (elf_section_data (section)->this_hdr.contents == NULL)
/* Report results (the fix table itself may have been reallocated).  */
3116 *num_fixes_p = num_fixes;
3117 *fix_table_size_p = fix_table_size;
3122 /* Resize all stub sections. */
/* Recompute all stub section sizes: reset each stub section (those
   whose name contains STUB_SUFFIX) and re-accumulate via
   aarch64_size_one_stub over the stub hash table.  */
3125 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3129 /* OK, we've added some stubs. Find out the new size of the
3131 for (section = htab->stub_bfd->sections;
3132 section != NULL; section = section->next)
3134 /* Ignore non-stub sections. */
3135 if (!strstr (section->name, STUB_SUFFIX))
3140 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3144 /* Determine and set the size of the stub section for a final link.
3146 The basic idea here is to examine all the relocations looking for
3147 PC-relative calls to a target that is unreachable with a "bl"
/* Top-level stub sizing pass, called from the linker.  Iterates until
   stable: scans every CALL26/JUMP26 relocation in every input section,
   creates stub hash entries for out-of-range branches, scans for
   erratum 835769 sequences when enabled, resizes stub sections and
   asks the linker to re-lay-out.  GROUP_SIZE < 0 forces stubs before
   branches; its magnitude (or a ~127MB default) bounds stub groups.  */
3151 elfNN_aarch64_size_stubs (bfd *output_bfd,
3153 struct bfd_link_info *info,
3154 bfd_signed_vma group_size,
3155 asection * (*add_stub_section) (const char *,
3157 void (*layout_sections_again) (void))
3159 bfd_size_type stub_group_size;
3160 bfd_boolean stubs_always_before_branch;
3161 bfd_boolean stub_changed = 0;
3162 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3163 struct aarch64_erratum_835769_fix *erratum_835769_fixes = NULL;
3164 unsigned int num_erratum_835769_fixes = 0;
3165 unsigned int erratum_835769_fix_table_size = 10;
/* Pre-allocate the erratum fix table when the workaround is on.  */
3168 if (htab->fix_erratum_835769)
3170 erratum_835769_fixes
3171 = (struct aarch64_erratum_835769_fix *)
3173 (sizeof (struct aarch64_erratum_835769_fix) *
3174 erratum_835769_fix_table_size)
3175 if (erratum_835769_fixes == NULL)
3176 goto error_ret_free_local;
3179 /* Propagate mach to stub bfd, because it may not have been
3180 finalized when we created stub_bfd. */
3181 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3182 bfd_get_mach (output_bfd));
3184 /* Stash our params away. */
3185 htab->stub_bfd = stub_bfd;
3186 htab->add_stub_section = add_stub_section;
3187 htab->layout_sections_again = layout_sections_again;
3188 stubs_always_before_branch = group_size < 0;
3190 stub_group_size = -group_size;
3192 stub_group_size = group_size;
3194 if (stub_group_size == 1)
3196 /* Default values. */
3197 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3198 stub_group_size = 127 * 1024 * 1024;
3201 group_sections (htab, stub_group_size, stubs_always_before_branch);
/* Iterate until no new stubs or erratum fixes appear.  */
3207 unsigned prev_num_erratum_835769_fixes = num_erratum_835769_fixes;
3209 num_erratum_835769_fixes = 0;
3210 for (input_bfd = info->input_bfds;
3211 input_bfd != NULL; input_bfd = input_bfd->link.next)
3213 Elf_Internal_Shdr *symtab_hdr;
3215 Elf_Internal_Sym *local_syms = NULL;
3217 /* We'll need the symbol table in a second. */
3218 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3219 if (symtab_hdr->sh_info == 0)
3222 /* Walk over each section attached to the input bfd. */
3223 for (section = input_bfd->sections;
3224 section != NULL; section = section->next)
3226 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3228 /* If there aren't any relocs, then there's nothing more
3230 if ((section->flags & SEC_RELOC) == 0
3231 || section->reloc_count == 0
3232 || (section->flags & SEC_CODE) == 0)
3235 /* If this section is a link-once section that will be
3236 discarded, then don't create any stubs. */
3237 if (section->output_section == NULL
3238 || section->output_section->owner != output_bfd)
3241 /* Get the relocs. */
3243 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3244 NULL, info->keep_memory);
3245 if (internal_relocs == NULL)
3246 goto error_ret_free_local;
3248 /* Now examine each relocation. */
3249 irela = internal_relocs;
3250 irelaend = irela + section->reloc_count;
3251 for (; irela < irelaend; irela++)
3253 unsigned int r_type, r_indx;
3254 enum elf_aarch64_stub_type stub_type;
3255 struct elf_aarch64_stub_hash_entry *stub_entry;
3258 bfd_vma destination;
3259 struct elf_aarch64_link_hash_entry *hash;
3260 const char *sym_name;
3262 const asection *id_sec;
3263 unsigned char st_type;
3266 r_type = ELFNN_R_TYPE (irela->r_info);
3267 r_indx = ELFNN_R_SYM (irela->r_info);
/* Reject out-of-range relocation numbers early.  */
3269 if (r_type >= (unsigned int) R_AARCH64_end)
3271 bfd_set_error (bfd_error_bad_value);
3272 error_ret_free_internal:
3273 if (elf_section_data (section)->relocs == NULL)
3274 free (internal_relocs);
3275 goto error_ret_free_local;
3278 /* Only look for stubs on unconditional branch and
3279 branch and link instructions. */
3280 if (r_type != (unsigned int) AARCH64_R (CALL26)
3281 && r_type != (unsigned int) AARCH64_R (JUMP26))
3284 /* Now determine the call target, its name, value,
3291 if (r_indx < symtab_hdr->sh_info)
3293 /* It's a local symbol. */
3294 Elf_Internal_Sym *sym;
3295 Elf_Internal_Shdr *hdr;
/* Lazily read local symbols, reusing a cached copy if present.  */
3297 if (local_syms == NULL)
3300 = (Elf_Internal_Sym *) symtab_hdr->contents;
3301 if (local_syms == NULL)
3303 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3304 symtab_hdr->sh_info, 0,
3306 if (local_syms == NULL)
3307 goto error_ret_free_internal;
3310 sym = local_syms + r_indx;
3311 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3312 sym_sec = hdr->bfd_section;
3314 /* This is an undefined symbol. It can never
3318 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3319 sym_value = sym->st_value;
3320 destination = (sym_value + irela->r_addend
3321 + sym_sec->output_offset
3322 + sym_sec->output_section->vma);
3323 st_type = ELF_ST_TYPE (sym->st_info);
3325 = bfd_elf_string_from_elf_section (input_bfd,
3326 symtab_hdr->sh_link,
/* Global symbol: follow indirect/warning links first.  */
3333 e_indx = r_indx - symtab_hdr->sh_info;
3334 hash = ((struct elf_aarch64_link_hash_entry *)
3335 elf_sym_hashes (input_bfd)[e_indx]);
3337 while (hash->root.root.type == bfd_link_hash_indirect
3338 || hash->root.root.type == bfd_link_hash_warning)
3339 hash = ((struct elf_aarch64_link_hash_entry *)
3340 hash->root.root.u.i.link);
3342 if (hash->root.root.type == bfd_link_hash_defined
3343 || hash->root.root.type == bfd_link_hash_defweak)
3345 struct elf_aarch64_link_hash_table *globals =
3346 elf_aarch64_hash_table (info);
3347 sym_sec = hash->root.root.u.def.section;
3348 sym_value = hash->root.root.u.def.value;
3349 /* For a destination in a shared library,
3350 use the PLT stub as target address to
3351 decide whether a branch stub is
3353 if (globals->root.splt != NULL && hash != NULL
3354 && hash->root.plt.offset != (bfd_vma) - 1)
3356 sym_sec = globals->root.splt;
3357 sym_value = hash->root.plt.offset;
3358 if (sym_sec->output_section != NULL)
3359 destination = (sym_value
3360 + sym_sec->output_offset
3362 sym_sec->output_section->vma);
3364 else if (sym_sec->output_section != NULL)
3365 destination = (sym_value + irela->r_addend
3366 + sym_sec->output_offset
3367 + sym_sec->output_section->vma);
3369 else if (hash->root.root.type == bfd_link_hash_undefined
3370 || (hash->root.root.type
3371 == bfd_link_hash_undefweak))
3373 /* For a shared library, use the PLT stub as
3374 target address to decide whether a long
3375 branch stub is needed.
3376 For absolute code, they cannot be handled. */
3377 struct elf_aarch64_link_hash_table *globals =
3378 elf_aarch64_hash_table (info);
3380 if (globals->root.splt != NULL && hash != NULL
3381 && hash->root.plt.offset != (bfd_vma) - 1)
3383 sym_sec = globals->root.splt;
3384 sym_value = hash->root.plt.offset;
3385 if (sym_sec->output_section != NULL)
3386 destination = (sym_value
3387 + sym_sec->output_offset
3389 sym_sec->output_section->vma);
/* Unexpected hash entry type: hard error.  */
3396 bfd_set_error (bfd_error_bad_value);
3397 goto error_ret_free_internal;
3399 st_type = ELF_ST_TYPE (hash->root.type);
3400 sym_name = hash->root.root.root.string;
3403 /* Determine what (if any) linker stub is needed. */
3404 stub_type = aarch64_type_of_stub
3405 (info, section, irela, st_type, hash, destination);
3406 if (stub_type == aarch64_stub_none)
3409 /* Support for grouping stub sections. */
3410 id_sec = htab->stub_group[section->id].link_sec;
3412 /* Get the name of this stub. */
3413 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3416 goto error_ret_free_internal;
3419 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3420 stub_name, FALSE, FALSE);
3421 if (stub_entry != NULL)
3423 /* The proper stub has already been created. */
3428 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3429 (stub_name, section, htab);
3430 if (stub_entry == NULL)
3433 goto error_ret_free_internal;
3436 stub_entry->target_value = sym_value;
3437 stub_entry->target_section = sym_sec;
3438 stub_entry->stub_type = stub_type;
3439 stub_entry->h = hash;
3440 stub_entry->st_type = st_type;
3442 if (sym_name == NULL)
3443 sym_name = "unnamed";
3444 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3445 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3446 if (stub_entry->output_name == NULL)
3449 goto error_ret_free_internal;
3452 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
/* A new stub forces another sizing/layout iteration.  */
3455 stub_changed = TRUE;
3458 /* We're done with the internal relocs, free them. */
3459 if (elf_section_data (section)->relocs == NULL)
3460 free (internal_relocs);
3463 if (htab->fix_erratum_835769)
3465 /* Scan for sequences which might trigger erratum 835769. */
3466 if (!erratum_835769_scan (input_bfd, info, &erratum_835769_fixes,
3467 &num_erratum_835769_fixes,
3468 &erratum_835769_fix_table_size))
3469 goto error_ret_free_local;
3473 if (prev_num_erratum_835769_fixes != num_erratum_835769_fixes)
3474 stub_changed = TRUE;
3479 _bfd_aarch64_resize_stubs (htab);
3481 /* Add erratum 835769 veneers to stub section sizes too. */
3482 if (htab->fix_erratum_835769)
3483 for (i = 0; i < num_erratum_835769_fixes; i++)
3485 stub_sec = _bfd_aarch64_create_or_find_stub_sec
3486 (erratum_835769_fixes[i].section, htab);
3488 if (stub_sec == NULL)
3489 goto error_ret_free_local;
/* Each veneer is two instructions, 8 bytes.  */
3491 stub_sec->size += 8;
3494 /* Ask the linker to do its stuff. */
3495 (*htab->layout_sections_again) ();
3496 stub_changed = FALSE;
3499 /* Add stubs for erratum 835769 fixes now. */
3500 if (htab->fix_erratum_835769)
3502 for (i = 0; i < num_erratum_835769_fixes; i++)
3504 struct elf_aarch64_stub_hash_entry *stub_entry;
3505 char *stub_name = erratum_835769_fixes[i].stub_name;
3506 asection *section = erratum_835769_fixes[i].section;
3507 unsigned int section_id = erratum_835769_fixes[i].section->id;
3508 asection *link_sec = htab->stub_group[section_id].link_sec;
3509 asection *stub_sec = htab->stub_group[section_id].stub_sec;
/* TRUE third argument: create the hash entry if missing.  */
3511 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
3512 stub_name, TRUE, FALSE);
3513 if (stub_entry == NULL)
3515 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3521 stub_entry->stub_sec = stub_sec;
3522 stub_entry->stub_offset = 0;
3523 stub_entry->id_sec = link_sec;
3524 stub_entry->stub_type = erratum_835769_fixes[i].stub_type;
3525 stub_entry->target_section = section;
3526 stub_entry->target_value = erratum_835769_fixes[i].offset;
3527 stub_entry->veneered_insn = erratum_835769_fixes[i].veneered_insn;
3528 stub_entry->output_name = erratum_835769_fixes[i].stub_name;
3534 error_ret_free_local:
3538 /* Build all the stubs associated with the current output file.  The
3539 stubs are kept in a hash table attached to the main linker hash
3540 table. We also set up the .plt entries for statically linked PIC
3541 functions here. This function is called via aarch64_elf_finish in the
   linker.  NOTE(review): this excerpt is lossy — some original source
   lines (return type, loop body braces, error handling) are elided.  */
3545 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3548 struct bfd_hash_table *table;
3549 struct elf_aarch64_link_hash_table *htab;
3551 htab = elf_aarch64_hash_table (info);
/* Walk every section of the dedicated stub BFD; only sections whose
   name contains STUB_SUFFIX are stub sections.  */
3553 for (stub_sec = htab->stub_bfd->sections;
3554 stub_sec != NULL; stub_sec = stub_sec->next)
3558 /* Ignore non-stub sections. */
3559 if (!strstr (stub_sec->name, STUB_SUFFIX))
3562 /* Allocate memory to hold the linker stubs. */
3563 size = stub_sec->size;
3564 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size)
3565 if (stub_sec->contents == NULL && size != 0)
3570 /* Build the stubs as directed by the stub hash table.  Each hash
   entry is visited once by aarch64_build_one_stub with INFO as the
   traversal argument.  */
3571 table = &htab->stub_hash_table;
3572 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3578 /* Add an entry to the code/data map for section SEC. */
/* Append one mapping entry (TYPE at address VMA) to SEC's per-section
   code/data map, growing the backing array geometrically as needed.  */
3581 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3583 struct _aarch64_elf_section_data *sec_data =
3584 elf_aarch64_section_data (sec);
3585 unsigned int newidx;
/* Lazily create the map with room for a single entry.  */
3587 if (sec_data->map == NULL)
3589 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3590 sec_data->mapcount = 0;
3591 sec_data->mapsize = 1;
3594 newidx = sec_data->mapcount++;
/* Double the capacity whenever the count outgrows it, giving amortized
   O(1) appends.  bfd_realloc_or_free frees the old buffer on failure.  */
3596 if (sec_data->mapcount > sec_data->mapsize)
3598 sec_data->mapsize *= 2;
3599 sec_data->map = bfd_realloc_or_free
3600 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map))
3605 sec_data->map[newidx].vma = vma;
3606 sec_data->map[newidx].type = type;
3611 /* Initialise maps of insn/data for input BFDs. */
/* Build the insn/data mapping tables for input BFD ABFD by scanning its
   local symbols for AArch64 mapping symbols and recording each one via
   elfNN_aarch64_section_map_add.  */
3613 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3615 Elf_Internal_Sym *isymbuf;
3616 Elf_Internal_Shdr *hdr;
3617 unsigned int i, localsyms;
3619 /* Make sure that we are dealing with an AArch64 elf binary. */
3620 if (!is_aarch64_elf (abfd))
3623 if ((abfd->flags & DYNAMIC) != 0)
3626 hdr = &elf_symtab_hdr (abfd);
3627 localsyms = hdr->sh_info;
3629 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3630 should contain the number of local symbols, which should come before any
3631 global symbols. Mapping symbols are always local. */
3632 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3634 /* No internal symbols read? Skip this BFD. */
3635 if (isymbuf == NULL)
3638 for (i = 0; i < localsyms; i++)
3640 Elf_Internal_Sym *isym = &isymbuf[i];
3641 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3644 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3646 name = bfd_elf_string_from_elf_section (abfd,
/* For a recognized mapping symbol, record its kind.  name[1] is the
   character following the leading '$' of the mapping-symbol name.  */
3650 if (bfd_is_aarch64_special_symbol_name
3651 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3652 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
3657 /* Set option values needed during linking. */
/* Record linker command-line options in the hash table (PIC veneers,
   Cortex-A53 erratum 835769 workaround) and in the output BFD's target
   data (enum/wchar size warning suppression).  Called by the linker
   front end before relocation processing.  */
3659 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
3660 struct bfd_link_info *link_info,
3662 int no_wchar_warn, int pic_veneer,
3663 int fix_erratum_835769)
3665 struct elf_aarch64_link_hash_table *globals;
3667 globals = elf_aarch64_hash_table (link_info);
3668 globals->pic_veneer = pic_veneer;
3669 globals->fix_erratum_835769 = fix_erratum_835769;
3671 BFD_ASSERT (is_aarch64_elf (output_bfd));
3672 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3673 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
/* Return the VMA of H's entry in the .got, initializing the entry with
   VALUE when the symbol resolves locally.  Clears *UNRESOLVED_RELOC_P
   once the entry is known to be resolvable at static link time.  */
3677 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3678 struct elf_aarch64_link_hash_table
3679 *globals, struct bfd_link_info *info,
3680 bfd_vma value, bfd *output_bfd,
3681 bfd_boolean *unresolved_reloc_p)
3683 bfd_vma off = (bfd_vma) - 1;
3684 asection *basegot = globals->root.sgot;
3685 bfd_boolean dyn = globals->root.dynamic_sections_created;
3689 BFD_ASSERT (basegot != NULL);
/* h->got.offset must have been assigned during allocate_dynrelocs;
   (bfd_vma) -1 would mean no GOT slot was reserved.  */
3690 off = h->got.offset;
3691 BFD_ASSERT (off != (bfd_vma) - 1);
3692 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3694 && SYMBOL_REFERENCES_LOCAL (info, h))
3695 || (ELF_ST_VISIBILITY (h->other)
3696 && h->root.type == bfd_link_hash_undefweak))
3698 /* This is actually a static link, or it is a -Bsymbolic link
3699 and the symbol is defined locally. We must initialize this
3700 entry in the global offset table. Since the offset must
3701 always be a multiple of 8 (4 in the case of ILP32), we use
3702 the least significant bit to record whether we have
3703 initialized it already.
3704 When doing a dynamic link, we create a .rel(a).got relocation
3705 entry to initialize the value. This is done in the
3706 finish_dynamic_symbol routine. */
3711 bfd_put_NN (output_bfd, value, basegot->contents + off);
3716 *unresolved_reloc_p = FALSE;
/* Translate the section-relative GOT offset into an output VMA.  */
3718 off = off + basegot->output_section->vma + basegot->output_offset;
3724 /* Change R_TYPE to a more efficient access model where possible,
3725 return the new reloc type. */
/* Map a general-dynamic or TLS-descriptor relocation R_TYPE to its
   relaxed counterpart, assuming the relaxation is permitted: for a
   local symbol (H == NULL) the local-exec (TPREL) form is chosen,
   otherwise the initial-exec (GOTTPREL) form.  TLSDESC ADD/CALL
   relocations relax to NONE since their instructions become NOPs.  */
3727 static bfd_reloc_code_real_type
3728 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
3729 struct elf_link_hash_entry *h)
3731 bfd_boolean is_local = h == NULL;
3735 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3736 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3738 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3739 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
3741 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3743 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3746 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3748 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3749 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3751 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
3752 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
3754 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3755 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
/* IE relocations against a local symbol relax further to LE; for a
   global symbol they are already in their final form.  */
3757 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3758 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
3760 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
3761 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
3763 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
3766 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3768 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
3769 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3771 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3772 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3773 /* Instructions with these relocations will become NOPs. */
3774 return BFD_RELOC_AARCH64_NONE;
/* Classify relocation R_TYPE by the kind of GOT entry it requires:
   plain GOT access, traditional general-dynamic TLS, TLS-descriptor
   general-dynamic (the one return visible in this excerpt), or
   initial-exec TLS.  NOTE(review): the return statements for the other
   groups are elided from this excerpt — confirm against the full file.  */
3784 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
/* Plain (non-TLS) GOT-relative relocations.  */
3788 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
3789 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
3790 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
3791 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
/* Traditional general-dynamic TLS.  */
3794 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3795 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
3796 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
/* TLS-descriptor general-dynamic.  */
3799 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
3800 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3801 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3802 case BFD_RELOC_AARCH64_TLSDESC_CALL:
3803 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
3804 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
3805 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3806 return GOT_TLSDESC_GD;
/* Initial-exec TLS.  */
3808 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
3809 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
3810 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
3811 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
/* Local-exec TLS: no GOT entry is involved.  */
3814 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
3815 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
3816 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
3817 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
3818 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
3819 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
3820 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
3821 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
/* Decide whether the TLS access model used by relocation R_TYPE against
   the symbol identified by H / R_SYMNDX may be relaxed: only TLS
   relocations qualify, and the symbol's allocated GOT kind must be
   compatible with the relocation's required GOT kind (e.g. an IE symbol
   referenced via a GD-style relocation).  Undefined weak symbols get
   special treatment (handling elided in this excerpt).  */
3831 aarch64_can_relax_tls (bfd *input_bfd,
3832 struct bfd_link_info *info,
3833 bfd_reloc_code_real_type r_type,
3834 struct elf_link_hash_entry *h,
3835 unsigned long r_symndx)
3837 unsigned int symbol_got_type;
3838 unsigned int reloc_got_type;
3840 if (! IS_AARCH64_TLS_RELOC (r_type))
3843 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
3844 reloc_got_type = aarch64_reloc_got_type (r_type);
3846 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
3852 if (h && h->root.type == bfd_link_hash_undefweak)
3858 /* Given the relocation code R_TYPE, return the relaxed bfd reloc code.  */
/* Relax relocation type R_TYPE (raw ELF number) where permitted: check
   eligibility with aarch64_can_relax_tls, then delegate the actual
   mapping to aarch64_tls_transition_without_check.  Returns the
   (possibly unchanged) bfd relocation code.  */
3861 static bfd_reloc_code_real_type
3862 aarch64_tls_transition (bfd *input_bfd,
3863 struct bfd_link_info *info,
3864 unsigned int r_type,
3865 struct elf_link_hash_entry *h,
3866 unsigned long r_symndx)
3868 bfd_reloc_code_real_type bfd_r_type
3869 = elfNN_aarch64_bfd_reloc_from_type (r_type);
3871 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
3874 return aarch64_tls_transition_without_check (bfd_r_type, h);
3877 /* Return the base VMA address which should be subtracted from real addresses
3878 when resolving R_AARCH64_TLS_DTPREL relocation. */
/* DTP-relative base: the VMA of the TLS output section.  */
3881 dtpoff_base (struct bfd_link_info *info)
3883 /* If tls_sec is NULL, we should have signalled an error already. */
3884 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
3885 return elf_hash_table (info)->tls_sec->vma;
3888 /* Return the base VMA address which should be subtracted from real addresses
3889 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
/* TP-relative base: the TLS section's VMA minus the thread control
   block size (TCB_SIZE rounded up to the section's alignment), per the
   AArch64 variant-I TLS layout where the TCB sits below the first TLS
   block.  */
3892 tpoff_base (struct bfd_link_info *info)
3894 struct elf_link_hash_table *htab = elf_hash_table (info);
3896 /* If tls_sec is NULL, we should have signalled an error already. */
3897 BFD_ASSERT (htab->tls_sec != NULL);
3899 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
3900 htab->tls_sec->alignment_power);
3901 return htab->tls_sec->vma - base;
/* Return a pointer to the stored GOT offset for the symbol: the hash
   entry's slot for a global (H != NULL), otherwise the per-local-symbol
   slot indexed by R_SYMNDX in INPUT_BFD's locals array.  */
3905 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3906 unsigned long r_symndx)
3908 /* Calculate the address of the GOT entry for symbol
3909 referred to in h. */
3911 return &h->got.offset;
3915 struct elf_aarch64_local_symbol *l;
3917 l = elf_aarch64_locals (input_bfd);
3918 return &l[r_symndx].got_offset;
/* Mark the symbol's GOT entry as initialized.  NOTE(review): the line
   that sets the flag is elided here; elsewhere in this file the GOT
   offset's least-significant bit serves as the "initialized" flag.  */
3923 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3924 unsigned long r_symndx)
3927 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
/* Predicate: has the symbol's GOT entry been marked as initialized?
   NOTE(review): the test on VALUE is elided; presumably it checks the
   offset's least-significant flag bit — confirm against the full file.  */
3932 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
3933 unsigned long r_symndx)
3936 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
/* Return the symbol's GOT offset value (the stored offset with any flag
   bit stripped — masking elided in this excerpt).  */
3941 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3942 unsigned long r_symndx)
3945 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
/* Return a pointer to the stored TLSDESC jump-table offset for the
   symbol: from the AArch64-specific hash entry for a global, otherwise
   from the per-local-symbol array indexed by R_SYMNDX.  */
3951 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
3952 unsigned long r_symndx)
3954 /* Calculate the address of the GOT entry for symbol
3955 referred to in h. */
3958 struct elf_aarch64_link_hash_entry *eh;
3959 eh = (struct elf_aarch64_link_hash_entry *) h;
3960 return &eh->tlsdesc_got_jump_table_offset;
3965 struct elf_aarch64_local_symbol *l;
3967 l = elf_aarch64_locals (input_bfd);
3968 return &l[r_symndx].tlsdesc_got_jump_table_offset;
/* Mark the symbol's TLSDESC GOT entry as initialized (flag-setting line
   elided in this excerpt; mirrors symbol_got_offset_mark).  */
3973 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
3974 unsigned long r_symndx)
3977 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
/* Predicate: has the symbol's TLSDESC GOT entry been marked?  (The test
   on VALUE is elided; mirrors symbol_got_offset_mark_p.)  */
3982 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
3983 struct elf_link_hash_entry *h,
3984 unsigned long r_symndx)
3987 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
/* Return the symbol's TLSDESC GOT offset value (flag-bit stripping
   elided in this excerpt; mirrors symbol_got_offset).  */
3992 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
3993 unsigned long r_symndx)
3996 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4001 /* Data for make_branch_to_erratum_835769_stub(). */
/* Traversal context handed to make_branch_to_erratum_835769_stub: the
   section being written and (per the uses below) its contents buffer.  */
4003 struct erratum_835769_branch_to_stub_data
4005 asection *output_section;
4009 /* Helper to insert branches to erratum 835769 stubs in the right
4010 places for a particular section. */
/* Stub-hash traversal callback: for each erratum-835769 veneer stub
   targeting the section in IN_ARG, overwrite the veneered instruction
   with an unconditional branch (B) to the veneer.  */
4013 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4016 struct elf_aarch64_stub_hash_entry *stub_entry;
4017 struct erratum_835769_branch_to_stub_data *data;
4019 unsigned long branch_insn = 0;
4020 bfd_vma veneered_insn_loc, veneer_entry_loc;
4021 bfd_signed_vma branch_offset;
4022 unsigned int target;
4025 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4026 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
/* Skip entries for other sections or other stub kinds.  */
4028 if (stub_entry->target_section != data->output_section
4029 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4032 contents = data->contents;
/* Output VMAs of the patched instruction and of the veneer entry.  */
4033 veneered_insn_loc = stub_entry->target_section->output_section->vma
4034 + stub_entry->target_section->output_offset
4035 + stub_entry->target_value;
4036 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4037 + stub_entry->stub_sec->output_offset
4038 + stub_entry->stub_offset;
4039 branch_offset = veneer_entry_loc - veneered_insn_loc;
4041 abfd = stub_entry->target_section->owner;
4042 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4043 (*_bfd_error_handler)
4044 (_("%B: error: Erratum 835769 stub out "
4045 "of range (input file too large)"), abfd);
/* Encode B <offset>: opcode 0x14000000 with the word offset in the
   26-bit imm26 field (byte offset >> 2, masked to 26 bits).  */
4047 target = stub_entry->target_value;
4048 branch_insn = 0x14000000;
4049 branch_offset >>= 2;
4050 branch_offset &= 0x3ffffff;
4051 branch_insn |= branch_offset;
4052 bfd_putl32 (branch_insn, &contents[target]);
/* Hook run as section contents are written out: when the erratum-835769
   workaround is enabled, redirect affected instructions in SEC to their
   veneer stubs by traversing the stub hash table.  */
4058 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4059 struct bfd_link_info *link_info,
4064 struct elf_aarch64_link_hash_table *globals =
4065 elf_aarch64_hash_table (link_info);
4067 if (globals == NULL)
4070 /* Fix code to point to erratum 835769 stubs. */
4071 if (globals->fix_erratum_835769)
4073 struct erratum_835769_branch_to_stub_data data;
4075 data.output_section = sec;
4076 data.contents = contents;
4077 bfd_hash_traverse (&globals->stub_hash_table,
4078 make_branch_to_erratum_835769_stub, &data);
4084 /* Perform a relocation as part of a final link. */
/* Resolve one relocation during the final link: apply any permitted TLS
   transition, compute the relocated value (routing through PLT, GOT,
   TLS GOT or TLSDESC slots as the relocation class demands), emit
   dynamic relocations where the value cannot be known until run time,
   and finally patch the instruction/data via
   _bfd_aarch64_elf_put_addend.  Returns a bfd_reloc_status_type;
   bfd_reloc_continue is returned mid-sequence when SAVE_ADDEND is set
   so consecutive relocations at one offset can accumulate an addend.
   NOTE(review): this excerpt is lossy — declarations, braces and some
   branches are elided; do not infer control flow across the gaps.  */
4085 static bfd_reloc_status_type
4086 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4089 asection *input_section,
4091 Elf_Internal_Rela *rel,
4093 struct bfd_link_info *info,
4095 struct elf_link_hash_entry *h,
4096 bfd_boolean *unresolved_reloc_p,
4097 bfd_boolean save_addend,
4098 bfd_vma *saved_addend,
4099 Elf_Internal_Sym *sym)
4101 Elf_Internal_Shdr *symtab_hdr;
4102 unsigned int r_type = howto->type;
4103 bfd_reloc_code_real_type bfd_r_type
4104 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4105 bfd_reloc_code_real_type new_bfd_r_type;
4106 unsigned long r_symndx;
4107 bfd_byte *hit_data = contents + rel->r_offset;
4109 bfd_signed_vma signed_addend;
4110 struct elf_aarch64_link_hash_table *globals;
4111 bfd_boolean weak_undef_p;
4113 globals = elf_aarch64_hash_table (info);
4115 symtab_hdr = &elf_symtab_hdr (input_bfd);
4117 BFD_ASSERT (is_aarch64_elf (input_bfd));
4119 r_symndx = ELFNN_R_SYM (rel->r_info);
4121 /* It is possible to have linker relaxations on some TLS access
4122 models. Update our information here. */
4123 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4124 if (new_bfd_r_type != bfd_r_type)
/* The relocation was relaxed: switch to the new howto entry.  */
4126 bfd_r_type = new_bfd_r_type;
4127 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4128 BFD_ASSERT (howto != NULL);
4129 r_type = howto->type;
/* PC of the relocated field in the output image.  */
4132 place = input_section->output_section->vma
4133 + input_section->output_offset + rel->r_offset;
4135 /* Get addend, accumulating the addend for consecutive relocs
4136 which refer to the same offset. */
4137 signed_addend = saved_addend ? *saved_addend : 0;
4138 signed_addend += rel->r_addend;
4140 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4141 : bfd_is_und_section (sym_sec));
4143 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4144 it here if it is defined in a non-shared object. */
4146 && h->type == STT_GNU_IFUNC
4154 if ((input_section->flags & SEC_ALLOC) == 0
4155 || h->plt.offset == (bfd_vma) -1)
4158 /* STT_GNU_IFUNC symbol must go through PLT. */
4159 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4160 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
/* Diagnose relocation types we cannot handle against ifuncs.  */
4165 if (h->root.root.string)
4166 name = h->root.root.string;
4168 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4170 (*_bfd_error_handler)
4171 (_("%B: relocation %s against STT_GNU_IFUNC "
4172 "symbol `%s' isn't handled by %s"), input_bfd,
4173 howto->name, name, __FUNCTION__);
4174 bfd_set_error (bfd_error_bad_value);
4177 case BFD_RELOC_AARCH64_NN:
/* Absolute relocations against an ifunc must have a zero addend.  */
4178 if (rel->r_addend != 0)
4180 if (h->root.root.string)
4181 name = h->root.root.string;
4183 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4185 (*_bfd_error_handler)
4186 (_("%B: relocation %s against STT_GNU_IFUNC "
4187 "symbol `%s' has non-zero addend: %d"),
4188 input_bfd, howto->name, name, rel->r_addend);
4189 bfd_set_error (bfd_error_bad_value);
4193 /* Generate dynamic relocation only when there is a
4194 non-GOT reference in a shared object. */
4195 if (info->shared && h->non_got_ref)
4197 Elf_Internal_Rela outrel;
4200 /* Need a dynamic relocation to get the real function
4202 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
/* -1/-2 mean the field was discarded or needs no reloc.  */
4206 if (outrel.r_offset == (bfd_vma) -1
4207 || outrel.r_offset == (bfd_vma) -2)
4210 outrel.r_offset += (input_section->output_section->vma
4211 + input_section->output_offset);
4213 if (h->dynindx == -1
4215 || info->executable)
4217 /* This symbol is resolved locally. */
4218 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4219 outrel.r_addend = (h->root.u.def.value
4220 + h->root.u.def.section->output_section->vma
4221 + h->root.u.def.section->output_offset);
4225 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4226 outrel.r_addend = 0;
4229 sreloc = globals->root.irelifunc;
4230 elf_append_rela (output_bfd, sreloc, &outrel);
4232 /* If this reloc is against an external symbol, we
4233 do not want to fiddle with the addend. Otherwise,
4234 we need to include the symbol value so that it
4235 becomes an addend for the dynamic reloc. For an
4236 internal symbol, we have updated addend. */
4237 return bfd_reloc_ok;
/* Branches to an ifunc resolve to its PLT entry computed above.  */
4240 case BFD_RELOC_AARCH64_JUMP26:
4241 case BFD_RELOC_AARCH64_CALL26:
4242 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4245 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4247 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4248 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4249 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4250 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4251 base_got = globals->root.sgot;
4252 off = h->got.offset;
4254 if (base_got == NULL)
4257 if (off == (bfd_vma) -1)
4261 /* We can't use h->got.offset here to save state, or
4262 even just remember the offset, as finish_dynamic_symbol
4263 would use that as offset into .got. */
/* No .got slot: derive the entry from the PLT index instead.  The
   "+ 3" skips the three reserved words at the start of .got.plt.  */
4265 if (globals->root.splt != NULL)
4267 plt_index = ((h->plt.offset - globals->plt_header_size) /
4268 globals->plt_entry_size);
4269 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4270 base_got = globals->root.sgotplt;
4274 plt_index = h->plt.offset / globals->plt_entry_size;
4275 off = plt_index * GOT_ENTRY_SIZE;
4276 base_got = globals->root.igotplt;
4279 if (h->dynindx == -1
4283 /* This references the local definition. We must
4284 initialize this entry in the global offset table.
4285 Since the offset must always be a multiple of 8,
4286 we use the least significant bit to record
4287 whether we have initialized it already.
4289 When doing a dynamic link, we create a .rela.got
4290 relocation entry to initialize the value. This
4291 is done in the finish_dynamic_symbol routine. */
4296 bfd_put_NN (output_bfd, value,
4297 base_got->contents + off);
4298 /* Note that this is harmless as -1 | 1 still is -1. */
4302 value = (base_got->output_section->vma
4303 + base_got->output_offset + off);
4306 value = aarch64_calculate_got_entry_vma (h, globals, info,
4308 unresolved_reloc_p);
4309 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4311 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4312 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4313 case BFD_RELOC_AARCH64_ADD_LO12:
/* Main (non-ifunc) switch over relocation classes begins here.  */
4320 case BFD_RELOC_AARCH64_NONE:
4321 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4322 *unresolved_reloc_p = FALSE;
4323 return bfd_reloc_ok;
4325 case BFD_RELOC_AARCH64_NN:
4327 /* When generating a shared object or relocatable executable, these
4328 relocations are copied into the output file to be resolved at
4330 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
4331 && (input_section->flags & SEC_ALLOC)
4333 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4334 || h->root.type != bfd_link_hash_undefweak))
4336 Elf_Internal_Rela outrel;
4338 bfd_boolean skip, relocate;
4341 *unresolved_reloc_p = FALSE;
4346 outrel.r_addend = signed_addend;
4348 _bfd_elf_section_offset (output_bfd, info, input_section,
4350 if (outrel.r_offset == (bfd_vma) - 1)
4352 else if (outrel.r_offset == (bfd_vma) - 2)
4358 outrel.r_offset += (input_section->output_section->vma
4359 + input_section->output_offset);
4362 memset (&outrel, 0, sizeof outrel);
/* Emit a symbolic reloc for a preemptible dynamic symbol, otherwise
   a RELATIVE reloc the loader can apply without a symbol lookup.  */
4365 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4366 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4371 /* On SVR4-ish systems, the dynamic loader cannot
4372 relocate the text and data segments independently,
4373 so the symbol does not matter. */
4375 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4376 outrel.r_addend += value;
4379 sreloc = elf_section_data (input_section)->sreloc;
4380 if (sreloc == NULL || sreloc->contents == NULL)
4381 return bfd_reloc_notsupported;
4383 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4384 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4386 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4388 /* Sanity to check that we have previously allocated
4389 sufficient space in the relocation section for the
4390 number of relocations we actually want to emit. */
4394 /* If this reloc is against an external symbol, we do not want to
4395 fiddle with the addend. Otherwise, we need to include the symbol
4396 value so that it becomes an addend for the dynamic reloc. */
4398 return bfd_reloc_ok;
4400 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4401 contents, rel->r_offset, value,
4405 value += signed_addend;
4408 case BFD_RELOC_AARCH64_JUMP26:
4409 case BFD_RELOC_AARCH64_CALL26:
4411 asection *splt = globals->root.splt;
4412 bfd_boolean via_plt_p =
4413 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4415 /* A call to an undefined weak symbol is converted to a jump to
4416 the next instruction unless a PLT entry will be created.
4417 The jump to the next instruction is optimized as a NOP.
4418 Do the same for local undefined symbols. */
4419 if (weak_undef_p && ! via_plt_p)
4421 bfd_putl32 (INSN_NOP, hit_data);
4422 return bfd_reloc_ok;
4425 /* If the call goes through a PLT entry, make sure to
4426 check distance to the right destination address. */
4429 value = (splt->output_section->vma
4430 + splt->output_offset + h->plt.offset);
4431 *unresolved_reloc_p = FALSE;
4434 /* If the target symbol is global and marked as a function the
4435 relocation applies a function call or a tail call. In this
4436 situation we can veneer out of range branches. The veneers
4437 use IP0 and IP1 hence cannot be used arbitrary out of range
4438 branches that occur within the body of a function. */
4439 if (h && h->type == STT_FUNC)
4441 /* Check if a stub has to be inserted because the destination
4443 if (! aarch64_valid_branch_p (value, place))
4445 /* The target is out of reach, so redirect the branch to
4446 the local stub for this function. */
4447 struct elf_aarch64_stub_hash_entry *stub_entry;
4448 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
4451 if (stub_entry != NULL)
4452 value = (stub_entry->stub_offset
4453 + stub_entry->stub_sec->output_offset
4454 + stub_entry->stub_sec->output_section->vma);
4458 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4459 signed_addend, weak_undef_p);
/* Simple data / PC-relative / MOVW relocations: resolve directly.  */
4462 case BFD_RELOC_AARCH64_16:
4464 case BFD_RELOC_AARCH64_32:
4466 case BFD_RELOC_AARCH64_ADD_LO12:
4467 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4468 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4469 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4470 case BFD_RELOC_AARCH64_BRANCH19:
4471 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4472 case BFD_RELOC_AARCH64_LDST8_LO12:
4473 case BFD_RELOC_AARCH64_LDST16_LO12:
4474 case BFD_RELOC_AARCH64_LDST32_LO12:
4475 case BFD_RELOC_AARCH64_LDST64_LO12:
4476 case BFD_RELOC_AARCH64_LDST128_LO12:
4477 case BFD_RELOC_AARCH64_MOVW_G0_S:
4478 case BFD_RELOC_AARCH64_MOVW_G1_S:
4479 case BFD_RELOC_AARCH64_MOVW_G2_S:
4480 case BFD_RELOC_AARCH64_MOVW_G0:
4481 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4482 case BFD_RELOC_AARCH64_MOVW_G1:
4483 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4484 case BFD_RELOC_AARCH64_MOVW_G2:
4485 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4486 case BFD_RELOC_AARCH64_MOVW_G3:
4487 case BFD_RELOC_AARCH64_16_PCREL:
4488 case BFD_RELOC_AARCH64_32_PCREL:
4489 case BFD_RELOC_AARCH64_64_PCREL:
4490 case BFD_RELOC_AARCH64_TSTBR14:
4491 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4492 signed_addend, weak_undef_p);
/* Plain GOT-relative relocations: resolve via the GOT entry VMA.  */
4495 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4496 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4497 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4498 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4499 if (globals->root.sgot == NULL)
4500 BFD_ASSERT (h != NULL);
4504 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4506 unresolved_reloc_p);
4507 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
/* GD and IE TLS relocations: value is the symbol's TLS GOT slot VMA.  */
4512 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4513 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4514 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4515 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4516 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4517 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4518 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4519 if (globals->root.sgot == NULL)
4520 return bfd_reloc_notsupported;
4522 value = (symbol_got_offset (input_bfd, h, r_symndx)
4523 + globals->root.sgot->output_section->vma
4524 + globals->root.sgot->output_offset);
4526 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4528 *unresolved_reloc_p = FALSE;
/* LE TLS relocations: TP-relative offsets, known at static link time.  */
4531 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4532 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4533 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4534 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4535 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4536 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4537 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4538 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4539 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4540 signed_addend - tpoff_base (info),
4542 *unresolved_reloc_p = FALSE;
/* TLSDESC relocations: value is the descriptor slot in .got.plt,
   past the reserved jump-table words.  */
4545 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4546 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4547 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4548 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4549 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4550 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4551 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4552 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4553 if (globals->root.sgot == NULL)
4554 return bfd_reloc_notsupported;
4555 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4556 + globals->root.sgotplt->output_section->vma
4557 + globals->root.sgotplt->output_offset
4558 + globals->sgotplt_jump_table_size);
4560 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4562 *unresolved_reloc_p = FALSE;
4566 return bfd_reloc_notsupported;
4570 *saved_addend = value;
4572 /* Only apply the final relocation in a sequence. */
4574 return bfd_reloc_continue;
4576 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4580 /* Handle TLS relaxations. Relaxing is possible for symbols that use
4581 R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
   link.
4584 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4585 is to then call final_link_relocate.  Return other values in the
   case of error.  */
4588 static bfd_reloc_status_type
4589 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
4590 bfd *input_bfd, bfd_byte *contents,
4591 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
4593 bfd_boolean is_local = h == NULL;
4594 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
4597 BFD_ASSERT (globals && input_bfd && contents && rel);
4599 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
4601 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4602 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4605 /* GD->LE relaxation:
4606 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
4608 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
4610 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4611 return bfd_reloc_continue;
4615 /* GD->IE relaxation:
4616 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
4618 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
4620 return bfd_reloc_continue;
4623 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4627 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4630 /* Tiny TLSDESC->LE relaxation:
4631 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
4632 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
4636 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
4637 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
4639 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4640 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
4641 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4643 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
4644 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
4645 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
4646 return bfd_reloc_continue;
4650 /* Tiny TLSDESC->IE relaxation:
4651 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
4652 adr x0, :tlsdesc:var => nop
4656 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
4657 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
4659 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4660 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4662 bfd_putl32 (0x58000000, contents + rel->r_offset);
4663 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
4664 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
4665 return bfd_reloc_continue;
4668 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4671 /* Tiny GD->LE relaxation:
4672 adr x0, :tlsgd:var => mrs x1, tpidr_el0
4673 bl __tls_get_addr => add x0, x1, #:tprel_hi12:x, lsl #12
4674 nop => add x0, x0, #:tprel_lo12_nc:x
4677 /* First kill the tls_get_addr reloc on the bl instruction. */
4678 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4680 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
4681 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
4682 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
4684 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4685 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
4686 rel[1].r_offset = rel->r_offset + 8;
4688 /* Move the current relocation to the second instruction in
4691 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
4692 AARCH64_R (TLSLE_ADD_TPREL_HI12));
4693 return bfd_reloc_continue;
4697 /* Tiny GD->IE relaxation:
4698 adr x0, :tlsgd:var => ldr x0, :gottprel:var
4699 bl __tls_get_addr => mrs x1, tpidr_el0
4700 nop => add x0, x0, x1
4703 /* First kill the tls_get_addr reloc on the bl instruction. */
4704 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4705 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4707 bfd_putl32 (0x58000000, contents + rel->r_offset);
4708 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4709 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4710 return bfd_reloc_continue;
4713 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4714 return bfd_reloc_continue;
4716 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4719 /* GD->LE relaxation:
4720 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
4722 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4723 return bfd_reloc_continue;
4727 /* GD->IE relaxation:
4728 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
4730 insn = bfd_getl32 (contents + rel->r_offset);
4732 bfd_putl32 (insn, contents + rel->r_offset);
4733 return bfd_reloc_continue;
4736 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4739 /* GD->LE relaxation
4740 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
4741 bl __tls_get_addr => mrs x1, tpidr_el0
4742 nop => add x0, x1, x0
4745 /* First kill the tls_get_addr reloc on the bl instruction. */
4746 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
4747 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4749 bfd_putl32 (0xf2800000, contents + rel->r_offset);
4750 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
4751 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
4752 return bfd_reloc_continue;
4756 /* GD->IE relaxation
4757 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
4758 BL __tls_get_addr => mrs x1, tpidr_el0
4760 NOP => add x0, x1, x0
4763 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
4765 /* Remove the relocation on the BL instruction. */
4766 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
4768 bfd_putl32 (0xf9400000, contents + rel->r_offset);
4770 /* We choose to fixup the BL and NOP instructions using the
4771 offset from the second relocation to allow flexibility in
4772 scheduling instructions between the ADD and BL. */
4773 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
4774 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
4775 return bfd_reloc_continue;
4778 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4779 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4780 /* GD->IE/LE relaxation:
4781 add x0, x0, #:tlsdesc_lo12:var => nop
4784 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
4785 return bfd_reloc_ok;
4787 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4788 /* IE->LE relaxation:
4789 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
4793 insn = bfd_getl32 (contents + rel->r_offset);
4794 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
4796 return bfd_reloc_continue;
4798 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4799 /* IE->LE relaxation:
4800 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
4804 insn = bfd_getl32 (contents + rel->r_offset);
4805 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
4807 return bfd_reloc_continue;
4810 return bfd_reloc_continue;
4813 return bfd_reloc_ok;
4816 /* Relocate an AArch64 ELF section. */

/* NOTE(review): this listing is elided -- the embedded original line
   numbers are non-contiguous, so braces, some declarations and some
   conditions are not visible here.  Comments added below describe only
   what the visible lines establish; elided behaviour is hedged.  */

4819 elfNN_aarch64_relocate_section (bfd *output_bfd,
4820 struct bfd_link_info *info,
4822 asection *input_section,
4824 Elf_Internal_Rela *relocs,
4825 Elf_Internal_Sym *local_syms,
4826 asection **local_sections)
4828 Elf_Internal_Shdr *symtab_hdr;
4829 struct elf_link_hash_entry **sym_hashes;
4830 Elf_Internal_Rela *rel;
4831 Elf_Internal_Rela *relend;
4833 struct elf_aarch64_link_hash_table *globals;
4834 bfd_boolean save_addend = FALSE;
4837 globals = elf_aarch64_hash_table (info);
4839 symtab_hdr = &elf_symtab_hdr (input_bfd);
4840 sym_hashes = elf_sym_hashes (input_bfd);

/* Walk every relocation applied to this input section.  */
4843 relend = relocs + input_section->reloc_count;
4844 for (; rel < relend; rel++)
4846 unsigned int r_type;
4847 bfd_reloc_code_real_type bfd_r_type;
4848 bfd_reloc_code_real_type relaxed_bfd_r_type;
4849 reloc_howto_type *howto;
4850 unsigned long r_symndx;
4851 Elf_Internal_Sym *sym;
4853 struct elf_link_hash_entry *h;
4855 bfd_reloc_status_type r;
4858 bfd_boolean unresolved_reloc = FALSE;
4859 char *error_message = NULL;

4861 r_symndx = ELFNN_R_SYM (rel->r_info);
4862 r_type = ELFNN_R_TYPE (rel->r_info);

/* Map the ELF relocation number to a howto entry; the guard that
   reports an unrecognized relocation is partly elided here.  */
4864 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
4865 howto = bfd_reloc.howto;
4869 (*_bfd_error_handler)
4870 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
4871 input_bfd, input_section, r_type);
4874 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);

/* Resolve the referenced symbol: indices below sh_info are local
   symbols; anything else is looked up through the hash table.  */
4880 if (r_symndx < symtab_hdr->sh_info)
4882 sym = local_syms + r_symndx;
4883 sym_type = ELFNN_ST_TYPE (sym->st_info);
4884 sec = local_sections[r_symndx];

4886 /* An object file might have a reference to a local
4887 undefined symbol. This is a daft object file, but we
4888 should at least do something about it. */
4889 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
4890 && bfd_is_und_section (sec)
4891 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
4893 if (!info->callbacks->undefined_symbol
4894 (info, bfd_elf_string_from_elf_section
4895 (input_bfd, symtab_hdr->sh_link, sym->st_name),
4896 input_bfd, input_section, rel->r_offset, TRUE))

4900 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);

4902 /* Relocate against local STT_GNU_IFUNC symbol. */
4903 if (!info->relocatable
4904 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
4906 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
4911 /* Set STT_GNU_IFUNC symbol value. */
4912 h->root.u.def.value = sym->st_value;
4913 h->root.u.def.section = sec;
4918 bfd_boolean warned, ignored;
4920 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
4921 r_symndx, symtab_hdr, sym_hashes,
4923 unresolved_reloc, warned, ignored);

/* Standard handling for relocations against discarded sections.  */
4928 if (sec != NULL && discarded_section (sec))
4929 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4930 rel, 1, relend, howto, 0, contents);

4932 if (info->relocatable)

/* Pick a printable symbol name for the diagnostics below.  */
4936 name = h->root.root.string;
4939 name = (bfd_elf_string_from_elf_section
4940 (input_bfd, symtab_hdr->sh_link, sym->st_name));
4941 if (name == NULL || *name == '\0')
4942 name = bfd_section_name (input_bfd, sec);

/* Diagnose a TLS relocation used against a non-TLS symbol, and
   vice versa, for defined/defweak symbols.  */
4946 && r_type != R_AARCH64_NONE
4947 && r_type != R_AARCH64_NULL
4949 || h->root.type == bfd_link_hash_defined
4950 || h->root.type == bfd_link_hash_defweak)
4951 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
4953 (*_bfd_error_handler)
4954 ((sym_type == STT_TLS
4955 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
4956 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
4958 input_section, (long) rel->r_offset, howto->name, name);

4961 /* We relax only if we can see that there can be a valid transition
4962 from a reloc type to another.
4963 We call elfNN_aarch64_final_link_relocate unless we're completely
4964 done, i.e., the relaxation produced the final output we want. */
4966 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
4968 if (relaxed_bfd_r_type != bfd_r_type)
4970 bfd_r_type = relaxed_bfd_r_type;
4971 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4972 BFD_ASSERT (howto != NULL);
4973 r_type = howto->type;
4974 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
4975 unresolved_reloc = 0;
4978 r = bfd_reloc_continue;

4980 /* There may be multiple consecutive relocations for the
4981 same offset. In that case we are supposed to treat the
4982 output of each relocation as the addend for the next. */
4983 if (rel + 1 < relend
4984 && rel->r_offset == rel[1].r_offset
4985 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
4986 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
4989 save_addend = FALSE;

4991 if (r == bfd_reloc_continue)
4992 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
4993 input_section, contents, rel,
4994 relocation, info, sec,
4995 h, &unresolved_reloc,
4996 save_addend, &addend, sym);

/* Post-process GOT-using TLS relocations: fill in the symbol's GOT
   entry (and emit a dynamic reloc when needed), once per symbol,
   guarded by the *_got_offset_mark_p / *_got_offset_mark pair.  */
4998 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5000 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5001 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5002 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5003 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5005 bfd_boolean need_relocs = FALSE;
5010 off = symbol_got_offset (input_bfd, h, r_symndx);
5011 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5014 (info->shared || indx != 0) &&
5016 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5017 || h->root.type != bfd_link_hash_undefweak);
5019 BFD_ASSERT (globals->root.srelgot != NULL);

/* Dynamic case: emit a TLS_DTPMOD reloc into .rela.got (and, for a
   global symbol, a TLS_DTPREL reloc for the second GOT word).  */
5023 Elf_Internal_Rela rela;
5024 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
5026 rela.r_offset = globals->root.sgot->output_section->vma +
5027 globals->root.sgot->output_offset + off;
5030 loc = globals->root.srelgot->contents;
5031 loc += globals->root.srelgot->reloc_count++
5032 * RELOC_SIZE (htab);
5033 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5037 bfd_put_NN (output_bfd,
5038 relocation - dtpoff_base (info),
5039 globals->root.sgot->contents + off
5044 /* This TLS symbol is global. We emit a
5045 relocation to fixup the tls offset at load
5048 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
5051 (globals->root.sgot->output_section->vma
5052 + globals->root.sgot->output_offset + off
5055 loc = globals->root.srelgot->contents;
5056 loc += globals->root.srelgot->reloc_count++
5057 * RELOC_SIZE (globals);
5058 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5059 bfd_put_NN (output_bfd, (bfd_vma) 0,
5060 globals->root.sgot->contents + off

/* Static case: the module id is 1 and the dtp-relative offset is
   known at link time, so write both GOT words directly.  */
5066 bfd_put_NN (output_bfd, (bfd_vma) 1,
5067 globals->root.sgot->contents + off);
5068 bfd_put_NN (output_bfd,
5069 relocation - dtpoff_base (info),
5070 globals->root.sgot->contents + off
5074 symbol_got_offset_mark (input_bfd, h, r_symndx);

5078 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5079 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5080 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5081 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5083 bfd_boolean need_relocs = FALSE;
5088 off = symbol_got_offset (input_bfd, h, r_symndx);
5090 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5093 (info->shared || indx != 0) &&
5095 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5096 || h->root.type != bfd_link_hash_undefweak);
5098 BFD_ASSERT (globals->root.srelgot != NULL);

/* IE model: either emit a TLS_TPREL dynamic reloc, or resolve the
   tp-relative offset statically into the GOT word.  */
5102 Elf_Internal_Rela rela;
5105 rela.r_addend = relocation - dtpoff_base (info);
5109 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5110 rela.r_offset = globals->root.sgot->output_section->vma +
5111 globals->root.sgot->output_offset + off;
5113 loc = globals->root.srelgot->contents;
5114 loc += globals->root.srelgot->reloc_count++
5115 * RELOC_SIZE (htab);
5117 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5119 bfd_put_NN (output_bfd, rela.r_addend,
5120 globals->root.sgot->contents + off);
5123 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5124 globals->root.sgot->contents + off);
5126 symbol_got_offset_mark (input_bfd, h, r_symndx);

/* TLSLE relocations need no GOT entry; visible fall-through area.  */
5130 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5131 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5132 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5133 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5134 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5135 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5136 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5137 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5140 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5141 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5142 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5143 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5144 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5145 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5147 bfd_boolean need_relocs = FALSE;
5148 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5149 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5151 need_relocs = (h == NULL
5152 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5153 || h->root.type != bfd_link_hash_undefweak);
5155 BFD_ASSERT (globals->root.srelgot != NULL);
5156 BFD_ASSERT (globals->root.sgot != NULL);
5161 Elf_Internal_Rela rela;
5162 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5165 rela.r_offset = (globals->root.sgotplt->output_section->vma
5166 + globals->root.sgotplt->output_offset
5167 + off + globals->sgotplt_jump_table_size);
5170 rela.r_addend = relocation - dtpoff_base (info);
5172 /* Allocate the next available slot in the PLT reloc
5173 section to hold our R_AARCH64_TLSDESC, the next
5174 available slot is determined from reloc_count,
5175 which we step. But note, reloc_count was
5176 artifically moved down while allocating slots for
5177 real PLT relocs such that all of the PLT relocs
5178 will fit above the initial reloc_count and the
5179 extra stuff will fit below. */
5180 loc = globals->root.srelplt->contents;
5181 loc += globals->root.srelplt->reloc_count++
5182 * RELOC_SIZE (globals);
5184 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);

/* Zero both words of the TLSDESC GOT slot; the dynamic linker fills
   them in at load time via the R_AARCH64_TLSDESC reloc above.  */
5186 bfd_put_NN (output_bfd, (bfd_vma) 0,
5187 globals->root.sgotplt->contents + off +
5188 globals->sgotplt_jump_table_size);
5189 bfd_put_NN (output_bfd, (bfd_vma) 0,
5190 globals->root.sgotplt->contents + off +
5191 globals->sgotplt_jump_table_size +
5195 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);

5206 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5207 because such sections are not SEC_ALLOC and thus ld.so will
5208 not process them. */
5209 if (unresolved_reloc
5210 && !((input_section->flags & SEC_DEBUGGING) != 0
5212 && _bfd_elf_section_offset (output_bfd, info, input_section,
5213 +rel->r_offset) != (bfd_vma) - 1)
5215 (*_bfd_error_handler)
5217 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5218 input_bfd, input_section, (long) rel->r_offset, howto->name,
5219 h->root.root.string);

/* Report any non-OK relocation status via the linker callbacks.  */
5223 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5227 case bfd_reloc_overflow:
5228 /* If the overflowing reloc was to an undefined symbol,
5229 we have already printed one error message and there
5230 is no point complaining again. */
5232 h->root.type != bfd_link_hash_undefined)
5233 && (!((*info->callbacks->reloc_overflow)
5234 (info, (h ? &h->root : NULL), name, howto->name,
5235 (bfd_vma) 0, input_bfd, input_section,
5240 case bfd_reloc_undefined:
5241 if (!((*info->callbacks->undefined_symbol)
5242 (info, name, input_bfd, input_section,
5243 rel->r_offset, TRUE)))
5247 case bfd_reloc_outofrange:
5248 error_message = _("out of range");
5251 case bfd_reloc_notsupported:
5252 error_message = _("unsupported relocation");
5255 case bfd_reloc_dangerous:
5256 /* error_message should already be set. */
5260 error_message = _("unknown error");
5264 BFD_ASSERT (error_message != NULL);
5265 if (!((*info->callbacks->reloc_dangerous)
5266 (info, error_message, input_bfd, input_section,
5277 /* Set the right machine number. */

/* NOTE(review): elided listing -- the condition selecting between the
   ILP32 and LP64 mach values (presumably an ABI-size test) and the
   return statement fall in lines not visible here.  */
5280 elfNN_aarch64_object_p (bfd *abfd)
5283 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5285 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5290 /* Function to keep AArch64 specific flags in the ELF header. */

5293 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
/* The branch taken when flags were already initialised to a different
   value is elided; the visible path records FLAGS in e_flags and marks
   them initialised.  */
5295 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5300 elf_elfheader (abfd)->e_flags = flags;
5301 elf_flags_init (abfd) = TRUE;
5307 /* Merge backend specific data from an object file to the output
5308 object file when linking. */

/* NOTE(review): elided listing -- declarations of in_flags/out_flags/
   sec and several braces/returns fall in lines not visible here.  */
5311 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5315 bfd_boolean flags_compatible = TRUE;
5318 /* Check if we have the same endianess. */
5319 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5322 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5325 /* The input BFD must have had its flags initialised. */
5326 /* The following seems bogus to me -- The flags are initialized in
5327 the assembler but I don't think an elf_flags_init field is
5328 written into the object. */
5329 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5331 in_flags = elf_elfheader (ibfd)->e_flags;
5332 out_flags = elf_elfheader (obfd)->e_flags;

/* First merge into an output whose flags are still uninitialised:
   adopt the input's flags and, when the output arch is still the
   default, its arch/mach as well.  */
5334 if (!elf_flags_init (obfd))
5336 /* If the input is the default architecture and had the default
5337 flags then do not bother setting the flags for the output
5338 architecture, instead allow future merges to do this. If no
5339 future merges ever set these flags then they will retain their
5340 uninitialised values, which surprise surprise, correspond
5341 to the default values. */
5342 if (bfd_get_arch_info (ibfd)->the_default
5343 && elf_elfheader (ibfd)->e_flags == 0)
5346 elf_flags_init (obfd) = TRUE;
5347 elf_elfheader (obfd)->e_flags = in_flags;
5349 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5350 && bfd_get_arch_info (obfd)->the_default)
5351 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5352 bfd_get_mach (ibfd));
5357 /* Identical flags must be compatible. */
5358 if (in_flags == out_flags)
5361 /* Check to see if the input BFD actually contains any sections. If
5362 not, its flags may not have been initialised either, but it
5363 cannot actually cause any incompatiblity. Do not short-circuit
5364 dynamic objects; their section list may be emptied by
5365 elf_link_add_object_symbols.
5367 Also check to see if there are no code sections in the input.
5368 In this case there is no need to check for code specific flags.
5369 XXX - do we need to worry about floating-point format compatability
5370 in data sections ? */
5371 if (!(ibfd->flags & DYNAMIC))
5373 bfd_boolean null_input_bfd = TRUE;
5374 bfd_boolean only_data_sections = TRUE;
5376 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5378 if ((bfd_get_section_flags (ibfd, sec)
5379 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5380 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5381 only_data_sections = FALSE;
5383 null_input_bfd = FALSE;
5387 if (null_input_bfd || only_data_sections)
5391 return flags_compatible;
5394 /* Display the flags field. */

5397 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5399 FILE *file = (FILE *) ptr;
5400 unsigned long flags;
5402 BFD_ASSERT (abfd != NULL && ptr != NULL);
5404 /* Print normal ELF private data. */
5405 _bfd_elf_print_private_bfd_data (abfd, ptr);
5407 flags = elf_elfheader (abfd)->e_flags;
5408 /* Ignore init flag - it may not be set, despite the flags field
5409 containing valid data. */
5411 /* xgettext:c-format */
5412 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
/* No AArch64-specific e_flags bits are decoded here; any set bits are
   reported as unrecognised (the guarding condition is elided).  */
5415 fprintf (file, _("<Unrecognised flag bits set>"));
5422 /* Update the got entry reference counts for the section being removed. */

/* NOTE(review): elided listing -- several braces, early returns and
   parts of conditions fall in lines not visible here.  */
5425 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5426 struct bfd_link_info *info,
5428 const Elf_Internal_Rela * relocs)
5430 struct elf_aarch64_link_hash_table *htab;
5431 Elf_Internal_Shdr *symtab_hdr;
5432 struct elf_link_hash_entry **sym_hashes;
5433 struct elf_aarch64_local_symbol *locals;
5434 const Elf_Internal_Rela *rel, *relend;

/* Nothing to sweep in a relocatable link.  */
5436 if (info->relocatable)
5439 htab = elf_aarch64_hash_table (info);
5444 elf_section_data (sec)->local_dynrel = NULL;
5446 symtab_hdr = &elf_symtab_hdr (abfd);
5447 sym_hashes = elf_sym_hashes (abfd);
5449 locals = elf_aarch64_locals (abfd);

/* For each reloc in the swept section, drop the reference counts it
   contributed during check_relocs.  */
5451 relend = relocs + sec->reloc_count;
5452 for (rel = relocs; rel < relend; rel++)
5454 unsigned long r_symndx;
5455 unsigned int r_type;
5456 struct elf_link_hash_entry *h = NULL;
5458 r_symndx = ELFNN_R_SYM (rel->r_info);
5460 if (r_symndx >= symtab_hdr->sh_info)
5463 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
/* Follow indirect/warning links to the real symbol.  */
5464 while (h->root.type == bfd_link_hash_indirect
5465 || h->root.type == bfd_link_hash_warning)
5466 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5470 Elf_Internal_Sym *isym;
5472 /* A local symbol. */
5473 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5476 /* Check relocation against local STT_GNU_IFUNC symbol. */
5478 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5480 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
5488 struct elf_aarch64_link_hash_entry *eh;
5489 struct elf_dyn_relocs **pp;
5490 struct elf_dyn_relocs *p;
5492 eh = (struct elf_aarch64_link_hash_entry *) h;
/* Discard dynamic-reloc records that referenced SEC.  */
5494 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
5497 /* Everything must go for SEC. */
5503 r_type = ELFNN_R_TYPE (rel->r_info);
/* Classify via the (possibly relaxed) transition, mirroring the
   accounting done in check_relocs.  */
5504 switch (aarch64_tls_transition (abfd,info, r_type, h ,r_symndx))
5506 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5507 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5508 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5509 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5510 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5511 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5512 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5513 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5514 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5515 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5516 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5517 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5518 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5519 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5520 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5521 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5522 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5523 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5524 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5525 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5526 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5527 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5528 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5529 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5530 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
/* GOT-style references: drop a GOT refcount (global or local), and
   a PLT refcount too for ifunc symbols.  */
5533 if (h->got.refcount > 0)
5534 h->got.refcount -= 1;
5536 if (h->type == STT_GNU_IFUNC)
5538 if (h->plt.refcount > 0)
5539 h->plt.refcount -= 1;
5542 else if (locals != NULL)
5544 if (locals[r_symndx].got_refcount > 0)
5545 locals[r_symndx].got_refcount -= 1;
5549 case BFD_RELOC_AARCH64_CALL26:
5550 case BFD_RELOC_AARCH64_JUMP26:
5551 /* If this is a local symbol then we resolve it
5552 directly without creating a PLT entry. */
5556 if (h->plt.refcount > 0)
5557 h->plt.refcount -= 1;
5560 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5561 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5562 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5563 case BFD_RELOC_AARCH64_MOVW_G3:
5564 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
5565 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5566 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
5567 case BFD_RELOC_AARCH64_NN:
/* Absolute/PC-relative data references may have forced a PLT entry
   in executables; drop that refcount.  */
5568 if (h != NULL && info->executable)
5570 if (h->plt.refcount > 0)
5571 h->plt.refcount -= 1;
5583 /* Adjust a symbol defined by a dynamic object and referenced by a
5584 regular object. The current definition is in some section of the
5585 dynamic object, but we're not including those sections. We have to
5586 change the definition to something the rest of the link can
5590 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
5591 struct elf_link_hash_entry *h)
5593 struct elf_aarch64_link_hash_table *htab;

5596 /* If this is a function, put it in the procedure linkage table. We
5597 will fill in the contents of the procedure linkage table later,
5598 when we know the address of the .got section. */
5599 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
5601 if (h->plt.refcount <= 0
5602 || (h->type != STT_GNU_IFUNC
5603 && (SYMBOL_CALLS_LOCAL (info, h)
5604 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
5605 && h->root.type == bfd_link_hash_undefweak))))
5607 /* This case can occur if we saw a CALL26 reloc in
5608 an input file, but the symbol wasn't referred to
5609 by a dynamic object or all references were
5610 garbage collected. In which case we can end up
5612 h->plt.offset = (bfd_vma) - 1;
5619 /* It's possible that we incorrectly decided a .plt reloc was
5620 needed for an R_X86_64_PC32 reloc to a non-function sym in
5621 check_relocs. We can't decide accurately between function and
5622 non-function syms in check-relocs; Objects loaded later in
5623 the link may change h->type. So fix it now. */
5624 h->plt.offset = (bfd_vma) - 1;

5627 /* If this is a weak symbol, and there is a real definition, the
5628 processor independent code will have arranged for us to see the
5629 real definition first, and we can just use the same value. */
5630 if (h->u.weakdef != NULL)
5632 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
5633 || h->u.weakdef->root.type == bfd_link_hash_defweak);
5634 h->root.u.def.section = h->u.weakdef->root.u.def.section;
5635 h->root.u.def.value = h->u.weakdef->root.u.def.value;
5636 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
5637 h->non_got_ref = h->u.weakdef->non_got_ref;

5641 /* If we are creating a shared library, we must presume that the
5642 only references to the symbol are via the global offset table.
5643 For such cases we need not do anything here; the relocations will
5644 be handled correctly by relocate_section. */

5648 /* If there are no references to this symbol that do not use the
5649 GOT, we don't need to generate a copy reloc. */
5650 if (!h->non_got_ref)

5653 /* If -z nocopyreloc was given, we won't generate them either. */
5654 if (info->nocopyreloc)

5660 /* We must allocate the symbol in our .dynbss section, which will
5661 become part of the .bss section of the executable. There will be
5662 an entry for this symbol in the .dynsym section. The dynamic
5663 object will contain position independent code, so all references
5664 from the dynamic object to this symbol will go through the global
5665 offset table. The dynamic linker will use the .dynsym entry to
5666 determine the address it must put in the global offset table, so
5667 both the dynamic object and the regular object will refer to the
5668 same memory location for the variable. */

5670 htab = elf_aarch64_hash_table (info);

5672 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
5673 to copy the initial value out of the dynamic object and into the
5674 runtime process image. */
5675 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
5677 htab->srelbss->size += RELOC_SIZE (htab);

/* Reserve room in .dynbss (s -- defined in an elided line,
   presumably htab->sdynbss) via the generic helper.  */
5683 return _bfd_elf_adjust_dynamic_copy (info, h, s);
5688 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
5690 struct elf_aarch64_local_symbol *locals;
5691 locals = elf_aarch64_locals (abfd);
/* Lazily allocate a zero-initialised array of NUMBER per-local-symbol
   records for ABFD; the NULL tests and return fall in elided lines.  */
5694 locals = (struct elf_aarch64_local_symbol *)
5695 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
5698 elf_aarch64_locals (abfd) = locals;
5703 /* Create the .got section to hold the global offset table. */

/* NOTE(review): elided listing -- declarations of s/flags, NULL checks
   and returns fall in lines not visible here.  */
5706 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
5708 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
5711 struct elf_link_hash_entry *h;
5712 struct elf_link_hash_table *htab = elf_hash_table (info);

5714 /* This function may be called more than once. */
5715 s = bfd_get_linker_section (abfd, ".got");

5719 flags = bed->dynamic_sec_flags;

/* Create the GOT relocation section, .rela.got or .rel.got depending
   on the backend's reloc style.  */
5721 s = bfd_make_section_anyway_with_flags (abfd,
5722 (bed->rela_plts_and_copies_p
5723 ? ".rela.got" : ".rel.got"),
5724 (bed->dynamic_sec_flags
5727 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))

/* Create .got itself and reserve its first entry.  */
5731 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
5733 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
5736 htab->sgot->size += GOT_ENTRY_SIZE;

5738 if (bed->want_got_sym)
5740 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
5741 (or .got.plt) section. We don't do this in the linker script
5742 because we don't want to define the symbol if we are not creating
5743 a global offset table. */
5744 h = _bfd_elf_define_linkage_sym (abfd, info, s,
5745 "_GLOBAL_OFFSET_TABLE_");
5746 elf_hash_table (info)->hgot = h;

5751 if (bed->want_got_plt)
5753 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
5755 || !bfd_set_section_alignment (abfd, s,
5756 bed->s->log_file_align))

5761 /* The first bit of the global offset table is the header. */
5762 s->size += bed->got_header_size;
5767 /* Look through the relocs for a section during the first phase. */
5770 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
5771 asection *sec, const Elf_Internal_Rela *relocs)
5773 Elf_Internal_Shdr *symtab_hdr;
5774 struct elf_link_hash_entry **sym_hashes;
5775 const Elf_Internal_Rela *rel;
5776 const Elf_Internal_Rela *rel_end;
5779 struct elf_aarch64_link_hash_table *htab;
5781 if (info->relocatable)
5784 BFD_ASSERT (is_aarch64_elf (abfd));
5786 htab = elf_aarch64_hash_table (info);
5789 symtab_hdr = &elf_symtab_hdr (abfd);
5790 sym_hashes = elf_sym_hashes (abfd);
5792 rel_end = relocs + sec->reloc_count;
5793 for (rel = relocs; rel < rel_end; rel++)
5795 struct elf_link_hash_entry *h;
5796 unsigned long r_symndx;
5797 unsigned int r_type;
5798 bfd_reloc_code_real_type bfd_r_type;
5799 Elf_Internal_Sym *isym;
5801 r_symndx = ELFNN_R_SYM (rel->r_info);
5802 r_type = ELFNN_R_TYPE (rel->r_info);
5804 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
5806 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
5811 if (r_symndx < symtab_hdr->sh_info)
5813 /* A local symbol. */
5814 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5819 /* Check relocation against local STT_GNU_IFUNC symbol. */
5820 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5822 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
5827 /* Fake a STT_GNU_IFUNC symbol. */
5828 h->type = STT_GNU_IFUNC;
5831 h->forced_local = 1;
5832 h->root.type = bfd_link_hash_defined;
5839 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5840 while (h->root.type == bfd_link_hash_indirect
5841 || h->root.type == bfd_link_hash_warning)
5842 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5844 /* PR15323, ref flags aren't set for references in the same
5846 h->root.non_ir_ref = 1;
5849 /* Could be done earlier, if h were already available. */
5850 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
5854 /* Create the ifunc sections for static executables. If we
5855 never see an indirect function symbol nor we are building
5856 a static executable, those sections will be empty and
5857 won't appear in output. */
5863 case BFD_RELOC_AARCH64_NN:
5864 case BFD_RELOC_AARCH64_CALL26:
5865 case BFD_RELOC_AARCH64_JUMP26:
5866 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5867 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5868 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5869 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5870 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5871 case BFD_RELOC_AARCH64_ADD_LO12:
5872 if (htab->root.dynobj == NULL)
5873 htab->root.dynobj = abfd;
5874 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
5879 /* It is referenced by a non-shared object. */
5881 h->root.non_ir_ref = 1;
5886 case BFD_RELOC_AARCH64_NN:
5888 /* We don't need to handle relocs into sections not going into
5889 the "real" output. */
5890 if ((sec->flags & SEC_ALLOC) == 0)
5898 h->plt.refcount += 1;
5899 h->pointer_equality_needed = 1;
5902 /* No need to do anything if we're not creating a shared
5908 struct elf_dyn_relocs *p;
5909 struct elf_dyn_relocs **head;
5911 /* We must copy these reloc types into the output file.
5912 Create a reloc section in dynobj and make room for
5916 if (htab->root.dynobj == NULL)
5917 htab->root.dynobj = abfd;
5919 sreloc = _bfd_elf_make_dynamic_reloc_section
5920 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
5926 /* If this is a global symbol, we count the number of
5927 relocations we need for this symbol. */
5930 struct elf_aarch64_link_hash_entry *eh;
5931 eh = (struct elf_aarch64_link_hash_entry *) h;
5932 head = &eh->dyn_relocs;
5936 /* Track dynamic relocs needed for local syms too.
5937 We really need local syms available to do this
5943 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5948 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
5952 /* Beware of type punned pointers vs strict aliasing
5954 vpp = &(elf_section_data (s)->local_dynrel);
5955 head = (struct elf_dyn_relocs **) vpp;
5959 if (p == NULL || p->sec != sec)
5961 bfd_size_type amt = sizeof *p;
5962 p = ((struct elf_dyn_relocs *)
5963 bfd_zalloc (htab->root.dynobj, amt));
5976 /* RR: We probably want to keep a consistency check that
5977 there are no dangling GOT_PAGE relocs. */
5978 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5979 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5980 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5981 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5982 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5983 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5984 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5985 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5986 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5987 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5988 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5989 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5990 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5991 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5992 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5993 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5994 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5995 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5996 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5997 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5998 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5999 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6000 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6001 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6002 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6005 unsigned old_got_type;
6007 got_type = aarch64_reloc_got_type (bfd_r_type);
6011 h->got.refcount += 1;
6012 old_got_type = elf_aarch64_hash_entry (h)->got_type;
6016 struct elf_aarch64_local_symbol *locals;
6018 if (!elfNN_aarch64_allocate_local_symbols
6019 (abfd, symtab_hdr->sh_info))
6022 locals = elf_aarch64_locals (abfd);
6023 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6024 locals[r_symndx].got_refcount += 1;
6025 old_got_type = locals[r_symndx].got_type;
6028 /* If a variable is accessed with both general dynamic TLS
6029 methods, two slots may be created. */
6030 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
6031 got_type |= old_got_type;
6033 /* We will already have issued an error message if there
6034 is a TLS/non-TLS mismatch, based on the symbol type.
6035 So just combine any TLS types needed. */
6036 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
6037 && got_type != GOT_NORMAL)
6038 got_type |= old_got_type;
6040 /* If the symbol is accessed by both IE and GD methods, we
6041 are able to relax. Turn off the GD flag, without
6042 messing up with any other kind of TLS types that may be
6044 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
6045 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
6047 if (old_got_type != got_type)
6050 elf_aarch64_hash_entry (h)->got_type = got_type;
6053 struct elf_aarch64_local_symbol *locals;
6054 locals = elf_aarch64_locals (abfd);
6055 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6056 locals[r_symndx].got_type = got_type;
6060 if (htab->root.dynobj == NULL)
6061 htab->root.dynobj = abfd;
6062 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
6067 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6068 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6069 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6070 case BFD_RELOC_AARCH64_MOVW_G3:
6073 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6074 (*_bfd_error_handler)
6075 (_("%B: relocation %s against `%s' can not be used when making "
6076 "a shared object; recompile with -fPIC"),
6077 abfd, elfNN_aarch64_howto_table[howto_index].name,
6078 (h) ? h->root.root.string : "a local symbol");
6079 bfd_set_error (bfd_error_bad_value);
6083 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6084 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6085 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6086 if (h != NULL && info->executable)
6088 /* If this reloc is in a read-only section, we might
6089 need a copy reloc. We can't check reliably at this
6090 stage whether the section is read-only, as input
6091 sections have not yet been mapped to output sections.
6092 Tentatively set the flag for now, and correct in
6093 adjust_dynamic_symbol. */
6095 h->plt.refcount += 1;
6096 h->pointer_equality_needed = 1;
6098 /* FIXME:: RR need to handle these in shared libraries
6099 and essentially bomb out as these being non-PIC
6100 relocations in shared libraries. */
6103 case BFD_RELOC_AARCH64_CALL26:
6104 case BFD_RELOC_AARCH64_JUMP26:
6105 /* If this is a local symbol then we resolve it
6106 directly without creating a PLT entry. */
6111 if (h->plt.refcount <= 0)
6112 h->plt.refcount = 1;
6114 h->plt.refcount += 1;
6125 /* Treat mapping symbols as special target symbols. */
/* Predicate installed as the backend's is_target_special_symbol hook:
   returns whether SYM's name marks it as an AArch64 special (mapping)
   symbol of any type.  The bfd argument is unused.  */
6128 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6131 return bfd_is_aarch64_special_symbol_name (sym->name,
6132 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
6135 /* This is a copy of elf_find_function () from elf.c except that
6136 AArch64 mapping symbols are ignored when looking for function names. */
/* Scan SYMBOLS for the symbol in SECTION with the highest value not
   exceeding OFFSET, storing results through FILENAME_PTR and
   FUNCTIONNAME_PTR when those are non-NULL.  NOTE(review): the switch
   case labels are not visible in this excerpt; the filename is
   presumably taken from STT_FILE entries — confirm against elf.c.  */
6139 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6143 const char **filename_ptr,
6144 const char **functionname_ptr)
6146 const char *filename = NULL;
6147 asymbol *func = NULL;
6148 bfd_vma low_func = 0;
6151 for (p = symbols; *p != NULL; p++)
6155 q = (elf_symbol_type *) * p;
6157 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6162 filename = bfd_asymbol_name (&q->symbol);
6166 /* Skip mapping symbols. */
6167 if ((q->symbol.flags & BSF_LOCAL)
6168 && (bfd_is_aarch64_special_symbol_name
6169 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
/* Track the closest candidate at or below OFFSET seen so far.  */
6172 if (bfd_get_section (&q->symbol) == section
6173 && q->symbol.value >= low_func && q->symbol.value <= offset)
6175 func = (asymbol *) q;
6176 low_func = q->symbol.value;
6186 *filename_ptr = filename;
6187 if (functionname_ptr)
6188 *functionname_ptr = bfd_asymbol_name (func);
6194 /* Find the nearest line to a particular section and offset, for error
6195 reporting. This code is a duplicate of the code in elf.c, except
6196 that it uses aarch64_elf_find_function. */
6199 elfNN_aarch64_find_nearest_line (bfd *abfd,
6203 const char **filename_ptr,
6204 const char **functionname_ptr,
6205 unsigned int *line_ptr,
6206 unsigned int *discriminator_ptr)
6208 bfd_boolean found = FALSE;
/* First choice: DWARF 2 line information.  */
6210 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6211 filename_ptr, functionname_ptr,
6212 line_ptr, discriminator_ptr,
6213 dwarf_debug_sections, 0,
6214 &elf_tdata (abfd)->dwarf2_find_line_info))
/* DWARF found the line but possibly not the enclosing function;
   fill in the function name from the symbol table if needed.  */
6216 if (!*functionname_ptr)
6217 aarch64_elf_find_function (abfd, symbols, section, offset,
6218 *filename_ptr ? NULL : filename_ptr,
6224 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6225 toolchain uses DWARF1. */
/* Second choice: stabs debug information.  */
6227 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6228 &found, filename_ptr,
6229 functionname_ptr, line_ptr,
6230 &elf_tdata (abfd)->line_info))
6233 if (found && (*functionname_ptr || *line_ptr))
6236 if (symbols == NULL)
/* Last resort: a plain symbol-table scan for the function name.  */
6239 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6240 filename_ptr, functionname_ptr))
/* Report caller information for an inlined function, delegating
   entirely to the generic DWARF 2 machinery.  */
6248 elfNN_aarch64_find_inliner_info (bfd *abfd,
6249 const char **filename_ptr,
6250 const char **functionname_ptr,
6251 unsigned int *line_ptr)
6254 found = _bfd_dwarf2_find_inliner_info
6255 (abfd, filename_ptr,
6256 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
/* Backend hook run just before the ELF header is written out: stamp
   the AArch64 ABI version into e_ident, then let the generic ELF code
   finish header post-processing.  */
6262 elfNN_aarch64_post_process_headers (bfd *abfd,
6263 struct bfd_link_info *link_info)
6265 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6267 i_ehdrp = elf_elfheader (abfd);
6268 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6270 _bfd_elf_post_process_headers (abfd, link_info);
/* Classify a dynamic relocation for the generic code that sorts the
   dynamic relocation sections (RELATIVE relocs first, then PLT and
   COPY relocs); everything else is "normal".  */
6273 static enum elf_reloc_type_class
6274 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6275 const asection *rel_sec ATTRIBUTE_UNUSED,
6276 const Elf_Internal_Rela *rela)
6278 switch ((int) ELFNN_R_TYPE (rela->r_info))
6280 case AARCH64_R (RELATIVE):
6281 return reloc_class_relative;
6282 case AARCH64_R (JUMP_SLOT):
6283 return reloc_class_plt;
6284 case AARCH64_R (COPY):
6285 return reloc_class_copy;
6287 return reloc_class_normal;
6291 /* Handle an AArch64 specific section when reading an object file. This is
6292 called when bfd_section_from_shdr finds a section with an unknown
/* Recognize AArch64-specific section types (e.g. SHT_AARCH64_ATTRIBUTES)
   and then build a normal BFD section for the header.  */
6296 elfNN_aarch64_section_from_shdr (bfd *abfd,
6297 Elf_Internal_Shdr *hdr,
6298 const char *name, int shindex)
6300 /* There ought to be a place to keep ELF backend specific flags, but
6301 at the moment there isn't one. We just keep track of the
6302 sections by their name, instead. Fortunately, the ABI gives
6303 names for all the AArch64 specific sections, so we will probably get
6305 switch (hdr->sh_type)
6307 case SHT_AARCH64_ATTRIBUTES:
6314 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6320 /* A structure used to record a list of sections, independently
6321 of the next and prev fields in the asection structure. */
/* Doubly-linked list node; the list head is the static variable
   sections_with_aarch64_elf_section_data below.  */
6322 typedef struct section_list
6325 struct section_list *next;
6326 struct section_list *prev;
6330 /* Unfortunately we need to keep a list of sections for which
6331 an _aarch64_elf_section_data structure has been allocated. This
6332 is because it is possible for functions like elfNN_aarch64_write_section
6333 to be called on a section which has had an elf_data_structure
6334 allocated for it (and so the used_by_bfd field is valid) but
6335 for which the AArch64 extended version of this structure - the
6336 _aarch64_elf_section_data structure - has not been allocated. */
6337 static section_list *sections_with_aarch64_elf_section_data = NULL;
/* Push SEC onto the front of the global list of sections known to
   carry _aarch64_elf_section_data.  */
6340 record_section_with_aarch64_elf_section_data (asection *sec)
6342 struct section_list *entry;
6344 entry = bfd_malloc (sizeof (*entry));
6348 entry->next = sections_with_aarch64_elf_section_data;
6350 if (entry->next != NULL)
6351 entry->next->prev = entry;
6352 sections_with_aarch64_elf_section_data = entry;
/* Return the list entry recorded for SEC, or the loop's fall-through
   result if none.  Caches the neighbourhood of the last hit in the
   function-static last_entry to speed up repeated lookups.  */
6355 static struct section_list *
6356 find_aarch64_elf_section_entry (asection *sec)
6358 struct section_list *entry;
6359 static struct section_list *last_entry = NULL;
6361 /* This is a short cut for the typical case where the sections are added
6362 to the sections_with_aarch64_elf_section_data list in forward order and
6363 then looked up here in backwards order. This makes a real difference
6364 to the ld-srec/sec64k.exp linker test. */
6365 entry = sections_with_aarch64_elf_section_data;
6366 if (last_entry != NULL)
6368 if (last_entry->sec == sec)
6370 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6371 entry = last_entry->next;
/* Fall back to a full linear search of the list.  */
6374 for (; entry; entry = entry->next)
6375 if (entry->sec == sec)
6379 /* Record the entry prior to this one - it is the entry we are
6380 most likely to want to locate next time. Also this way if we
6381 have been called from
6382 unrecord_section_with_aarch64_elf_section_data () we will not
6383 be caching a pointer that is about to be freed. */
6384 last_entry = entry->prev;
/* Unlink (and release) the list entry recorded for SEC, if any,
   keeping the global list head consistent.  */
6390 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6392 struct section_list *entry;
6394 entry = find_aarch64_elf_section_entry (sec);
6398 if (entry->prev != NULL)
6399 entry->prev->next = entry->next;
6400 if (entry->next != NULL)
6401 entry->next->prev = entry->prev;
6402 if (entry == sections_with_aarch64_elf_section_data)
6403 sections_with_aarch64_elf_section_data = entry->next;
/* Bundle of state threaded through the mapping/stub symbol output
   callbacks below; FUNC is the generic linker's symbol-output hook.  */
6412 struct bfd_link_info *info;
6415 int (*func) (void *, const char *, Elf_Internal_Sym *,
6416 asection *, struct elf_link_hash_entry *);
6417 } output_arch_syminfo;
/* Indexes into the names[] table in elfNN_aarch64_output_map_sym.  */
6419 enum map_symbol_type
6426 /* Output a single mapping symbol. */
/* Emit a local "$x" (code) or "$d" (data) mapping symbol at OFFSET
   within OSI->sec, via the generic output callback.  Returns TRUE on
   success (callback result == 1).  */
6429 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6430 enum map_symbol_type type, bfd_vma offset)
6432 static const char *names[2] = { "$x", "$d" };
6433 Elf_Internal_Sym sym;
/* Address is section VMA + output offset + the requested offset.  */
6435 sym.st_value = (osi->sec->output_section->vma
6436 + osi->sec->output_offset + offset);
6439 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6440 sym.st_shndx = osi->sec_shndx;
6441 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
6446 /* Output mapping symbols for PLT entries associated with H. */
/* elf_link_hash_traverse callback: emits a "$x" mapping symbol at the
   PLT entry of H, skipping symbols with no PLT slot.  */
6449 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
6451 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
6454 if (h->root.type == bfd_link_hash_indirect)
6457 if (h->root.type == bfd_link_hash_warning)
6458 /* When warning symbols are created, they **replace** the "real"
6459 entry in the hash table, thus we never get to see the real
6460 symbol in a hash traversal. So look at it now. */
6461 h = (struct elf_link_hash_entry *) h->root.u.i.link;
/* (bfd_vma) -1 means no PLT entry was allocated for this symbol.  */
6463 if (h->plt.offset == (bfd_vma) - 1)
6466 addr = h->plt.offset;
6469 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6476 /* Output a single local symbol for a generated stub. */
/* Like elfNN_aarch64_output_map_sym but emits a named local STT_FUNC
   symbol of the given SIZE at OFFSET within OSI->sec.  */
6479 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
6480 bfd_vma offset, bfd_vma size)
6482 Elf_Internal_Sym sym;
6484 sym.st_value = (osi->sec->output_section->vma
6485 + osi->sec->output_offset + offset);
6488 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6489 sym.st_shndx = osi->sec_shndx;
6490 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
/* bfd_hash_traverse callback over the stub hash table: for each stub
   in the section currently being processed, emit its named STT_FUNC
   symbol plus the "$x"/"$d" mapping symbols appropriate to the stub
   type.  */
6494 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
6496 struct elf_aarch64_stub_hash_entry *stub_entry;
6500 output_arch_syminfo *osi;
6502 /* Massage our args to the form they really have. */
6503 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
6504 osi = (output_arch_syminfo *) in_arg;
6506 stub_sec = stub_entry->stub_sec;
6508 /* Ensure this stub is attached to the current section being
6510 if (stub_sec != osi->sec)
6513 addr = (bfd_vma) stub_entry->stub_offset;
6515 stub_name = stub_entry->output_name;
6517 switch (stub_entry->stub_type)
6519 case aarch64_stub_adrp_branch:
6520 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6521 sizeof (aarch64_adrp_branch_stub)))
6523 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6526 case aarch64_stub_long_branch:
6527 if (!elfNN_aarch64_output_stub_sym
6528 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
6530 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
/* The long-branch stub embeds a data word after 16 bytes of code,
   hence the extra "$d" mapping symbol.  */
6532 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
6535 case aarch64_stub_erratum_835769_veneer:
6536 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6537 sizeof (aarch64_erratum_835769_stub)))
6539 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6549 /* Output mapping symbols for linker generated sections. */
/* Walks every stub section in the stub BFD emitting its stubs' local
   symbols, then emits mapping symbols for the PLT.  */
6552 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
6553 struct bfd_link_info *info,
6555 int (*func) (void *, const char *,
6558 struct elf_link_hash_entry
6561 output_arch_syminfo osi;
6562 struct elf_aarch64_link_hash_table *htab;
6564 htab = elf_aarch64_hash_table (info);
6570 /* Long calls stubs. */
6571 if (htab->stub_bfd && htab->stub_bfd->sections)
6575 for (stub_sec = htab->stub_bfd->sections;
6576 stub_sec != NULL; stub_sec = stub_sec->next)
6578 /* Ignore non-stub sections. */
6579 if (!strstr (stub_sec->name, STUB_SUFFIX))
6584 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6585 (output_bfd, osi.sec->output_section);
6587 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
6592 /* Finally, output mapping symbols for the PLT. */
6593 if (!htab->root.splt || htab->root.splt->size == 0)
6596 /* For now live without mapping symbols for the plt. */
6597 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6598 (output_bfd, htab->root.splt->output_section)
6599 osi.sec = htab->root.splt;
6601 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
6608 /* Allocate target specific section data. */
/* new_section_hook: attach a zeroed _aarch64_elf_section_data to SEC
   (unless it already has used_by_bfd data), record the section in the
   global list, then run the generic ELF hook.  */
6611 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
6613 if (!sec->used_by_bfd)
6615 _aarch64_elf_section_data *sdata;
6616 bfd_size_type amt = sizeof (*sdata);
6618 sdata = bfd_zalloc (abfd, amt);
6621 sec->used_by_bfd = sdata;
6624 record_section_with_aarch64_elf_section_data (sec);
6626 return _bfd_elf_new_section_hook (abfd, sec);
/* bfd_map_over_sections adapter: drop SEC from the recorded-sections
   list; the bfd and closure arguments are unused.  */
6631 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
6633 void *ignore ATTRIBUTE_UNUSED)
6635 unrecord_section_with_aarch64_elf_section_data (sec);
/* close_and_cleanup hook: purge this bfd's sections from the global
   recorded-sections list before the generic ELF cleanup.  */
6639 elfNN_aarch64_close_and_cleanup (bfd *abfd)
6642 bfd_map_over_sections (abfd,
6643 unrecord_section_via_map_over_sections, NULL);
6645 return _bfd_elf_close_and_cleanup (abfd);
/* bfd_free_cached_info hook: same list purge as close_and_cleanup,
   then the generic free.  */
6649 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
6652 bfd_map_over_sections (abfd,
6653 unrecord_section_via_map_over_sections, NULL);
6655 return _bfd_free_cached_info (abfd);
6658 /* Create dynamic sections. This is different from the ARM backend in that
6659 the got, plt, gotplt and their relocation sections are all created in the
6660 standard part of the bfd elf backend. */
/* Creates .got via the backend helper, the standard dynamic sections
   via the generic code, then caches .dynbss/.rela.bss in the hash
   table (.rela.bss is only required for executables).  */
6663 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
6664 struct bfd_link_info *info)
6666 struct elf_aarch64_link_hash_table *htab;
6668 /* We need to create .got section. */
6669 if (!aarch64_elf_create_got_section (dynobj, info))
6672 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
6675 htab = elf_aarch64_hash_table (info);
6676 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss")
6678 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
6680 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
6687 /* Allocate space in .plt, .got and associated reloc sections for
/* elf_link_hash_traverse callback run at sizing time: reserves PLT,
   GOT, .got.plt and dynamic-reloc space for the global symbol H, and
   discards dyn_relocs that turn out to be unnecessary.  INF is the
   bfd_link_info.  */
6691 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
6693 struct bfd_link_info *info;
6694 struct elf_aarch64_link_hash_table *htab;
6695 struct elf_aarch64_link_hash_entry *eh;
6696 struct elf_dyn_relocs *p;
6698 /* An example of a bfd_link_hash_indirect symbol is versioned
6699 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6700 -> __gxx_personality_v0(bfd_link_hash_defined)
6702 There is no need to process bfd_link_hash_indirect symbols here
6703 because we will also be presented with the concrete instance of
6704 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
6705 called to copy all relevant data from the generic to the concrete
6708 if (h->root.type == bfd_link_hash_indirect)
6711 if (h->root.type == bfd_link_hash_warning)
6712 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6714 info = (struct bfd_link_info *) inf;
6715 htab = elf_aarch64_hash_table (info);
6717 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
6718 here if it is defined and referenced in a non-shared object. */
6719 if (h->type == STT_GNU_IFUNC
6722 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
6724 /* Make sure this symbol is output as a dynamic symbol.
6725 Undefined weak syms won't yet be marked as dynamic. */
6726 if (h->dynindx == -1 && !h->forced_local)
6728 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6732 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
6734 asection *s = htab->root.splt;
6736 /* If this is the first .plt entry, make room for the special
6739 s->size += htab->plt_header_size;
6741 h->plt.offset = s->size;
6743 /* If this symbol is not defined in a regular file, and we are
6744 not generating a shared library, then set the symbol to this
6745 location in the .plt. This is required to make function
6746 pointers compare as equal between the normal executable and
6747 the shared library. */
6748 if (!info->shared && !h->def_regular)
6750 h->root.u.def.section = s;
6751 h->root.u.def.value = h->plt.offset;
6754 /* Make room for this entry. For now we only create the
6755 small model PLT entries. We later need to find a way
6756 of relaxing into these from the large model PLT entries. */
6757 s->size += PLT_SMALL_ENTRY_SIZE;
6759 /* We also need to make an entry in the .got.plt section, which
6760 will be placed in the .got section by the linker script. */
6761 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
6763 /* We also need to make an entry in the .rela.plt section. */
6764 htab->root.srelplt->size += RELOC_SIZE (htab);
6766 /* We need to ensure that all GOT entries that serve the PLT
6767 are consecutive with the special GOT slots [0] [1] and
6768 [2]. Any additional relocations, such as
6769 R_AARCH64_TLSDESC, must be placed after the PLT related
6770 entries. We abuse the reloc_count such that during
6771 sizing we adjust reloc_count to indicate the number of
6772 PLT related reserved entries. In subsequent phases when
6773 filling in the contents of the reloc entries, PLT related
6774 entries are placed by computing their PLT index (0
6775 .. reloc_count). While other non-PLT relocs are placed
6776 at the slot indicated by reloc_count and reloc_count is
6779 htab->root.srelplt->reloc_count++;
6783 h->plt.offset = (bfd_vma) - 1;
6789 h->plt.offset = (bfd_vma) - 1;
6793 eh = (struct elf_aarch64_link_hash_entry *) h;
6794 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
/* GOT allocation: the slot count and reloc space depend on the GOT
   access type(s) recorded during check_relocs.  */
6796 if (h->got.refcount > 0)
6799 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
6801 h->got.offset = (bfd_vma) - 1;
6803 dyn = htab->root.dynamic_sections_created;
6805 /* Make sure this symbol is output as a dynamic symbol.
6806 Undefined weak syms won't yet be marked as dynamic. */
6807 if (dyn && h->dynindx == -1 && !h->forced_local)
6809 if (!bfd_elf_link_record_dynamic_symbol (info, h))
6813 if (got_type == GOT_UNKNOWN)
6816 else if (got_type == GOT_NORMAL)
6818 h->got.offset = htab->root.sgot->size;
6819 htab->root.sgot->size += GOT_ENTRY_SIZE;
6820 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6821 || h->root.type != bfd_link_hash_undefweak)
6823 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6825 htab->root.srelgot->size += RELOC_SIZE (htab);
/* TLS descriptor GD: two .got.plt slots; got.offset of -2 marks a
   TLSDESC-only GOT placement.  */
6831 if (got_type & GOT_TLSDESC_GD)
6833 eh->tlsdesc_got_jump_table_offset =
6834 (htab->root.sgotplt->size
6835 - aarch64_compute_jump_table_size (htab));
6836 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
6837 h->got.offset = (bfd_vma) - 2;
/* Traditional general-dynamic TLS: a pair of .got slots.  */
6840 if (got_type & GOT_TLS_GD)
6842 h->got.offset = htab->root.sgot->size;
6843 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
/* Initial-exec TLS: a single .got slot.  */
6846 if (got_type & GOT_TLS_IE)
6848 h->got.offset = htab->root.sgot->size;
6849 htab->root.sgot->size += GOT_ENTRY_SIZE;
6852 indx = h && h->dynindx != -1 ? h->dynindx : 0;
6853 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6854 || h->root.type != bfd_link_hash_undefweak)
6857 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
6859 if (got_type & GOT_TLSDESC_GD)
6861 htab->root.srelplt->size += RELOC_SIZE (htab);
6862 /* Note reloc_count not incremented here! We have
6863 already adjusted reloc_count for this relocation
6866 /* TLSDESC PLT is now needed, but not yet determined. */
6867 htab->tlsdesc_plt = (bfd_vma) - 1;
6870 if (got_type & GOT_TLS_GD)
6871 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
6873 if (got_type & GOT_TLS_IE)
6874 htab->root.srelgot->size += RELOC_SIZE (htab);
6880 h->got.offset = (bfd_vma) - 1;
6883 if (eh->dyn_relocs == NULL)
6886 /* In the shared -Bsymbolic case, discard space allocated for
6887 dynamic pc-relative relocs against symbols which turn out to be
6888 defined in regular objects. For the normal shared case, discard
6889 space for pc-relative relocs that have become local due to symbol
6890 visibility changes. */
6894 /* Relocs that use pc_count are those that appear on a call
6895 insn, or certain REL relocs that can be generated via assembly.
6896 We want calls to protected symbols to resolve directly to the
6897 function rather than going via the plt. If people want
6898 function pointer comparisons to work as expected then they
6899 should avoid writing weird assembly. */
6900 if (SYMBOL_CALLS_LOCAL (info, h))
6902 struct elf_dyn_relocs **pp;
6904 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
6906 p->count -= p->pc_count;
6915 /* Also discard relocs on undefined weak syms with non-default
6917 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
6919 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
6920 eh->dyn_relocs = NULL;
6922 /* Make sure undefined weak symbols are output as a dynamic
6924 else if (h->dynindx == -1
6926 && !bfd_elf_link_record_dynamic_symbol (info, h))
6931 else if (ELIMINATE_COPY_RELOCS)
6933 /* For the non-shared case, discard space for relocs against
6934 symbols which turn out to need copy relocs or are not
6940 || (htab->root.dynamic_sections_created
6941 && (h->root.type == bfd_link_hash_undefweak
6942 || h->root.type == bfd_link_hash_undefined))))
6944 /* Make sure this symbol is output as a dynamic symbol.
6945 Undefined weak syms won't yet be marked as dynamic. */
6946 if (h->dynindx == -1
6948 && !bfd_elf_link_record_dynamic_symbol (info, h))
6951 /* If that succeeded, we know we'll be keeping all the
6953 if (h->dynindx != -1)
6957 eh->dyn_relocs = NULL;
6962 /* Finally, allocate space. */
6963 for (p = eh->dyn_relocs; p != NULL; p = p->next)
6967 sreloc = elf_section_data (p->sec)->sreloc;
6969 BFD_ASSERT (sreloc != NULL);
6971 sreloc->size += p->count * RELOC_SIZE (htab);
6977 /* Allocate space in .plt, .got and associated reloc sections for
6978 ifunc dynamic relocs. */
/* elf_link_hash_traverse callback: for STT_GNU_IFUNC symbols, hand
   allocation off to the generic ifunc helper with this backend's PLT
   entry/header sizes.  */
6981 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
6984 struct bfd_link_info *info;
6985 struct elf_aarch64_link_hash_table *htab;
6986 struct elf_aarch64_link_hash_entry *eh;
6988 /* An example of a bfd_link_hash_indirect symbol is versioned
6989 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
6990 -> __gxx_personality_v0(bfd_link_hash_defined)
6992 There is no need to process bfd_link_hash_indirect symbols here
6993 because we will also be presented with the concrete instance of
6994 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
6995 called to copy all relevant data from the generic to the concrete
6998 if (h->root.type == bfd_link_hash_indirect)
7001 if (h->root.type == bfd_link_hash_warning)
7002 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7004 info = (struct bfd_link_info *) inf;
7005 htab = elf_aarch64_hash_table (info);
7007 eh = (struct elf_aarch64_link_hash_entry *) h;
7009 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
7010 here if it is defined and referenced in a non-shared object. */
7011 if (h->type == STT_GNU_IFUNC
7013 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
7015 htab->plt_entry_size,
7016 htab->plt_header_size,
7021 /* Allocate space in .plt, .got and associated reloc sections for
7022 local dynamic relocs. */
/* htab_traverse callback over the local-ifunc hash table: filter to
   defined STT_GNU_IFUNC entries and reuse the global allocator.  */
7025 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
7027 struct elf_link_hash_entry *h
7028 = (struct elf_link_hash_entry *) *slot;
7030 if (h->type != STT_GNU_IFUNC
7034 || h->root.type != bfd_link_hash_defined)
7037 return elfNN_aarch64_allocate_dynrelocs (h, inf);
7040 /* Allocate space in .plt, .got and associated reloc sections for
7041 local ifunc dynamic relocs. */
/* htab_traverse callback: same filter as above, but delegates to the
   ifunc-specific allocator.  */
7044 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
7046 struct elf_link_hash_entry *h
7047 = (struct elf_link_hash_entry *) *slot;
7049 if (h->type != STT_GNU_IFUNC
7053 || h->root.type != bfd_link_hash_defined)
7056 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
7059 /* This is the most important function of all. Innocuously named
7062 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
7063 struct bfd_link_info *info)
7065 struct elf_aarch64_link_hash_table *htab;
7071 htab = elf_aarch64_hash_table ((info));
7072 dynobj = htab->root.dynobj;
7074 BFD_ASSERT (dynobj != NULL);
7076 if (htab->root.dynamic_sections_created)
7078 if (info->executable)
7080 s = bfd_get_linker_section (dynobj, ".interp");
7083 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7084 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7088 /* Set up .got offsets for local syms, and space for local dynamic
7090 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7092 struct elf_aarch64_local_symbol *locals = NULL;
7093 Elf_Internal_Shdr *symtab_hdr;
7097 if (!is_aarch64_elf (ibfd))
7100 for (s = ibfd->sections; s != NULL; s = s->next)
7102 struct elf_dyn_relocs *p;
7104 for (p = (struct elf_dyn_relocs *)
7105 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7107 if (!bfd_is_abs_section (p->sec)
7108 && bfd_is_abs_section (p->sec->output_section))
7110 /* Input section has been discarded, either because
7111 it is a copy of a linkonce section or due to
7112 linker script /DISCARD/, so we'll be discarding
7115 else if (p->count != 0)
7117 srel = elf_section_data (p->sec)->sreloc;
7118 srel->size += p->count * RELOC_SIZE (htab);
7119 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7120 info->flags |= DF_TEXTREL;
7125 locals = elf_aarch64_locals (ibfd);
7129 symtab_hdr = &elf_symtab_hdr (ibfd);
7130 srel = htab->root.srelgot;
7131 for (i = 0; i < symtab_hdr->sh_info; i++)
7133 locals[i].got_offset = (bfd_vma) - 1;
7134 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7135 if (locals[i].got_refcount > 0)
7137 unsigned got_type = locals[i].got_type;
7138 if (got_type & GOT_TLSDESC_GD)
7140 locals[i].tlsdesc_got_jump_table_offset =
7141 (htab->root.sgotplt->size
7142 - aarch64_compute_jump_table_size (htab));
7143 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7144 locals[i].got_offset = (bfd_vma) - 2;
7147 if (got_type & GOT_TLS_GD)
7149 locals[i].got_offset = htab->root.sgot->size;
7150 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7153 if (got_type & GOT_TLS_IE)
7155 locals[i].got_offset = htab->root.sgot->size;
7156 htab->root.sgot->size += GOT_ENTRY_SIZE;
7159 if (got_type == GOT_UNKNOWN)
7163 if (got_type == GOT_NORMAL)
7169 if (got_type & GOT_TLSDESC_GD)
7171 htab->root.srelplt->size += RELOC_SIZE (htab);
7172 /* Note RELOC_COUNT not incremented here! */
7173 htab->tlsdesc_plt = (bfd_vma) - 1;
7176 if (got_type & GOT_TLS_GD)
7177 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7179 if (got_type & GOT_TLS_IE)
7180 htab->root.srelgot->size += RELOC_SIZE (htab);
7185 locals[i].got_refcount = (bfd_vma) - 1;
7191 /* Allocate global sym .plt and .got entries, and space for global
7192 sym dynamic relocs. */
7193 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7196 /* Allocate global ifunc sym .plt and .got entries, and space for global
7197 ifunc sym dynamic relocs. */
7198 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7201 /* Allocate .plt and .got entries, and space for local symbols. */
7202 htab_traverse (htab->loc_hash_table,
7203 elfNN_aarch64_allocate_local_dynrelocs,
7206 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7207 htab_traverse (htab->loc_hash_table,
7208 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7211 /* For every jump slot reserved in the sgotplt, reloc_count is
7212 incremented. However, when we reserve space for TLS descriptors,
7213 it's not incremented, so in order to compute the space reserved
7214 for them, it suffices to multiply the reloc count by the jump
7217 if (htab->root.srelplt)
7218 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
7220 if (htab->tlsdesc_plt)
7222 if (htab->root.splt->size == 0)
7223 htab->root.splt->size += PLT_ENTRY_SIZE;
7225 htab->tlsdesc_plt = htab->root.splt->size;
7226 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7228 /* If we're not using lazy TLS relocations, don't generate the
7229 GOT entry required. */
7230 if (!(info->flags & DF_BIND_NOW))
7232 htab->dt_tlsdesc_got = htab->root.sgot->size;
7233 htab->root.sgot->size += GOT_ENTRY_SIZE;
7237 /* Init mapping symbols information to use later to distinguish between
7238 code and data while scanning for erratum 835769. */
7239 if (htab->fix_erratum_835769)
7240 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7242 if (!is_aarch64_elf (ibfd))
7244 bfd_elfNN_aarch64_init_maps (ibfd);
7247 /* We now have determined the sizes of the various dynamic sections.
7248 Allocate memory for them. */
7250 for (s = dynobj->sections; s != NULL; s = s->next)
7252 if ((s->flags & SEC_LINKER_CREATED) == 0)
7255 if (s == htab->root.splt
7256 || s == htab->root.sgot
7257 || s == htab->root.sgotplt
7258 || s == htab->root.iplt
7259 || s == htab->root.igotplt || s == htab->sdynbss)
7261 /* Strip this section if we don't need it; see the
7264 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7266 if (s->size != 0 && s != htab->root.srelplt)
7269 /* We use the reloc_count field as a counter if we need
7270 to copy relocs into the output file. */
7271 if (s != htab->root.srelplt)
7276 /* It's not one of our sections, so don't allocate space. */
7282 /* If we don't need this section, strip it from the
7283 output file. This is mostly to handle .rela.bss and
7284 .rela.plt. We must create both sections in
7285 create_dynamic_sections, because they must be created
7286 before the linker maps input sections to output
7287 sections. The linker does that before
7288 adjust_dynamic_symbol is called, and it is that
7289 function which decides whether anything needs to go
7290 into these sections. */
7292 s->flags |= SEC_EXCLUDE;
7296 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7299 /* Allocate memory for the section contents. We use bfd_zalloc
7300 here in case unused entries are not reclaimed before the
7301 section's contents are written out. This should not happen,
7302 but this way if it does, we get a R_AARCH64_NONE reloc instead
7304 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7305 if (s->contents == NULL)
7309 if (htab->root.dynamic_sections_created)
7311 /* Add some entries to the .dynamic section. We fill in the
7312 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7313 must add the entries now so that we get the correct size for
7314 the .dynamic section. The DT_DEBUG entry is filled in by the
7315 dynamic linker and used by the debugger. */
7316 #define add_dynamic_entry(TAG, VAL) \
7317 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7319 if (info->executable)
7321 if (!add_dynamic_entry (DT_DEBUG, 0))
7325 if (htab->root.splt->size != 0)
7327 if (!add_dynamic_entry (DT_PLTGOT, 0)
7328 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7329 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7330 || !add_dynamic_entry (DT_JMPREL, 0))
7333 if (htab->tlsdesc_plt
7334 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7335 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7341 if (!add_dynamic_entry (DT_RELA, 0)
7342 || !add_dynamic_entry (DT_RELASZ, 0)
7343 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7346 /* If any dynamic relocs apply to a read-only section,
7347 then we need a DT_TEXTREL entry. */
7348 if ((info->flags & DF_TEXTREL) != 0)
7350 if (!add_dynamic_entry (DT_TEXTREL, 0))
7355 #undef add_dynamic_entry
7361 elf_aarch64_update_plt_entry (bfd *output_bfd,
7362 bfd_reloc_code_real_type r_type,
7363 bfd_byte *plt_entry, bfd_vma value)
7365 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7367 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
/* Emit the PLTn stub, its .got.plt (or .igot.plt) slot, and the matching
   relocation for symbol H: copy the stub template, patch its ADRP/LDR/ADD
   instructions to address the GOT slot, point the GOT slot back at PLT0,
   and write a JUMP_SLOT (or IRELATIVE for local ifuncs) reloc.  */
7371 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7372 struct elf_aarch64_link_hash_table
7373 *htab, bfd *output_bfd,
7374 struct bfd_link_info *info)
7376 bfd_byte *plt_entry;
7379 bfd_vma gotplt_entry_address;
7380 bfd_vma plt_entry_address;
7381 Elf_Internal_Rela rela;
7383 asection *plt, *gotplt, *relplt;
7385 /* When building a static executable, use .iplt, .igot.plt and
7386 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7387 if (htab->root.splt != NULL)
7389 plt = htab->root.splt;
7390 gotplt = htab->root.sgotplt;
7391 relplt = htab->root.srelplt;
7395 plt = htab->root.iplt;
7396 gotplt = htab->root.igotplt;
7397 relplt = htab->root.irelplt;
7400 /* Get the index in the procedure linkage table which
7401 corresponds to this symbol. This is the index of this symbol
7402 in all the symbols for which we are making plt entries. The
7403 first entry in the procedure linkage table is reserved.
7405 Get the offset into the .got table of the entry that
7406 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
7407 bytes. The first three are reserved for the dynamic linker.
7409 For static executables, we don't reserve anything. */
7411 if (plt == htab->root.splt)
7413 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
7414 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
7418 plt_index = h->plt.offset / htab->plt_entry_size;
7419 got_offset = plt_index * GOT_ENTRY_SIZE;
7422 plt_entry = plt->contents + h->plt.offset;
/* Output-image (virtual) addresses of the stub and its GOT slot;
   needed to compute the PC-relative ADRP displacement below.  */
7423 plt_entry_address = plt->output_section->vma
7424 + plt->output_offset + h->plt.offset;
7425 gotplt_entry_address = gotplt->output_section->vma +
7426 gotplt->output_offset + got_offset;
7428 /* Copy in the boiler-plate for the PLTn entry. */
7429 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
7431 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7432 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7433 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7435 PG (gotplt_entry_address) -
7436 PG (plt_entry_address))
7438 /* Fill in the lo12 bits for the load from the pltgot. */
7439 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7441 PG_OFFSET (gotplt_entry_address));
7443 /* Fill in the lo12 bits for the add from the pltgot entry. */
7444 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7446 PG_OFFSET (gotplt_entry_address));
7448 /* All the GOTPLT Entries are essentially initialized to PLT0. */
7449 bfd_put_NN (output_bfd,
7450 plt->output_section->vma + plt->output_offset,
7451 gotplt->contents + got_offset);
7453 rela.r_offset = gotplt_entry_address;
7455 if (h->dynindx == -1
7456 || ((info->executable
7457 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7459 && h->type == STT_GNU_IFUNC))
7461 /* If an STT_GNU_IFUNC symbol is locally defined, generate
7462 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
7463 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
7464 rela.r_addend = (h->root.u.def.value
7465 + h->root.u.def.section->output_section->vma
7466 + h->root.u.def.section->output_offset);
7470 /* Fill in the entry in the .rela.plt section. */
7471 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
7475 /* Compute the relocation entry to be used based on PLT index and do
7476 not adjust reloc_count. The reloc_count has already been adjusted
7477 to account for this entry. */
7478 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
7479 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7482 /* Size sections even though they're not dynamic. We use it to setup
7483 _TLS_MODULE_BASE_, if needed. */
7486 elfNN_aarch64_always_size_sections (bfd *output_bfd,
7487 struct bfd_link_info *info)
7491 if (info->relocatable)
7494 tls_sec = elf_hash_table (info)->tls_sec;
7498 struct elf_link_hash_entry *tlsbase;
/* Look for an existing _TLS_MODULE_BASE_ hash entry (created on
   demand: create and copy flags are TRUE, follow is FALSE).  */
7500 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
7501 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
7505 struct bfd_link_hash_entry *h = NULL;
7506 const struct elf_backend_data *bed =
7507 get_elf_backend_data (output_bfd);
/* Define _TLS_MODULE_BASE_ as a local symbol at offset 0 of the
   TLS section.  */
7509 if (!(_bfd_generic_link_add_one_symbol
7510 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
7511 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
/* Mark it as a regular, hidden TLS definition, and ask the backend
   to hide it so it is not exported dynamically.  */
7514 tlsbase->type = STT_TLS;
7515 tlsbase = (struct elf_link_hash_entry *) h;
7516 tlsbase->def_regular = 1;
7517 tlsbase->other = STV_HIDDEN;
7518 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
7525 /* Finish up dynamic symbol handling. We set the contents of various
7526 dynamic sections here. */
7528 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
7529 struct bfd_link_info *info,
7530 struct elf_link_hash_entry *h,
7531 Elf_Internal_Sym *sym)
7533 struct elf_aarch64_link_hash_table *htab;
7534 htab = elf_aarch64_hash_table (info);
/* Part 1: symbols with a PLT entry.  */
7536 if (h->plt.offset != (bfd_vma) - 1)
7538 asection *plt, *gotplt, *relplt;
7540 /* This symbol has an entry in the procedure linkage table. Set
7543 /* When building a static executable, use .iplt, .igot.plt and
7544 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7545 if (htab->root.splt != NULL)
7547 plt = htab->root.splt;
7548 gotplt = htab->root.sgotplt;
7549 relplt = htab->root.srelplt;
7553 plt = htab->root.iplt;
7554 gotplt = htab->root.igotplt;
7555 relplt = htab->root.irelplt;
7558 /* This symbol has an entry in the procedure linkage table. Set
7560 if ((h->dynindx == -1
7561 && !((h->forced_local || info->executable)
7563 && h->type == STT_GNU_IFUNC))
/* Write out the actual PLTn stub, GOT slot and PLT reloc.  */
7569 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
7570 if (!h->def_regular)
7572 /* Mark the symbol as undefined, rather than as defined in
7573 the .plt section. */
7574 sym->st_shndx = SHN_UNDEF;
7575 /* If the symbol is weak we need to clear the value.
7576 Otherwise, the PLT entry would provide a definition for
7577 the symbol even if the symbol wasn't defined anywhere,
7578 and so the symbol would never be NULL. Leave the value if
7579 there were any relocations where pointer equality matters
7580 (this is a clue for the dynamic linker, to make function
7581 pointer comparisons work between an application and shared
7583 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
/* Part 2: symbols with a normal (non-TLS) GOT entry.  */
7588 if (h->got.offset != (bfd_vma) - 1
7589 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
7591 Elf_Internal_Rela rela;
7594 /* This symbol has an entry in the global offset table. Set it
7596 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
/* Low bit of got.offset is used as a flag elsewhere (see the
   BFD_ASSERTs below), so mask it off when forming the address.  */
7599 rela.r_offset = (htab->root.sgot->output_section->vma
7600 + htab->root.sgot->output_offset
7601 + (h->got.offset & ~(bfd_vma) 1));
7604 && h->type == STT_GNU_IFUNC)
7608 /* Generate R_AARCH64_GLOB_DAT. */
7615 if (!h->pointer_equality_needed)
7618 /* For non-shared object, we can't use .got.plt, which
7619 contains the real function address if we need pointer
7620 equality. We load the GOT entry with the PLT entry. */
7621 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
7622 bfd_put_NN (output_bfd, (plt->output_section->vma
7623 + plt->output_offset
7625 htab->root.sgot->contents
7626 + (h->got.offset & ~(bfd_vma) 1));
/* Symbols resolving locally in a shared object get a RELATIVE
   reloc against the symbol's link-time value.  */
7630 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
7632 if (!h->def_regular)
7635 BFD_ASSERT ((h->got.offset & 1) != 0);
7636 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
7637 rela.r_addend = (h->root.u.def.value
7638 + h->root.u.def.section->output_section->vma
7639 + h->root.u.def.section->output_offset);
/* Otherwise zero-fill the slot and emit GLOB_DAT for the dynamic
   linker to resolve at load time.  */
7644 BFD_ASSERT ((h->got.offset & 1) == 0);
7645 bfd_put_NN (output_bfd, (bfd_vma) 0,
7646 htab->root.sgot->contents + h->got.offset);
7647 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
7651 loc = htab->root.srelgot->contents;
7652 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
7653 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
/* Part 3: symbols needing a COPY reloc into .dynbss.  */
7658 Elf_Internal_Rela rela;
7661 /* This symbol needs a copy reloc. Set it up. */
7663 if (h->dynindx == -1
7664 || (h->root.type != bfd_link_hash_defined
7665 && h->root.type != bfd_link_hash_defweak)
7666 || htab->srelbss == NULL)
7669 rela.r_offset = (h->root.u.def.value
7670 + h->root.u.def.section->output_section->vma
7671 + h->root.u.def.section->output_offset);
7672 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
7674 loc = htab->srelbss->contents;
7675 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
7676 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7679 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
7680 be NULL for local symbols. */
7682 && (h == elf_hash_table (info)->hdynamic
7683 || h == elf_hash_table (info)->hgot))
7684 sym->st_shndx = SHN_ABS;
7689 /* Finish up local dynamic symbol handling. We set the contents of
7690 various dynamic sections here. */
7693 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
7695 struct elf_link_hash_entry *h
7696 = (struct elf_link_hash_entry *) *slot;
7697 struct bfd_link_info *info
7698 = (struct bfd_link_info *) inf;
7700 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
/* Fill in the PLT header (PLT0): copy the template into .plt and patch
   its ADRP/LDR/ADD instructions so they address GOT[2], the slot the
   dynamic linker's resolver entry point is stored in.  */
7705 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
7706 struct elf_aarch64_link_hash_table
7709 /* Fill in PLT0. Fixme:RR Note this doesn't distinguish between
7710 small and large plts and at the minute just generates
7713 /* PLT0 of the small PLT looks like this in ELF64 -
7714 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
7715 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
7716 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
7718 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
7719 // GOTPLT entry for this.
7721 PLT0 will be slightly different in ELF32 due to different got entry
7724 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
7728 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
7730 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
/* GOT[2] lives two GOT_ENTRY_SIZE slots past the start of .got.plt.  */
7733 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
7734 + htab->root.sgotplt->output_offset
7735 + GOT_ENTRY_SIZE * 2);
7737 plt_base = htab->root.splt->output_section->vma +
7738 htab->root.splt->output_offset;
7740 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7741 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7742 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7743 htab->root.splt->contents + 4,
7744 PG (plt_got_2nd_ent) - PG (plt_base + 4));
/* Fill in the lo12 bits for the LDR from GOT[2].  */
7746 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7747 htab->root.splt->contents + 8,
7748 PG_OFFSET (plt_got_2nd_ent));
/* Fill in the lo12 bits for the ADD forming the GOT[2] address.  */
7750 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7751 htab->root.splt->contents + 12,
7752 PG_OFFSET (plt_got_2nd_ent));
/* Final fixups for the dynamic sections: patch .dynamic entries whose
   values are only known now, emit PLT0 and the TLSDESC resolver stub,
   and initialize the reserved .got/.got.plt header slots.  */
7756 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
7757 struct bfd_link_info *info)
7759 struct elf_aarch64_link_hash_table *htab;
7763 htab = elf_aarch64_hash_table (info);
7764 dynobj = htab->root.dynobj;
7765 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
7767 if (htab->root.dynamic_sections_created)
7769 ElfNN_External_Dyn *dyncon, *dynconend;
7771 if (sdyn == NULL || htab->root.sgot == NULL)
/* Walk every .dynamic entry, swap it in, patch the tags whose
   values depend on final section layout, and swap it back out.  */
7774 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
7775 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
7776 for (; dyncon < dynconend; dyncon++)
7778 Elf_Internal_Dyn dyn;
7781 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
7789 s = htab->root.sgotplt;
7790 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
7794 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
7798 s = htab->root.srelplt;
7799 dyn.d_un.d_val = s->size;
7803 /* The procedure linkage table relocs (DT_JMPREL) should
7804 not be included in the overall relocs (DT_RELA).
7805 Therefore, we override the DT_RELASZ entry here to
7806 make it not include the JMPREL relocs. Since the
7807 linker script arranges for .rela.plt to follow all
7808 other relocation sections, we don't have to worry
7809 about changing the DT_RELA entry. */
7810 if (htab->root.srelplt != NULL)
7812 s = htab->root.srelplt;
7813 dyn.d_un.d_val -= s->size;
7817 case DT_TLSDESC_PLT:
7818 s = htab->root.splt;
7819 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
7820 + htab->tlsdesc_plt;
7823 case DT_TLSDESC_GOT:
7824 s = htab->root.sgot;
7825 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
7826 + htab->dt_tlsdesc_got;
7830 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
7835 /* Fill in the special first entry in the procedure linkage table. */
7836 if (htab->root.splt && htab->root.splt->size > 0)
7838 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
7840 elf_section_data (htab->root.splt->output_section)->
7841 this_hdr.sh_entsize = htab->plt_entry_size;
/* Emit the lazy TLSDESC resolver trampoline, if one was reserved.  */
7844 if (htab->tlsdesc_plt)
7846 bfd_put_NN (output_bfd, (bfd_vma) 0,
7847 htab->root.sgot->contents + htab->dt_tlsdesc_got);
7849 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
7850 elfNN_aarch64_tlsdesc_small_plt_entry,
7851 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
/* Addresses of the stub's first and second ADRP instructions,
   needed as the PC for their page-relative displacements.  */
7854 bfd_vma adrp1_addr =
7855 htab->root.splt->output_section->vma
7856 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
7858 bfd_vma adrp2_addr = adrp1_addr + 4;
7861 htab->root.sgot->output_section->vma
7862 + htab->root.sgot->output_offset;
7864 bfd_vma pltgot_addr =
7865 htab->root.sgotplt->output_section->vma
7866 + htab->root.sgotplt->output_offset;
7868 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
7870 bfd_byte *plt_entry =
7871 htab->root.splt->contents + htab->tlsdesc_plt;
7873 /* adrp x2, DT_TLSDESC_GOT */
7874 elf_aarch64_update_plt_entry (output_bfd,
7875 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7877 (PG (dt_tlsdesc_got)
7878 - PG (adrp1_addr)))
/* Second ADRP of the stub; its displacement is taken relative to
   adrp2_addr.  */
7881 elf_aarch64_update_plt_entry (output_bfd,
7882 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7885 - PG (adrp2_addr)));
7887 /* ldr x2, [x2, #0] */
7888 elf_aarch64_update_plt_entry (output_bfd,
7889 BFD_RELOC_AARCH64_LDSTNN_LO12,
7891 PG_OFFSET (dt_tlsdesc_got));
/* add: lo12 bits of the .got.plt base address.  */
7894 elf_aarch64_update_plt_entry (output_bfd,
7895 BFD_RELOC_AARCH64_ADD_LO12,
7897 PG_OFFSET (pltgot_addr));
7902 if (htab->root.sgotplt)
/* Check that the GOTPLT was not discarded by the linker script.  */
7904 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
7906 (*_bfd_error_handler)
7907 (_("discarded output section: `%A'"), htab->root.sgotplt);
7911 /* Fill in the first three entries in the global offset table. */
7912 if (htab->root.sgotplt->size > 0)
7914 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
7916 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
7917 bfd_put_NN (output_bfd,
7919 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
7920 bfd_put_NN (output_bfd,
7922 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
/* GOT[0] of .got holds the address of .dynamic (0 if absent).  */
7925 if (htab->root.sgot)
7927 if (htab->root.sgot->size > 0)
7930 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
7931 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
7935 elf_section_data (htab->root.sgotplt->output_section)->
7936 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
7939 if (htab->root.sgot && htab->root.sgot->size > 0)
7940 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
7943 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
7944 htab_traverse (htab->loc_hash_table,
7945 elfNN_aarch64_finish_local_dynamic_symbol,
7951 /* Return address for Ith PLT stub in section PLT, for relocation REL
7952 or (bfd_vma) -1 if it should not be included. */
7955 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
7956 const arelent *rel ATTRIBUTE_UNUSED)
7958 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
7962 /* We use this so we can override certain functions
7963 (though currently we don't). */
7965 const struct elf_size_info elfNN_aarch64_size_info =
/* On-disk sizes of the external ELF structures.  */
7967 sizeof (ElfNN_External_Ehdr),
7968 sizeof (ElfNN_External_Phdr),
7969 sizeof (ElfNN_External_Shdr),
7970 sizeof (ElfNN_External_Rel),
7971 sizeof (ElfNN_External_Rela),
7972 sizeof (ElfNN_External_Sym),
7973 sizeof (ElfNN_External_Dyn),
7974 sizeof (Elf_External_Note),
7975 4, /* Hash table entry size. */
7976 1, /* Internal relocs per external relocs. */
7977 ARCH_SIZE, /* Arch size. */
7978 LOG_FILE_ALIGN, /* Log_file_align. */
7979 ELFCLASSNN, EV_CURRENT,
/* Read/write and byte-swapping helpers used by the generic ELF code.  */
7980 bfd_elfNN_write_out_phdrs,
7981 bfd_elfNN_write_shdrs_and_ehdr,
7982 bfd_elfNN_checksum_contents,
7983 bfd_elfNN_write_relocs,
7984 bfd_elfNN_swap_symbol_in,
7985 bfd_elfNN_swap_symbol_out,
7986 bfd_elfNN_slurp_reloc_table,
7987 bfd_elfNN_slurp_symbol_table,
7988 bfd_elfNN_swap_dyn_in,
7989 bfd_elfNN_swap_dyn_out,
7990 bfd_elfNN_swap_reloc_in,
7991 bfd_elfNN_swap_reloc_out,
7992 bfd_elfNN_swap_reloca_in,
7993 bfd_elfNN_swap_reloca_out
/* Target identification and page-size parameters consumed by
   elfNN-target.h below.  */
7996 #define ELF_ARCH bfd_arch_aarch64
7997 #define ELF_MACHINE_CODE EM_AARCH64
7998 #define ELF_MAXPAGESIZE 0x10000
7999 #define ELF_MINPAGESIZE 0x1000
8000 #define ELF_COMMONPAGESIZE 0x1000
/* Generic BFD entry points overridden by this backend.  */
8002 #define bfd_elfNN_close_and_cleanup \
8003 elfNN_aarch64_close_and_cleanup
8005 #define bfd_elfNN_bfd_free_cached_info \
8006 elfNN_aarch64_bfd_free_cached_info
8008 #define bfd_elfNN_bfd_is_target_special_symbol \
8009 elfNN_aarch64_is_target_special_symbol
8011 #define bfd_elfNN_bfd_link_hash_table_create \
8012 elfNN_aarch64_link_hash_table_create
8014 #define bfd_elfNN_bfd_merge_private_bfd_data \
8015 elfNN_aarch64_merge_private_bfd_data
8017 #define bfd_elfNN_bfd_print_private_bfd_data \
8018 elfNN_aarch64_print_private_bfd_data
8020 #define bfd_elfNN_bfd_reloc_type_lookup \
8021 elfNN_aarch64_reloc_type_lookup
8023 #define bfd_elfNN_bfd_reloc_name_lookup \
8024 elfNN_aarch64_reloc_name_lookup
8026 #define bfd_elfNN_bfd_set_private_flags \
8027 elfNN_aarch64_set_private_flags
8029 #define bfd_elfNN_find_inliner_info \
8030 elfNN_aarch64_find_inliner_info
8032 #define bfd_elfNN_find_nearest_line \
8033 elfNN_aarch64_find_nearest_line
8035 #define bfd_elfNN_mkobject \
8036 elfNN_aarch64_mkobject
8038 #define bfd_elfNN_new_section_hook \
8039 elfNN_aarch64_new_section_hook
/* ELF backend hooks implemented in this file.  */
8041 #define elf_backend_adjust_dynamic_symbol \
8042 elfNN_aarch64_adjust_dynamic_symbol
8044 #define elf_backend_always_size_sections \
8045 elfNN_aarch64_always_size_sections
8047 #define elf_backend_check_relocs \
8048 elfNN_aarch64_check_relocs
8050 #define elf_backend_copy_indirect_symbol \
8051 elfNN_aarch64_copy_indirect_symbol
8053 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
8054 to them in our hash. */
8055 #define elf_backend_create_dynamic_sections \
8056 elfNN_aarch64_create_dynamic_sections
8058 #define elf_backend_init_index_section \
8059 _bfd_elf_init_2_index_sections
8061 #define elf_backend_finish_dynamic_sections \
8062 elfNN_aarch64_finish_dynamic_sections
8064 #define elf_backend_finish_dynamic_symbol \
8065 elfNN_aarch64_finish_dynamic_symbol
8067 #define elf_backend_gc_sweep_hook \
8068 elfNN_aarch64_gc_sweep_hook
8070 #define elf_backend_object_p \
8071 elfNN_aarch64_object_p
8073 #define elf_backend_output_arch_local_syms \
8074 elfNN_aarch64_output_arch_local_syms
8076 #define elf_backend_plt_sym_val \
8077 elfNN_aarch64_plt_sym_val
8079 #define elf_backend_post_process_headers \
8080 elfNN_aarch64_post_process_headers
8082 #define elf_backend_relocate_section \
8083 elfNN_aarch64_relocate_section
8085 #define elf_backend_reloc_type_class \
8086 elfNN_aarch64_reloc_type_class
8088 #define elf_backend_section_from_shdr \
8089 elfNN_aarch64_section_from_shdr
8091 #define elf_backend_size_dynamic_sections \
8092 elfNN_aarch64_size_dynamic_sections
8094 #define elf_backend_size_info \
8095 elfNN_aarch64_size_info
8097 #define elf_backend_write_section \
8098 elfNN_aarch64_write_section
/* Backend capability flags and layout parameters.  */
8100 #define elf_backend_can_refcount 1
8101 #define elf_backend_can_gc_sections 1
8102 #define elf_backend_plt_readonly 1
8103 #define elf_backend_want_got_plt 1
8104 #define elf_backend_want_plt_sym 0
8105 #define elf_backend_may_use_rel_p 0
8106 #define elf_backend_may_use_rela_p 1
8107 #define elf_backend_default_use_rela_p 1
8108 #define elf_backend_rela_normal 1
8109 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8110 #define elf_backend_default_execstack 0
8112 #undef elf_backend_obj_attrs_section
8113 #define elf_backend_obj_attrs_section ".ARM.attributes"
8115 #include "elfNN-target.h"