1 /* AArch64-specific support for NN-bit ELF.
2 Copyright (C) 2009-2015 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
21 /* Notes on implementation:
23 Thread Local Storage (TLS)
27 The implementation currently supports both traditional TLS and TLS
28 descriptors, but only general dynamic (GD).
30 For traditional TLS the assembler will present us with code
31 fragments of the form:
adrp x0, :tlsgd:foo               R_AARCH64_TLSGD_ADR_PAGE21(foo)
add  x0, x0, #:tlsgd_lo12:foo     R_AARCH64_TLSGD_ADD_LO12_NC(foo)
bl   __tls_get_addr
nop
40 For TLS descriptors the assembler will present us with code
41 fragments of the form:
43 adrp x0, :tlsdesc:foo R_AARCH64_TLSDESC_ADR_PAGE21(foo)
44 ldr x1, [x0, #:tlsdesc_lo12:foo] R_AARCH64_TLSDESC_LD64_LO12(foo)
45 add x0, x0, #:tlsdesc_lo12:foo R_AARCH64_TLSDESC_ADD_LO12(foo)
47 blr x1 R_AARCH64_TLSDESC_CALL(foo)
49 The relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} against foo
50 indicate that foo is thread local and should be accessed via the
51 traditional TLS mechanism.
53 The relocations R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC}
54 against foo indicate that 'foo' is thread local and should be accessed
55 via a TLS descriptor mechanism.
57 The precise instruction sequence is only relevant from the
58 perspective of linker relaxation, which is currently not implemented.
60 The static linker must detect that 'foo' is a TLS object and
61 allocate a double GOT entry. The GOT entry must be created for both
62 global and local TLS symbols. Note that this is different from
63 non-TLS local symbols, which do not need a GOT entry.
65 In the traditional TLS mechanism, the double GOT entry is used to
66 provide the tls_index structure, containing module and offset
67 entries. The static linker places the relocation R_AARCH64_TLS_DTPMOD
68 on the module entry. The loader will subsequently fix up this
69 relocation with the module identity.
71 For global traditional TLS symbols the static linker places an
72 R_AARCH64_TLS_DTPREL relocation on the offset entry. The loader
73 will subsequently fix up the offset. For local TLS symbols the static
74 linker fixes up the offset itself.
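
   As an illustrative sketch (slot order as described above, not a
   literal dump from this code), the traditional-TLS double GOT entry
   can be pictured as:

     GOT[n]     R_AARCH64_TLS_DTPMOD   module id, filled in by the loader
     GOT[n+1]   R_AARCH64_TLS_DTPREL   offset; loader-filled for globals,
                                       linker-filled for locals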
76 In the TLS descriptor mechanism the double GOT entry is used to
77 provide the descriptor. The static linker places the relocation
78 R_AARCH64_TLSDESC on the first GOT slot. The loader will
79 subsequently fix this up.
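
   A rough picture of the TLS descriptor double GOT entry (illustrative
   only; the precise runtime contents are defined by the TLSDESC ABI):

     GOT[n]     R_AARCH64_TLSDESC      resolver entry point, loader-filled
     GOT[n+1]                          argument for the resolver, e.g. the
                                       symbol's offset within its module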
83 The handling of TLS symbols is implemented across a number of
84 different backend functions. The following is a top level view of
85 what processing is performed where.
87 The TLS implementation maintains state information for each TLS
88 symbol. The state information for local and global symbols is kept
89 in different places. Global symbols use generic BFD structures while
90 local symbols use backend specific structures that are allocated and
91 maintained entirely by the backend.
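
   (Concretely, in this file the global-symbol TLS state lives in
   struct elf_aarch64_link_hash_entry and the local-symbol state in
   struct elf_aarch64_local_symbol, both defined further below.)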
95 elfNN_aarch64_check_relocs()
97 This function is invoked for each relocation.
99 The TLS relocations R_AARCH64_TLSGD_{ADR_PREL21,ADD_LO12_NC} and
100 R_AARCH64_TLSDESC_{ADR_PAGE21,LD64_LO12_NC,ADD_LO12_NC} are
101 spotted. The local symbol data structures are created once, when
102 the first local symbol is seen.
104 The reference count for a symbol is incremented. The GOT type for
105 each symbol is marked as general dynamic.
107 elfNN_aarch64_allocate_dynrelocs ()
109 For each global symbol with a positive reference count we allocate a
110 double GOT slot. For a traditional TLS symbol we allocate space for two
111 relocation entries on the GOT; for a TLS descriptor symbol we
112 allocate space for one relocation on the slot. Record the GOT offset for this symbol.
115 elfNN_aarch64_size_dynamic_sections ()
117 Iterate over all input BFDs, looking in the local symbol data structures
118 constructed earlier for local TLS symbols, and allocate them double
119 GOT slots along with space for a single GOT relocation. Update the
120 local symbol structure to record the GOT offset allocated.
122 elfNN_aarch64_relocate_section ()
124 Calls elfNN_aarch64_final_link_relocate ()
126 Emit the relevant TLS relocations against the GOT for each TLS
127 symbol. For local TLS symbols emit the GOT offset directly. The GOT
128 relocations are emitted only once, the first time a TLS symbol is
129 encountered. The implementation uses the LSB of the GOT offset to
130 flag that the relevant GOT relocations for a symbol have been
131 emitted. All of the TLS code that uses the GOT offset needs to take
132 care to mask out this flag bit before using the offset.
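
   An illustrative sketch of that flag convention (pseudo-code only, not
   the literal implementation):

     if ((got_offset & 1) == 0)
       {
         ...emit the GOT relocation(s) for this symbol...;
         got_offset |= 1;                 <- mark relocations as emitted
       }
     value = got_offset & ~(bfd_vma) 1;   <- always mask off the flag bit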
134 elfNN_aarch64_final_link_relocate ()
136 Fix up the R_AARCH64_TLSGD_{ADR_PREL21, ADD_LO12_NC} relocations. */
#include "sysdep.h"
#include "bfd.h"
#include "libiberty.h"
#include "libbfd.h"
#include "bfd_stdint.h"
#include "elf-bfd.h"
#include "bfdlink.h"
#include "objalloc.h"
#include "elf/aarch64.h"
#include "elfxx-aarch64.h"
#if ARCH_SIZE == 64
#define AARCH64_R(NAME)       R_AARCH64_ ## NAME
#define AARCH64_R_STR(NAME)   "R_AARCH64_" #NAME
#define HOWTO64(...)          HOWTO (__VA_ARGS__)
#define HOWTO32(...)          EMPTY_HOWTO (0)
#define LOG_FILE_ALIGN        3
#endif

#if ARCH_SIZE == 32
#define AARCH64_R(NAME)       R_AARCH64_P32_ ## NAME
#define AARCH64_R_STR(NAME)   "R_AARCH64_P32_" #NAME
#define HOWTO64(...)          EMPTY_HOWTO (0)
#define HOWTO32(...)          HOWTO (__VA_ARGS__)
#define LOG_FILE_ALIGN        2
#endif
167 #define IS_AARCH64_TLS_RELOC(R_TYPE) \
168 ((R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 \
169 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADR_PREL21 \
170 || (R_TYPE) == BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC \
171 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1 \
172 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC \
173 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 \
174 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC \
175 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC \
176 || (R_TYPE) == BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19 \
177 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12 \
178 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12 \
179 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC \
180 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2 \
181 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 \
182 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC \
183 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0 \
184 || (R_TYPE) == BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC \
185 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPMOD \
186 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_DTPREL \
187 || (R_TYPE) == BFD_RELOC_AARCH64_TLS_TPREL \
188 || IS_AARCH64_TLSDESC_RELOC ((R_TYPE)))
190 #define IS_AARCH64_TLSDESC_RELOC(R_TYPE) \
191 ((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
192 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
193 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
194 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC \
195 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC \
196 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
197 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G1 \
198 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
199 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
200 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
201 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
202 || (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC)
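/* Note that both predicates above test BFD reloc codes
   (BFD_RELOC_AARCH64_*), not raw ELF r_type numbers; callers are
   expected to translate R_AARCH64_* values first, e.g. via
   elfNN_aarch64_bfd_reloc_from_type () defined later in this file.  */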
204 #define ELIMINATE_COPY_RELOCS 0
206 /* Return size of a relocation entry. HTAB is the bfd's
207 elf_aarch64_link_hash_table. */
208 #define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
210 /* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
211 #define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
212 #define PLT_ENTRY_SIZE (32)
213 #define PLT_SMALL_ENTRY_SIZE (16)
214 #define PLT_TLSDESC_ENTRY_SIZE (32)
216 /* Encoding of the nop instruction */
217 #define INSN_NOP 0xd503201f
219 #define aarch64_compute_jump_table_size(htab) \
220 (((htab)->root.srelplt == NULL) ? 0 \
221 : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
223 /* The first entry in a procedure linkage table looks like this
224 if the distance between the PLTGOT and the PLT is < 4GB; in that case use
225 these PLT entries. Note that the dynamic linker gets &PLTGOT[2]
226 in x16 and needs to work out PLTGOT[1] by using an address of
227 [x16,#-GOT_ENTRY_SIZE]. */
228 static const bfd_byte elfNN_aarch64_small_plt0_entry[PLT_ENTRY_SIZE] =
230 0xf0, 0x7b, 0xbf, 0xa9, /* stp x16, x30, [sp, #-16]! */
231 0x10, 0x00, 0x00, 0x90, /* adrp x16, (GOT+16) */
#if ARCH_SIZE == 64
0x11, 0x0A, 0x40, 0xf9, /* ldr x17, [x16, #PLT_GOT+0x10] */
0x10, 0x42, 0x00, 0x91, /* add x16, x16, #PLT_GOT+0x10 */
#else
0x11, 0x0A, 0x40, 0xb9, /* ldr w17, [x16, #PLT_GOT+0x8] */
0x10, 0x22, 0x00, 0x11, /* add w16, w16, #PLT_GOT+0x8 */
#endif
239 0x20, 0x02, 0x1f, 0xd6, /* br x17 */
240 0x1f, 0x20, 0x03, 0xd5, /* nop */
241 0x1f, 0x20, 0x03, 0xd5, /* nop */
242 0x1f, 0x20, 0x03, 0xd5, /* nop */
245 /* A per-function entry in the procedure linkage table looks like this
246 if the distance between the PLTGOT and the PLT is < 4GB; in that case use
247 these PLT entries. */
248 static const bfd_byte elfNN_aarch64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
250 0x10, 0x00, 0x00, 0x90, /* adrp x16, PLTGOT + n * 8 */
#if ARCH_SIZE == 64
0x11, 0x02, 0x40, 0xf9, /* ldr x17, [x16, PLTGOT + n * 8] */
0x10, 0x02, 0x00, 0x91, /* add x16, x16, :lo12:PLTGOT + n * 8 */
#else
0x11, 0x02, 0x40, 0xb9, /* ldr w17, [x16, PLTGOT + n * 4] */
0x10, 0x02, 0x00, 0x11, /* add w16, w16, :lo12:PLTGOT + n * 4 */
#endif
258 0x20, 0x02, 0x1f, 0xd6, /* br x17. */
261 static const bfd_byte
262 elfNN_aarch64_tlsdesc_small_plt_entry[PLT_TLSDESC_ENTRY_SIZE] =
264 0xe2, 0x0f, 0xbf, 0xa9, /* stp x2, x3, [sp, #-16]! */
265 0x02, 0x00, 0x00, 0x90, /* adrp x2, 0 */
266 0x03, 0x00, 0x00, 0x90, /* adrp x3, 0 */
#if ARCH_SIZE == 64
0x42, 0x00, 0x40, 0xf9, /* ldr x2, [x2, #0] */
0x63, 0x00, 0x00, 0x91, /* add x3, x3, 0 */
#else
0x42, 0x00, 0x40, 0xb9, /* ldr w2, [x2, #0] */
0x63, 0x00, 0x00, 0x11, /* add w3, w3, 0 */
#endif
274 0x40, 0x00, 0x1f, 0xd6, /* br x2 */
275 0x1f, 0x20, 0x03, 0xd5, /* nop */
276 0x1f, 0x20, 0x03, 0xd5, /* nop */
279 #define elf_info_to_howto elfNN_aarch64_info_to_howto
280 #define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
282 #define AARCH64_ELF_ABI_VERSION 0
284 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
285 #define ALL_ONES (~ (bfd_vma) 0)
/* Indexed by the bfd internal reloc enumerators.
   Therefore, the table needs to be synced with the BFD_RELOC_AARCH64_*
   queue in reloc.c.  */
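/* For example, for a code CODE inside the AArch64 range the entry is
   found at index CODE - BFD_RELOC_AARCH64_RELOC_START; see
   elfNN_aarch64_howto_from_bfd_reloc () below.  */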
291 static reloc_howto_type elfNN_aarch64_howto_table[] =
295 /* Basic data relocations. */
298 HOWTO (R_AARCH64_NULL, /* type */
300 3, /* size (0 = byte, 1 = short, 2 = long) */
302 FALSE, /* pc_relative */
304 complain_overflow_dont, /* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_AARCH64_NULL", /* name */
307 FALSE, /* partial_inplace */
310 FALSE), /* pcrel_offset */
312 HOWTO (R_AARCH64_NONE, /* type */
314 3, /* size (0 = byte, 1 = short, 2 = long) */
316 FALSE, /* pc_relative */
318 complain_overflow_dont, /* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_AARCH64_NONE", /* name */
321 FALSE, /* partial_inplace */
324 FALSE), /* pcrel_offset */
328 HOWTO64 (AARCH64_R (ABS64), /* type */
330 4, /* size (4 = long long) */
332 FALSE, /* pc_relative */
334 complain_overflow_unsigned, /* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 AARCH64_R_STR (ABS64), /* name */
337 FALSE, /* partial_inplace */
338 ALL_ONES, /* src_mask */
339 ALL_ONES, /* dst_mask */
340 FALSE), /* pcrel_offset */
343 HOWTO (AARCH64_R (ABS32), /* type */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
347 FALSE, /* pc_relative */
349 complain_overflow_unsigned, /* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 AARCH64_R_STR (ABS32), /* name */
352 FALSE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
358 HOWTO (AARCH64_R (ABS16), /* type */
360 1, /* size (0 = byte, 1 = short, 2 = long) */
362 FALSE, /* pc_relative */
364 complain_overflow_unsigned, /* complain_on_overflow */
365 bfd_elf_generic_reloc, /* special_function */
366 AARCH64_R_STR (ABS16), /* name */
367 FALSE, /* partial_inplace */
368 0xffff, /* src_mask */
369 0xffff, /* dst_mask */
370 FALSE), /* pcrel_offset */
372 /* .xword: (S+A-P) */
373 HOWTO64 (AARCH64_R (PREL64), /* type */
375 4, /* size (4 = long long) */
377 TRUE, /* pc_relative */
379 complain_overflow_signed, /* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 AARCH64_R_STR (PREL64), /* name */
382 FALSE, /* partial_inplace */
383 ALL_ONES, /* src_mask */
384 ALL_ONES, /* dst_mask */
385 TRUE), /* pcrel_offset */
388 HOWTO (AARCH64_R (PREL32), /* type */
390 2, /* size (0 = byte, 1 = short, 2 = long) */
392 TRUE, /* pc_relative */
394 complain_overflow_signed, /* complain_on_overflow */
395 bfd_elf_generic_reloc, /* special_function */
396 AARCH64_R_STR (PREL32), /* name */
397 FALSE, /* partial_inplace */
398 0xffffffff, /* src_mask */
399 0xffffffff, /* dst_mask */
400 TRUE), /* pcrel_offset */
403 HOWTO (AARCH64_R (PREL16), /* type */
405 1, /* size (0 = byte, 1 = short, 2 = long) */
407 TRUE, /* pc_relative */
409 complain_overflow_signed, /* complain_on_overflow */
410 bfd_elf_generic_reloc, /* special_function */
411 AARCH64_R_STR (PREL16), /* name */
412 FALSE, /* partial_inplace */
413 0xffff, /* src_mask */
414 0xffff, /* dst_mask */
415 TRUE), /* pcrel_offset */
417 /* Group relocations to create a 16, 32, 48 or 64 bit
418 unsigned data or abs address inline. */
420 /* MOVZ: ((S+A) >> 0) & 0xffff */
421 HOWTO (AARCH64_R (MOVW_UABS_G0), /* type */
423 2, /* size (0 = byte, 1 = short, 2 = long) */
425 FALSE, /* pc_relative */
427 complain_overflow_unsigned, /* complain_on_overflow */
428 bfd_elf_generic_reloc, /* special_function */
429 AARCH64_R_STR (MOVW_UABS_G0), /* name */
430 FALSE, /* partial_inplace */
431 0xffff, /* src_mask */
432 0xffff, /* dst_mask */
433 FALSE), /* pcrel_offset */
435 /* MOVK: ((S+A) >> 0) & 0xffff [no overflow check] */
436 HOWTO (AARCH64_R (MOVW_UABS_G0_NC), /* type */
438 2, /* size (0 = byte, 1 = short, 2 = long) */
440 FALSE, /* pc_relative */
442 complain_overflow_dont, /* complain_on_overflow */
443 bfd_elf_generic_reloc, /* special_function */
444 AARCH64_R_STR (MOVW_UABS_G0_NC), /* name */
445 FALSE, /* partial_inplace */
446 0xffff, /* src_mask */
447 0xffff, /* dst_mask */
448 FALSE), /* pcrel_offset */
450 /* MOVZ: ((S+A) >> 16) & 0xffff */
451 HOWTO (AARCH64_R (MOVW_UABS_G1), /* type */
453 2, /* size (0 = byte, 1 = short, 2 = long) */
455 FALSE, /* pc_relative */
457 complain_overflow_unsigned, /* complain_on_overflow */
458 bfd_elf_generic_reloc, /* special_function */
459 AARCH64_R_STR (MOVW_UABS_G1), /* name */
460 FALSE, /* partial_inplace */
461 0xffff, /* src_mask */
462 0xffff, /* dst_mask */
463 FALSE), /* pcrel_offset */
465 /* MOVK: ((S+A) >> 16) & 0xffff [no overflow check] */
466 HOWTO64 (AARCH64_R (MOVW_UABS_G1_NC), /* type */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
470 FALSE, /* pc_relative */
472 complain_overflow_dont, /* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 AARCH64_R_STR (MOVW_UABS_G1_NC), /* name */
475 FALSE, /* partial_inplace */
476 0xffff, /* src_mask */
477 0xffff, /* dst_mask */
478 FALSE), /* pcrel_offset */
480 /* MOVZ: ((S+A) >> 32) & 0xffff */
481 HOWTO64 (AARCH64_R (MOVW_UABS_G2), /* type */
483 2, /* size (0 = byte, 1 = short, 2 = long) */
485 FALSE, /* pc_relative */
487 complain_overflow_unsigned, /* complain_on_overflow */
488 bfd_elf_generic_reloc, /* special_function */
489 AARCH64_R_STR (MOVW_UABS_G2), /* name */
490 FALSE, /* partial_inplace */
491 0xffff, /* src_mask */
492 0xffff, /* dst_mask */
493 FALSE), /* pcrel_offset */
495 /* MOVK: ((S+A) >> 32) & 0xffff [no overflow check] */
496 HOWTO64 (AARCH64_R (MOVW_UABS_G2_NC), /* type */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
500 FALSE, /* pc_relative */
502 complain_overflow_dont, /* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 AARCH64_R_STR (MOVW_UABS_G2_NC), /* name */
505 FALSE, /* partial_inplace */
506 0xffff, /* src_mask */
507 0xffff, /* dst_mask */
508 FALSE), /* pcrel_offset */
510 /* MOVZ: ((S+A) >> 48) & 0xffff */
511 HOWTO64 (AARCH64_R (MOVW_UABS_G3), /* type */
513 2, /* size (0 = byte, 1 = short, 2 = long) */
515 FALSE, /* pc_relative */
517 complain_overflow_unsigned, /* complain_on_overflow */
518 bfd_elf_generic_reloc, /* special_function */
519 AARCH64_R_STR (MOVW_UABS_G3), /* name */
520 FALSE, /* partial_inplace */
521 0xffff, /* src_mask */
522 0xffff, /* dst_mask */
523 FALSE), /* pcrel_offset */
525 /* Group relocations to create high part of a 16, 32, 48 or 64 bit
526 signed data or abs address inline. Will change instruction
527 to MOVN or MOVZ depending on sign of calculated value. */
529 /* MOV[ZN]: ((S+A) >> 0) & 0xffff */
530 HOWTO (AARCH64_R (MOVW_SABS_G0), /* type */
532 2, /* size (0 = byte, 1 = short, 2 = long) */
534 FALSE, /* pc_relative */
536 complain_overflow_signed, /* complain_on_overflow */
537 bfd_elf_generic_reloc, /* special_function */
538 AARCH64_R_STR (MOVW_SABS_G0), /* name */
539 FALSE, /* partial_inplace */
540 0xffff, /* src_mask */
541 0xffff, /* dst_mask */
542 FALSE), /* pcrel_offset */
544 /* MOV[ZN]: ((S+A) >> 16) & 0xffff */
545 HOWTO64 (AARCH64_R (MOVW_SABS_G1), /* type */
547 2, /* size (0 = byte, 1 = short, 2 = long) */
549 FALSE, /* pc_relative */
551 complain_overflow_signed, /* complain_on_overflow */
552 bfd_elf_generic_reloc, /* special_function */
553 AARCH64_R_STR (MOVW_SABS_G1), /* name */
554 FALSE, /* partial_inplace */
555 0xffff, /* src_mask */
556 0xffff, /* dst_mask */
557 FALSE), /* pcrel_offset */
559 /* MOV[ZN]: ((S+A) >> 32) & 0xffff */
560 HOWTO64 (AARCH64_R (MOVW_SABS_G2), /* type */
562 2, /* size (0 = byte, 1 = short, 2 = long) */
564 FALSE, /* pc_relative */
566 complain_overflow_signed, /* complain_on_overflow */
567 bfd_elf_generic_reloc, /* special_function */
568 AARCH64_R_STR (MOVW_SABS_G2), /* name */
569 FALSE, /* partial_inplace */
570 0xffff, /* src_mask */
571 0xffff, /* dst_mask */
572 FALSE), /* pcrel_offset */
574 /* Relocations to generate 19, 21 and 33 bit PC-relative load/store
575 addresses: PG(x) is (x & ~0xfff). */
577 /* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
578 HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
582 TRUE, /* pc_relative */
584 complain_overflow_signed, /* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 AARCH64_R_STR (LD_PREL_LO19), /* name */
587 FALSE, /* partial_inplace */
588 0x7ffff, /* src_mask */
589 0x7ffff, /* dst_mask */
590 TRUE), /* pcrel_offset */
592 /* ADR: (S+A-P) & 0x1fffff */
593 HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
595 2, /* size (0 = byte, 1 = short, 2 = long) */
597 TRUE, /* pc_relative */
599 complain_overflow_signed, /* complain_on_overflow */
600 bfd_elf_generic_reloc, /* special_function */
601 AARCH64_R_STR (ADR_PREL_LO21), /* name */
602 FALSE, /* partial_inplace */
603 0x1fffff, /* src_mask */
604 0x1fffff, /* dst_mask */
605 TRUE), /* pcrel_offset */
607 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
608 HOWTO (AARCH64_R (ADR_PREL_PG_HI21), /* type */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
612 TRUE, /* pc_relative */
614 complain_overflow_signed, /* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 AARCH64_R_STR (ADR_PREL_PG_HI21), /* name */
617 FALSE, /* partial_inplace */
618 0x1fffff, /* src_mask */
619 0x1fffff, /* dst_mask */
620 TRUE), /* pcrel_offset */
622 /* ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff [no overflow check] */
623 HOWTO64 (AARCH64_R (ADR_PREL_PG_HI21_NC), /* type */
625 2, /* size (0 = byte, 1 = short, 2 = long) */
627 TRUE, /* pc_relative */
629 complain_overflow_dont, /* complain_on_overflow */
630 bfd_elf_generic_reloc, /* special_function */
631 AARCH64_R_STR (ADR_PREL_PG_HI21_NC), /* name */
632 FALSE, /* partial_inplace */
633 0x1fffff, /* src_mask */
634 0x1fffff, /* dst_mask */
635 TRUE), /* pcrel_offset */
637 /* ADD: (S+A) & 0xfff [no overflow check] */
638 HOWTO (AARCH64_R (ADD_ABS_LO12_NC), /* type */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
642 FALSE, /* pc_relative */
644 complain_overflow_dont, /* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 AARCH64_R_STR (ADD_ABS_LO12_NC), /* name */
647 FALSE, /* partial_inplace */
648 0x3ffc00, /* src_mask */
649 0x3ffc00, /* dst_mask */
650 FALSE), /* pcrel_offset */
652 /* LD/ST8: (S+A) & 0xfff */
653 HOWTO (AARCH64_R (LDST8_ABS_LO12_NC), /* type */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
657 FALSE, /* pc_relative */
659 complain_overflow_dont, /* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 AARCH64_R_STR (LDST8_ABS_LO12_NC), /* name */
662 FALSE, /* partial_inplace */
663 0xfff, /* src_mask */
664 0xfff, /* dst_mask */
665 FALSE), /* pcrel_offset */
667 /* Relocations for control-flow instructions. */
669 /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
670 HOWTO (AARCH64_R (TSTBR14), /* type */
672 2, /* size (0 = byte, 1 = short, 2 = long) */
674 TRUE, /* pc_relative */
676 complain_overflow_signed, /* complain_on_overflow */
677 bfd_elf_generic_reloc, /* special_function */
678 AARCH64_R_STR (TSTBR14), /* name */
679 FALSE, /* partial_inplace */
680 0x3fff, /* src_mask */
681 0x3fff, /* dst_mask */
682 TRUE), /* pcrel_offset */
684 /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
685 HOWTO (AARCH64_R (CONDBR19), /* type */
687 2, /* size (0 = byte, 1 = short, 2 = long) */
689 TRUE, /* pc_relative */
691 complain_overflow_signed, /* complain_on_overflow */
692 bfd_elf_generic_reloc, /* special_function */
693 AARCH64_R_STR (CONDBR19), /* name */
694 FALSE, /* partial_inplace */
695 0x7ffff, /* src_mask */
696 0x7ffff, /* dst_mask */
697 TRUE), /* pcrel_offset */
699 /* B: ((S+A-P) >> 2) & 0x3ffffff */
700 HOWTO (AARCH64_R (JUMP26), /* type */
702 2, /* size (0 = byte, 1 = short, 2 = long) */
704 TRUE, /* pc_relative */
706 complain_overflow_signed, /* complain_on_overflow */
707 bfd_elf_generic_reloc, /* special_function */
708 AARCH64_R_STR (JUMP26), /* name */
709 FALSE, /* partial_inplace */
710 0x3ffffff, /* src_mask */
711 0x3ffffff, /* dst_mask */
712 TRUE), /* pcrel_offset */
714 /* BL: ((S+A-P) >> 2) & 0x3ffffff */
715 HOWTO (AARCH64_R (CALL26), /* type */
717 2, /* size (0 = byte, 1 = short, 2 = long) */
719 TRUE, /* pc_relative */
721 complain_overflow_signed, /* complain_on_overflow */
722 bfd_elf_generic_reloc, /* special_function */
723 AARCH64_R_STR (CALL26), /* name */
724 FALSE, /* partial_inplace */
725 0x3ffffff, /* src_mask */
726 0x3ffffff, /* dst_mask */
727 TRUE), /* pcrel_offset */
729 /* LD/ST16: (S+A) & 0xffe */
730 HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
732 2, /* size (0 = byte, 1 = short, 2 = long) */
734 FALSE, /* pc_relative */
736 complain_overflow_dont, /* complain_on_overflow */
737 bfd_elf_generic_reloc, /* special_function */
738 AARCH64_R_STR (LDST16_ABS_LO12_NC), /* name */
739 FALSE, /* partial_inplace */
740 0xffe, /* src_mask */
741 0xffe, /* dst_mask */
742 FALSE), /* pcrel_offset */
744 /* LD/ST32: (S+A) & 0xffc */
745 HOWTO (AARCH64_R (LDST32_ABS_LO12_NC), /* type */
747 2, /* size (0 = byte, 1 = short, 2 = long) */
749 FALSE, /* pc_relative */
751 complain_overflow_dont, /* complain_on_overflow */
752 bfd_elf_generic_reloc, /* special_function */
753 AARCH64_R_STR (LDST32_ABS_LO12_NC), /* name */
754 FALSE, /* partial_inplace */
755 0xffc, /* src_mask */
756 0xffc, /* dst_mask */
757 FALSE), /* pcrel_offset */
759 /* LD/ST64: (S+A) & 0xff8 */
760 HOWTO (AARCH64_R (LDST64_ABS_LO12_NC), /* type */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
764 FALSE, /* pc_relative */
766 complain_overflow_dont, /* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 AARCH64_R_STR (LDST64_ABS_LO12_NC), /* name */
769 FALSE, /* partial_inplace */
770 0xff8, /* src_mask */
771 0xff8, /* dst_mask */
772 FALSE), /* pcrel_offset */
774 /* LD/ST128: (S+A) & 0xff0 */
775 HOWTO (AARCH64_R (LDST128_ABS_LO12_NC), /* type */
777 2, /* size (0 = byte, 1 = short, 2 = long) */
779 FALSE, /* pc_relative */
781 complain_overflow_dont, /* complain_on_overflow */
782 bfd_elf_generic_reloc, /* special_function */
783 AARCH64_R_STR (LDST128_ABS_LO12_NC), /* name */
784 FALSE, /* partial_inplace */
785 0xff0, /* src_mask */
786 0xff0, /* dst_mask */
787 FALSE), /* pcrel_offset */
789 /* Set a load-literal immediate field to bits
790 0x1FFFFC of G(S)-P */
791 HOWTO (AARCH64_R (GOT_LD_PREL19), /* type */
793 2, /* size (0 = byte,1 = short,2 = long) */
795 TRUE, /* pc_relative */
797 complain_overflow_signed, /* complain_on_overflow */
798 bfd_elf_generic_reloc, /* special_function */
799 AARCH64_R_STR (GOT_LD_PREL19), /* name */
800 FALSE, /* partial_inplace */
801 0xffffe0, /* src_mask */
802 0xffffe0, /* dst_mask */
803 TRUE), /* pcrel_offset */
805 /* Get to the page for the GOT entry for the symbol
806 (G(S) - P) using an ADRP instruction. */
807 HOWTO (AARCH64_R (ADR_GOT_PAGE), /* type */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
811 TRUE, /* pc_relative */
813 complain_overflow_dont, /* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 AARCH64_R_STR (ADR_GOT_PAGE), /* name */
816 FALSE, /* partial_inplace */
817 0x1fffff, /* src_mask */
818 0x1fffff, /* dst_mask */
819 TRUE), /* pcrel_offset */
821 /* LD64: GOT offset G(S) & 0xff8 */
822 HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
824 2, /* size (0 = byte, 1 = short, 2 = long) */
826 FALSE, /* pc_relative */
828 complain_overflow_dont, /* complain_on_overflow */
829 bfd_elf_generic_reloc, /* special_function */
830 AARCH64_R_STR (LD64_GOT_LO12_NC), /* name */
831 FALSE, /* partial_inplace */
832 0xff8, /* src_mask */
833 0xff8, /* dst_mask */
834 FALSE), /* pcrel_offset */
836 /* LD32: GOT offset G(S) & 0xffc */
837 HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
841 FALSE, /* pc_relative */
843 complain_overflow_dont, /* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 AARCH64_R_STR (LD32_GOT_LO12_NC), /* name */
846 FALSE, /* partial_inplace */
847 0xffc, /* src_mask */
848 0xffc, /* dst_mask */
849 FALSE), /* pcrel_offset */
851 /* Get to the page for the GOT entry for the symbol
852 (G(S) - P) using an ADRP instruction. */
853 HOWTO (AARCH64_R (TLSGD_ADR_PAGE21), /* type */
855 2, /* size (0 = byte, 1 = short, 2 = long) */
857 TRUE, /* pc_relative */
859 complain_overflow_dont, /* complain_on_overflow */
860 bfd_elf_generic_reloc, /* special_function */
861 AARCH64_R_STR (TLSGD_ADR_PAGE21), /* name */
862 FALSE, /* partial_inplace */
863 0x1fffff, /* src_mask */
864 0x1fffff, /* dst_mask */
865 TRUE), /* pcrel_offset */
867 HOWTO (AARCH64_R (TLSGD_ADR_PREL21), /* type */
869 2, /* size (0 = byte, 1 = short, 2 = long) */
871 TRUE, /* pc_relative */
873 complain_overflow_dont, /* complain_on_overflow */
874 bfd_elf_generic_reloc, /* special_function */
875 AARCH64_R_STR (TLSGD_ADR_PREL21), /* name */
876 FALSE, /* partial_inplace */
877 0x1fffff, /* src_mask */
878 0x1fffff, /* dst_mask */
879 TRUE), /* pcrel_offset */
881 /* ADD: GOT offset G(S) & 0xff8 [no overflow check] */
882 HOWTO (AARCH64_R (TLSGD_ADD_LO12_NC), /* type */
884 2, /* size (0 = byte, 1 = short, 2 = long) */
886 FALSE, /* pc_relative */
888 complain_overflow_dont, /* complain_on_overflow */
889 bfd_elf_generic_reloc, /* special_function */
890 AARCH64_R_STR (TLSGD_ADD_LO12_NC), /* name */
891 FALSE, /* partial_inplace */
892 0xfff, /* src_mask */
893 0xfff, /* dst_mask */
894 FALSE), /* pcrel_offset */
896 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G1), /* type */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
900 FALSE, /* pc_relative */
902 complain_overflow_dont, /* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G1), /* name */
905 FALSE, /* partial_inplace */
906 0xffff, /* src_mask */
907 0xffff, /* dst_mask */
908 FALSE), /* pcrel_offset */
910 HOWTO64 (AARCH64_R (TLSIE_MOVW_GOTTPREL_G0_NC), /* type */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
914 FALSE, /* pc_relative */
916 complain_overflow_dont, /* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 AARCH64_R_STR (TLSIE_MOVW_GOTTPREL_G0_NC), /* name */
919 FALSE, /* partial_inplace */
920 0xffff, /* src_mask */
921 0xffff, /* dst_mask */
922 FALSE), /* pcrel_offset */
924 HOWTO (AARCH64_R (TLSIE_ADR_GOTTPREL_PAGE21), /* type */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
928 FALSE, /* pc_relative */
930 complain_overflow_dont, /* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 AARCH64_R_STR (TLSIE_ADR_GOTTPREL_PAGE21), /* name */
933 FALSE, /* partial_inplace */
934 0x1fffff, /* src_mask */
935 0x1fffff, /* dst_mask */
936 FALSE), /* pcrel_offset */
938 HOWTO64 (AARCH64_R (TLSIE_LD64_GOTTPREL_LO12_NC), /* type */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
942 FALSE, /* pc_relative */
944 complain_overflow_dont, /* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 AARCH64_R_STR (TLSIE_LD64_GOTTPREL_LO12_NC), /* name */
947 FALSE, /* partial_inplace */
948 0xff8, /* src_mask */
949 0xff8, /* dst_mask */
950 FALSE), /* pcrel_offset */
952 HOWTO32 (AARCH64_R (TLSIE_LD32_GOTTPREL_LO12_NC), /* type */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
956 FALSE, /* pc_relative */
958 complain_overflow_dont, /* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 AARCH64_R_STR (TLSIE_LD32_GOTTPREL_LO12_NC), /* name */
961 FALSE, /* partial_inplace */
962 0xffc, /* src_mask */
963 0xffc, /* dst_mask */
964 FALSE), /* pcrel_offset */
966 HOWTO (AARCH64_R (TLSIE_LD_GOTTPREL_PREL19), /* type */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
970 FALSE, /* pc_relative */
972 complain_overflow_dont, /* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 AARCH64_R_STR (TLSIE_LD_GOTTPREL_PREL19), /* name */
975 FALSE, /* partial_inplace */
976 0x1ffffc, /* src_mask */
977 0x1ffffc, /* dst_mask */
978 FALSE), /* pcrel_offset */
980 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G2), /* type */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
984 FALSE, /* pc_relative */
986 complain_overflow_unsigned, /* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 AARCH64_R_STR (TLSLE_MOVW_TPREL_G2), /* name */
989 FALSE, /* partial_inplace */
990 0xffff, /* src_mask */
991 0xffff, /* dst_mask */
992 FALSE), /* pcrel_offset */
994 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G1), /* type */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
998 FALSE, /* pc_relative */
1000 complain_overflow_dont, /* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1), /* name */
1003 FALSE, /* partial_inplace */
1004 0xffff, /* src_mask */
1005 0xffff, /* dst_mask */
1006 FALSE), /* pcrel_offset */
1008 HOWTO64 (AARCH64_R (TLSLE_MOVW_TPREL_G1_NC), /* type */
1009 16, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1012 FALSE, /* pc_relative */
1014 complain_overflow_dont, /* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 AARCH64_R_STR (TLSLE_MOVW_TPREL_G1_NC), /* name */
1017 FALSE, /* partial_inplace */
1018 0xffff, /* src_mask */
1019 0xffff, /* dst_mask */
1020 FALSE), /* pcrel_offset */
1022 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0), /* type */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1026 FALSE, /* pc_relative */
1028 complain_overflow_dont, /* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0), /* name */
1031 FALSE, /* partial_inplace */
1032 0xffff, /* src_mask */
1033 0xffff, /* dst_mask */
1034 FALSE), /* pcrel_offset */
1036 HOWTO (AARCH64_R (TLSLE_MOVW_TPREL_G0_NC), /* type */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1040 FALSE, /* pc_relative */
1042 complain_overflow_dont, /* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 AARCH64_R_STR (TLSLE_MOVW_TPREL_G0_NC), /* name */
1045 FALSE, /* partial_inplace */
1046 0xffff, /* src_mask */
1047 0xffff, /* dst_mask */
1048 FALSE), /* pcrel_offset */
1050 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_HI12), /* type */
1051 12, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1054 FALSE, /* pc_relative */
1056 complain_overflow_unsigned, /* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 AARCH64_R_STR (TLSLE_ADD_TPREL_HI12), /* name */
1059 FALSE, /* partial_inplace */
1060 0xfff, /* src_mask */
1061 0xfff, /* dst_mask */
1062 FALSE), /* pcrel_offset */
1064 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12), /* type */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1068 FALSE, /* pc_relative */
1070 complain_overflow_dont, /* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12), /* name */
1073 FALSE, /* partial_inplace */
1074 0xfff, /* src_mask */
1075 0xfff, /* dst_mask */
1076 FALSE), /* pcrel_offset */
1078 HOWTO (AARCH64_R (TLSLE_ADD_TPREL_LO12_NC), /* type */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1082 FALSE, /* pc_relative */
1084 complain_overflow_dont, /* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 AARCH64_R_STR (TLSLE_ADD_TPREL_LO12_NC), /* name */
1087 FALSE, /* partial_inplace */
1088 0xfff, /* src_mask */
1089 0xfff, /* dst_mask */
1090 FALSE), /* pcrel_offset */
1092 HOWTO (AARCH64_R (TLSDESC_LD_PREL19), /* type */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1096 TRUE, /* pc_relative */
1098 complain_overflow_dont, /* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 AARCH64_R_STR (TLSDESC_LD_PREL19), /* name */
1101 FALSE, /* partial_inplace */
1102 0x0ffffe0, /* src_mask */
1103 0x0ffffe0, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1106 HOWTO (AARCH64_R (TLSDESC_ADR_PREL21), /* type */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1110 TRUE, /* pc_relative */
1112 complain_overflow_dont, /* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 AARCH64_R_STR (TLSDESC_ADR_PREL21), /* name */
1115 FALSE, /* partial_inplace */
1116 0x1fffff, /* src_mask */
1117 0x1fffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1120 /* Get to the page for the GOT entry for the symbol
1121 (G(S) - P) using an ADRP instruction. */
1122 HOWTO (AARCH64_R (TLSDESC_ADR_PAGE21), /* type */
1123 12, /* rightshift */
1124 2, /* size (0 = byte, 1 = short, 2 = long) */
1126 TRUE, /* pc_relative */
1128 complain_overflow_dont, /* complain_on_overflow */
1129 bfd_elf_generic_reloc, /* special_function */
1130 AARCH64_R_STR (TLSDESC_ADR_PAGE21), /* name */
1131 FALSE, /* partial_inplace */
1132 0x1fffff, /* src_mask */
1133 0x1fffff, /* dst_mask */
1134 TRUE), /* pcrel_offset */
1136 /* LD64: GOT offset G(S) & 0xff8. */
1137 HOWTO64 (AARCH64_R (TLSDESC_LD64_LO12_NC), /* type */
1139 2, /* size (0 = byte, 1 = short, 2 = long) */
1141 FALSE, /* pc_relative */
1143 complain_overflow_dont, /* complain_on_overflow */
1144 bfd_elf_generic_reloc, /* special_function */
1145 AARCH64_R_STR (TLSDESC_LD64_LO12_NC), /* name */
1146 FALSE, /* partial_inplace */
1147 0xff8, /* src_mask */
1148 0xff8, /* dst_mask */
1149 FALSE), /* pcrel_offset */
1151 /* LD32: GOT offset G(S) & 0xffc. */
1152 HOWTO32 (AARCH64_R (TLSDESC_LD32_LO12_NC), /* type */
1154 2, /* size (0 = byte, 1 = short, 2 = long) */
1156 FALSE, /* pc_relative */
1158 complain_overflow_dont, /* complain_on_overflow */
1159 bfd_elf_generic_reloc, /* special_function */
1160 AARCH64_R_STR (TLSDESC_LD32_LO12_NC), /* name */
1161 FALSE, /* partial_inplace */
1162 0xffc, /* src_mask */
1163 0xffc, /* dst_mask */
1164 FALSE), /* pcrel_offset */
1166 /* ADD: GOT offset G(S) & 0xfff. */
1167 HOWTO (AARCH64_R (TLSDESC_ADD_LO12_NC), /* type */
1169 2, /* size (0 = byte, 1 = short, 2 = long) */
1171 FALSE, /* pc_relative */
1173 complain_overflow_dont, /* complain_on_overflow */
1174 bfd_elf_generic_reloc, /* special_function */
1175 AARCH64_R_STR (TLSDESC_ADD_LO12_NC), /* name */
1176 FALSE, /* partial_inplace */
1177 0xfff, /* src_mask */
1178 0xfff, /* dst_mask */
1179 FALSE), /* pcrel_offset */
1181 HOWTO64 (AARCH64_R (TLSDESC_OFF_G1), /* type */
1182 16, /* rightshift */
1183 2, /* size (0 = byte, 1 = short, 2 = long) */
1185 FALSE, /* pc_relative */
1187 complain_overflow_dont, /* complain_on_overflow */
1188 bfd_elf_generic_reloc, /* special_function */
1189 AARCH64_R_STR (TLSDESC_OFF_G1), /* name */
1190 FALSE, /* partial_inplace */
1191 0xffff, /* src_mask */
1192 0xffff, /* dst_mask */
1193 FALSE), /* pcrel_offset */
1195 HOWTO64 (AARCH64_R (TLSDESC_OFF_G0_NC), /* type */
1197 2, /* size (0 = byte, 1 = short, 2 = long) */
1199 FALSE, /* pc_relative */
1201 complain_overflow_dont, /* complain_on_overflow */
1202 bfd_elf_generic_reloc, /* special_function */
1203 AARCH64_R_STR (TLSDESC_OFF_G0_NC), /* name */
1204 FALSE, /* partial_inplace */
1205 0xffff, /* src_mask */
1206 0xffff, /* dst_mask */
1207 FALSE), /* pcrel_offset */
1209 HOWTO64 (AARCH64_R (TLSDESC_LDR), /* type */
1211 2, /* size (0 = byte, 1 = short, 2 = long) */
1213 FALSE, /* pc_relative */
1215 complain_overflow_dont, /* complain_on_overflow */
1216 bfd_elf_generic_reloc, /* special_function */
1217 AARCH64_R_STR (TLSDESC_LDR), /* name */
1218 FALSE, /* partial_inplace */
1221 FALSE), /* pcrel_offset */
1223 HOWTO64 (AARCH64_R (TLSDESC_ADD), /* type */
1225 2, /* size (0 = byte, 1 = short, 2 = long) */
1227 FALSE, /* pc_relative */
1229 complain_overflow_dont, /* complain_on_overflow */
1230 bfd_elf_generic_reloc, /* special_function */
1231 AARCH64_R_STR (TLSDESC_ADD), /* name */
1232 FALSE, /* partial_inplace */
1235 FALSE), /* pcrel_offset */
1237 HOWTO (AARCH64_R (TLSDESC_CALL), /* type */
1239 2, /* size (0 = byte, 1 = short, 2 = long) */
1241 FALSE, /* pc_relative */
1243 complain_overflow_dont, /* complain_on_overflow */
1244 bfd_elf_generic_reloc, /* special_function */
1245 AARCH64_R_STR (TLSDESC_CALL), /* name */
1246 FALSE, /* partial_inplace */
1249 FALSE), /* pcrel_offset */
1251 HOWTO (AARCH64_R (COPY), /* type */
1253 2, /* size (0 = byte, 1 = short, 2 = long) */
1255 FALSE, /* pc_relative */
1257 complain_overflow_bitfield, /* complain_on_overflow */
1258 bfd_elf_generic_reloc, /* special_function */
1259 AARCH64_R_STR (COPY), /* name */
1260 TRUE, /* partial_inplace */
1261 0xffffffff, /* src_mask */
1262 0xffffffff, /* dst_mask */
1263 FALSE), /* pcrel_offset */
1265 HOWTO (AARCH64_R (GLOB_DAT), /* type */
1267 2, /* size (0 = byte, 1 = short, 2 = long) */
1269 FALSE, /* pc_relative */
1271 complain_overflow_bitfield, /* complain_on_overflow */
1272 bfd_elf_generic_reloc, /* special_function */
1273 AARCH64_R_STR (GLOB_DAT), /* name */
1274 TRUE, /* partial_inplace */
1275 0xffffffff, /* src_mask */
1276 0xffffffff, /* dst_mask */
1277 FALSE), /* pcrel_offset */
1279 HOWTO (AARCH64_R (JUMP_SLOT), /* type */
1281 2, /* size (0 = byte, 1 = short, 2 = long) */
1283 FALSE, /* pc_relative */
1285 complain_overflow_bitfield, /* complain_on_overflow */
1286 bfd_elf_generic_reloc, /* special_function */
1287 AARCH64_R_STR (JUMP_SLOT), /* name */
1288 TRUE, /* partial_inplace */
1289 0xffffffff, /* src_mask */
1290 0xffffffff, /* dst_mask */
1291 FALSE), /* pcrel_offset */
1293 HOWTO (AARCH64_R (RELATIVE), /* type */
1295 2, /* size (0 = byte, 1 = short, 2 = long) */
1297 FALSE, /* pc_relative */
1299 complain_overflow_bitfield, /* complain_on_overflow */
1300 bfd_elf_generic_reloc, /* special_function */
1301 AARCH64_R_STR (RELATIVE), /* name */
1302 TRUE, /* partial_inplace */
1303 ALL_ONES, /* src_mask */
1304 ALL_ONES, /* dst_mask */
1305 FALSE), /* pcrel_offset */
1307 HOWTO (AARCH64_R (TLS_DTPMOD), /* type */
1309 2, /* size (0 = byte, 1 = short, 2 = long) */
1311 FALSE, /* pc_relative */
1313 complain_overflow_dont, /* complain_on_overflow */
1314 bfd_elf_generic_reloc, /* special_function */
#if ARCH_SIZE == 64
AARCH64_R_STR (TLS_DTPMOD64), /* name */
#else
AARCH64_R_STR (TLS_DTPMOD), /* name */
#endif
1320 FALSE, /* partial_inplace */
1322 ALL_ONES, /* dst_mask */
1323 FALSE), /* pcrel_offset */
1325 HOWTO (AARCH64_R (TLS_DTPREL), /* type */
1327 2, /* size (0 = byte, 1 = short, 2 = long) */
1329 FALSE, /* pc_relative */
1331 complain_overflow_dont, /* complain_on_overflow */
1332 bfd_elf_generic_reloc, /* special_function */
#if ARCH_SIZE == 64
AARCH64_R_STR (TLS_DTPREL64), /* name */
#else
AARCH64_R_STR (TLS_DTPREL), /* name */
#endif
1338 FALSE, /* partial_inplace */
1340 ALL_ONES, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1343 HOWTO (AARCH64_R (TLS_TPREL), /* type */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1347 FALSE, /* pc_relative */
1349 complain_overflow_dont, /* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
#if ARCH_SIZE == 64
AARCH64_R_STR (TLS_TPREL64), /* name */
#else
AARCH64_R_STR (TLS_TPREL), /* name */
#endif
1356 FALSE, /* partial_inplace */
1358 ALL_ONES, /* dst_mask */
1359 FALSE), /* pcrel_offset */
1361 HOWTO (AARCH64_R (TLSDESC), /* type */
1363 2, /* size (0 = byte, 1 = short, 2 = long) */
1365 FALSE, /* pc_relative */
1367 complain_overflow_dont, /* complain_on_overflow */
1368 bfd_elf_generic_reloc, /* special_function */
1369 AARCH64_R_STR (TLSDESC), /* name */
1370 FALSE, /* partial_inplace */
1372 ALL_ONES, /* dst_mask */
1373 FALSE), /* pcrel_offset */
1375 HOWTO (AARCH64_R (IRELATIVE), /* type */
1377 2, /* size (0 = byte, 1 = short, 2 = long) */
1379 FALSE, /* pc_relative */
1381 complain_overflow_bitfield, /* complain_on_overflow */
1382 bfd_elf_generic_reloc, /* special_function */
1383 AARCH64_R_STR (IRELATIVE), /* name */
1384 FALSE, /* partial_inplace */
1386 ALL_ONES, /* dst_mask */
1387 FALSE), /* pcrel_offset */
1392 static reloc_howto_type elfNN_aarch64_howto_none =
1393 HOWTO (R_AARCH64_NONE, /* type */
1395 3, /* size (0 = byte, 1 = short, 2 = long) */
1397 FALSE, /* pc_relative */
1399 complain_overflow_dont,/* complain_on_overflow */
1400 bfd_elf_generic_reloc, /* special_function */
1401 "R_AARCH64_NONE", /* name */
1402 FALSE, /* partial_inplace */
1405 FALSE); /* pcrel_offset */
1407 /* Given HOWTO, return the bfd internal relocation enumerator. */
1409 static bfd_reloc_code_real_type
1410 elfNN_aarch64_bfd_reloc_from_howto (reloc_howto_type *howto)
const int size = (int) ARRAY_SIZE (elfNN_aarch64_howto_table);
1414 const ptrdiff_t offset
1415 = howto - elfNN_aarch64_howto_table;
1417 if (offset > 0 && offset < size - 1)
1418 return BFD_RELOC_AARCH64_RELOC_START + offset;
1420 if (howto == &elfNN_aarch64_howto_none)
1421 return BFD_RELOC_AARCH64_NONE;
1423 return BFD_RELOC_AARCH64_RELOC_START;
1426 /* Given R_TYPE, return the bfd internal relocation enumerator. */
1428 static bfd_reloc_code_real_type
1429 elfNN_aarch64_bfd_reloc_from_type (unsigned int r_type)
1431 static bfd_boolean initialized_p = FALSE;
1432 /* Indexed by R_TYPE, values are offsets in the howto_table. */
1433 static unsigned int offsets[R_AARCH64_end];
1435 if (initialized_p == FALSE)
1439 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1440 if (elfNN_aarch64_howto_table[i].type != 0)
1441 offsets[elfNN_aarch64_howto_table[i].type] = i;
1443 initialized_p = TRUE;
1446 if (r_type == R_AARCH64_NONE || r_type == R_AARCH64_NULL)
1447 return BFD_RELOC_AARCH64_NONE;
1449 /* PR 17512: file: b371e70a. */
1450 if (r_type >= R_AARCH64_end)
1452 _bfd_error_handler (_("Invalid AArch64 reloc number: %d"), r_type);
1453 bfd_set_error (bfd_error_bad_value);
1454 return BFD_RELOC_AARCH64_NONE;
1457 return BFD_RELOC_AARCH64_RELOC_START + offsets[r_type];
1460 struct elf_aarch64_reloc_map
1462 bfd_reloc_code_real_type from;
1463 bfd_reloc_code_real_type to;
1466 /* Map bfd generic reloc to AArch64-specific reloc. */
1467 static const struct elf_aarch64_reloc_map elf_aarch64_reloc_map[] =
1469 {BFD_RELOC_NONE, BFD_RELOC_AARCH64_NONE},
1471 /* Basic data relocations. */
1472 {BFD_RELOC_CTOR, BFD_RELOC_AARCH64_NN},
1473 {BFD_RELOC_64, BFD_RELOC_AARCH64_64},
1474 {BFD_RELOC_32, BFD_RELOC_AARCH64_32},
1475 {BFD_RELOC_16, BFD_RELOC_AARCH64_16},
1476 {BFD_RELOC_64_PCREL, BFD_RELOC_AARCH64_64_PCREL},
1477 {BFD_RELOC_32_PCREL, BFD_RELOC_AARCH64_32_PCREL},
1478 {BFD_RELOC_16_PCREL, BFD_RELOC_AARCH64_16_PCREL},
1481 /* Given the bfd internal relocation enumerator in CODE, return the
1482 corresponding howto entry. */
1484 static reloc_howto_type *
1485 elfNN_aarch64_howto_from_bfd_reloc (bfd_reloc_code_real_type code)
1489 /* Convert bfd generic reloc to AArch64-specific reloc. */
1490 if (code < BFD_RELOC_AARCH64_RELOC_START
1491 || code > BFD_RELOC_AARCH64_RELOC_END)
1492 for (i = 0; i < ARRAY_SIZE (elf_aarch64_reloc_map); i++)
1493 if (elf_aarch64_reloc_map[i].from == code)
1495 code = elf_aarch64_reloc_map[i].to;
1499 if (code > BFD_RELOC_AARCH64_RELOC_START
1500 && code < BFD_RELOC_AARCH64_RELOC_END)
1501 if (elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START].type)
1502 return &elfNN_aarch64_howto_table[code - BFD_RELOC_AARCH64_RELOC_START];
1504 if (code == BFD_RELOC_AARCH64_NONE)
1505 return &elfNN_aarch64_howto_none;
1510 static reloc_howto_type *
1511 elfNN_aarch64_howto_from_type (unsigned int r_type)
1513 bfd_reloc_code_real_type val;
1514 reloc_howto_type *howto;
1519 bfd_set_error (bfd_error_bad_value);
1524 if (r_type == R_AARCH64_NONE)
1525 return &elfNN_aarch64_howto_none;
1527 val = elfNN_aarch64_bfd_reloc_from_type (r_type);
1528 howto = elfNN_aarch64_howto_from_bfd_reloc (val);
1533 bfd_set_error (bfd_error_bad_value);
1538 elfNN_aarch64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *bfd_reloc,
1539 Elf_Internal_Rela *elf_reloc)
1541 unsigned int r_type;
1543 r_type = ELFNN_R_TYPE (elf_reloc->r_info);
1544 bfd_reloc->howto = elfNN_aarch64_howto_from_type (r_type);
1547 static reloc_howto_type *
1548 elfNN_aarch64_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1549 bfd_reloc_code_real_type code)
1551 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (code);
1556 bfd_set_error (bfd_error_bad_value);
1560 static reloc_howto_type *
1561 elfNN_aarch64_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1566 for (i = 1; i < ARRAY_SIZE (elfNN_aarch64_howto_table) - 1; ++i)
1567 if (elfNN_aarch64_howto_table[i].name != NULL
1568 && strcasecmp (elfNN_aarch64_howto_table[i].name, r_name) == 0)
1569 return &elfNN_aarch64_howto_table[i];
1574 #define TARGET_LITTLE_SYM aarch64_elfNN_le_vec
1575 #define TARGET_LITTLE_NAME "elfNN-littleaarch64"
1576 #define TARGET_BIG_SYM aarch64_elfNN_be_vec
1577 #define TARGET_BIG_NAME "elfNN-bigaarch64"
1579 /* The linker script knows the section names for placement.
1580 The entry_names are used to do simple name mangling on the stubs.
1581 Given a function name, and its type, the stub can be found. The
1582 name can be changed. The only requirement is that the %s be present. */
1583 #define STUB_ENTRY_NAME "__%s_veneer"
1585 /* The name of the dynamic interpreter. This is put in the .interp section. */
1587 #define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
1589 #define AARCH64_MAX_FWD_BRANCH_OFFSET \
1590 (((1 << 25) - 1) << 2)
1591 #define AARCH64_MAX_BWD_BRANCH_OFFSET \
#define AARCH64_MAX_BWD_BRANCH_OFFSET \
  (-((1 << 25) << 2))
1595 #define AARCH64_MIN_ADRP_IMM (-(1 << 20))
1598 aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
1600 bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
1601 return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
1605 aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
1607 bfd_signed_vma offset = (bfd_signed_vma) (value - place);
1608 return (offset <= AARCH64_MAX_FWD_BRANCH_OFFSET
1609 && offset >= AARCH64_MAX_BWD_BRANCH_OFFSET);
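/* With the values above, aarch64_valid_branch_p accepts offsets of
   roughly +/-128MB: ((1 << 25) - 1) << 2 == 0x7fffffc bytes forward,
   which matches the 26-bit signed, 4-byte-scaled immediate used by the
   AArch64 B and BL instructions.  */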
1612 static const uint32_t aarch64_adrp_branch_stub [] =
1614 0x90000010, /* adrp ip0, X */
1615 /* R_AARCH64_ADR_HI21_PCREL(X) */
1616 0x91000210, /* add ip0, ip0, :lo12:X */
1617 /* R_AARCH64_ADD_ABS_LO12_NC(X) */
1618 0xd61f0200, /* br ip0 */
1621 static const uint32_t aarch64_long_branch_stub[] =
#if ARCH_SIZE == 64
0x58000090, /* ldr ip0, 1f */
#else
0x18000090, /* ldr wip0, 1f */
#endif
1628 0x10000011, /* adr ip1, #0 */
1629 0x8b110210, /* add ip0, ip0, ip1 */
1630 0xd61f0200, /* br ip0 */
1631 0x00000000, /* 1: .xword or .word
1632 R_AARCH64_PRELNN(X) + 12
1637 static const uint32_t aarch64_erratum_835769_stub[] =
1639 0x00000000, /* Placeholder for multiply accumulate. */
1640 0x14000000, /* b <label> */
1643 static const uint32_t aarch64_erratum_843419_stub[] =
1645 0x00000000, /* Placeholder for LDR instruction. */
1646 0x14000000, /* b <label> */
1649 /* Section name for stubs is the associated section name plus this string. */
1651 #define STUB_SUFFIX ".stub"
1653 enum elf_aarch64_stub_type
aarch64_stub_none,
aarch64_stub_adrp_branch,
1657 aarch64_stub_long_branch,
1658 aarch64_stub_erratum_835769_veneer,
1659 aarch64_stub_erratum_843419_veneer,
1662 struct elf_aarch64_stub_hash_entry
1664 /* Base hash table entry structure. */
1665 struct bfd_hash_entry root;
/* The stub section.  */
asection *stub_sec;
1670 /* Offset within stub_sec of the beginning of this stub. */
1671 bfd_vma stub_offset;
1673 /* Given the symbol's value and its section we can determine its final
1674 value when building the stubs (so the stub knows where to jump). */
1675 bfd_vma target_value;
1676 asection *target_section;
1678 enum elf_aarch64_stub_type stub_type;
1680 /* The symbol table entry, if any, that this was derived from. */
1681 struct elf_aarch64_link_hash_entry *h;
1683 /* Destination symbol type */
1684 unsigned char st_type;
1686 /* Where this stub is being called from, or, in the case of combined
1687 stub sections, the first input section in the group. */
1690 /* The name for the local symbol at the start of this stub. The
1691 stub name in the hash table has to be unique; this does not, so
1692 it can be friendlier. */
1695 /* The instruction which caused this stub to be generated (only valid for
1696 erratum 835769 workaround stubs at present). */
1697 uint32_t veneered_insn;
1699 /* In an erratum 843419 workaround stub, the ADRP instruction offset. */
1700 bfd_vma adrp_offset;
1703 /* Used to build a map of a section. This is required for mixed-endian code/data. */
1706 typedef struct elf_elf_section_map
1711 elf_aarch64_section_map;
1714 typedef struct _aarch64_elf_section_data
1716 struct bfd_elf_section_data elf;
1717 unsigned int mapcount;
1718 unsigned int mapsize;
1719 elf_aarch64_section_map *map;
1721 _aarch64_elf_section_data;
1723 #define elf_aarch64_section_data(sec) \
1724 ((_aarch64_elf_section_data *) elf_section_data (sec))
1726 /* The size of the thread control block which is defined to be two pointers. */
1727 #define TCB_SIZE (ARCH_SIZE/8)*2
1729 struct elf_aarch64_local_symbol
1731 unsigned int got_type;
1732 bfd_signed_vma got_refcount;
/* Offset of the GOTPLT entry reserved for the TLS descriptor.  The
   offset is from the end of the jump table and reserved entries
   within the PLTGOT.

   The magic value (bfd_vma) -1 indicates that an offset has not been
   allocated.  */
1741 bfd_vma tlsdesc_got_jump_table_offset;
1744 struct elf_aarch64_obj_tdata
1746 struct elf_obj_tdata root;
1748 /* Local symbol descriptors.  */
1749 struct elf_aarch64_local_symbol *locals;
1751 /* Zero to warn when linking objects with incompatible enum sizes. */
1752 int no_enum_size_warning;
1754 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
1755 int no_wchar_size_warning;
1758 #define elf_aarch64_tdata(bfd) \
1759 ((struct elf_aarch64_obj_tdata *) (bfd)->tdata.any)
1761 #define elf_aarch64_locals(bfd) (elf_aarch64_tdata (bfd)->locals)
1763 #define is_aarch64_elf(bfd) \
1764 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
1765 && elf_tdata (bfd) != NULL \
1766 && elf_object_id (bfd) == AARCH64_ELF_DATA)
1769 elfNN_aarch64_mkobject (bfd *abfd)
return bfd_elf_allocate_object (abfd, sizeof (struct elf_aarch64_obj_tdata),
				AARCH64_ELF_DATA);
1775 #define elf_aarch64_hash_entry(ent) \
1776 ((struct elf_aarch64_link_hash_entry *)(ent))
1778 #define GOT_UNKNOWN 0
1779 #define GOT_NORMAL 1
1780 #define GOT_TLS_GD 2
1781 #define GOT_TLS_IE 4
1782 #define GOT_TLSDESC_GD 8
1784 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
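/* For example (illustrative note), a symbol referenced through both a
   general-dynamic sequence and a TLS descriptor sequence would have
   got_type == (GOT_TLS_GD | GOT_TLSDESC_GD), and GOT_TLS_GD_ANY_P ()
   would be true for it.  */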
1786 /* AArch64 ELF linker hash entry. */
1787 struct elf_aarch64_link_hash_entry
1789 struct elf_link_hash_entry root;
1791 /* Track dynamic relocs copied for this symbol. */
1792 struct elf_dyn_relocs *dyn_relocs;
1794 /* Since PLT entries have variable size, we need to record the
1795 index into .got.plt instead of recomputing it from the PLT offset. */
1797 bfd_signed_vma plt_got_offset;
1799 /* Bit mask representing the type of GOT entry(s), if any, required by this symbol. */
1801 unsigned int got_type;
1803 /* A pointer to the most recently used stub hash entry against this symbol. */
1805 struct elf_aarch64_stub_hash_entry *stub_cache;
1807 /* Offset of the GOTPLT entry reserved for the TLS descriptor. The offset
1808 is from the end of the jump table and reserved entries within the PLTGOT.
1810 The magic value (bfd_vma) -1 indicates that an offset has not been allocated. */
1812 bfd_vma tlsdesc_got_jump_table_offset;
1816 elfNN_aarch64_symbol_got_type (struct elf_link_hash_entry *h,
bfd *abfd, unsigned long r_symndx)
1821 return elf_aarch64_hash_entry (h)->got_type;
1823 if (! elf_aarch64_locals (abfd))
1826 return elf_aarch64_locals (abfd)[r_symndx].got_type;
1829 /* Get the AArch64 elf linker hash table from a link_info structure. */
1830 #define elf_aarch64_hash_table(info) \
1831 ((struct elf_aarch64_link_hash_table *) ((info)->hash))
1833 #define aarch64_stub_hash_lookup(table, string, create, copy) \
1834 ((struct elf_aarch64_stub_hash_entry *) \
1835 bfd_hash_lookup ((table), (string), (create), (copy)))
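/* A typical lookup might look like this (hypothetical sketch; stub_name
   is an illustrative variable, not one defined here):

     struct elf_aarch64_stub_hash_entry *stub_entry
       = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
				   FALSE, FALSE);  */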
1837 /* AArch64 ELF linker hash table. */
1838 struct elf_aarch64_link_hash_table
1840 /* The main hash table. */
1841 struct elf_link_hash_table root;
1843 /* Nonzero to force PIC branch veneers. */
1846 /* Fix erratum 835769. */
1847 int fix_erratum_835769;
1849 /* Fix erratum 843419. */
1850 int fix_erratum_843419;
1852 /* Enable ADRP->ADR rewrite for erratum 843419 workaround. */
1853 int fix_erratum_843419_adr;
1855 /* The number of bytes in the initial entry in the PLT. */
1856 bfd_size_type plt_header_size;
1858 /* The number of bytes in the subsequent PLT entries. */
1859 bfd_size_type plt_entry_size;
1861 /* Short-cuts to get to dynamic linker sections. */
1865 /* Small local sym cache. */
1866 struct sym_cache sym_cache;
1868 /* For convenience in allocate_dynrelocs. */
1871 /* The amount of space used by the reserved portion of the sgotplt
1872 section, plus whatever space is used by the jump slots. */
1873 bfd_vma sgotplt_jump_table_size;
1875 /* The stub hash table. */
1876 struct bfd_hash_table stub_hash_table;
1878 /* Linker stub bfd. */
1881 /* Linker call-backs. */
1882 asection *(*add_stub_section) (const char *, asection *);
1883 void (*layout_sections_again) (void);
1885 /* Array to keep track of which stub sections have been created, and
1886 information on stub grouping. */
1889 /* This is the section to which stubs in the group will be attached. */
1892 /* The stub section. */
1896 /* Assorted information used by elfNN_aarch64_size_stubs. */
1897 unsigned int bfd_count;
1899 asection **input_list;
1901 /* The offset into splt of the PLT entry for the TLS descriptor
1902 resolver. Special values are 0, if not necessary (or not found
1903 to be necessary yet), and -1 if needed but not determined yet. */
1905 bfd_vma tlsdesc_plt;
1907 /* The GOT offset for the lazy trampoline. Communicated to the
1908 loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
1909 indicates an offset is not allocated. */
1910 bfd_vma dt_tlsdesc_got;
1912 /* Used by local STT_GNU_IFUNC symbols. */
1913 htab_t loc_hash_table;
1914 void * loc_hash_memory;
1917 /* Create an entry in an AArch64 ELF linker hash table. */
1919 static struct bfd_hash_entry *
1920 elfNN_aarch64_link_hash_newfunc (struct bfd_hash_entry *entry,
1921 struct bfd_hash_table *table, const char *string)
1924 struct elf_aarch64_link_hash_entry *ret =
1925 (struct elf_aarch64_link_hash_entry *) entry;
1927 /* Allocate the structure if it has not already been allocated by a subclass. */
1930 ret = bfd_hash_allocate (table,
1931 sizeof (struct elf_aarch64_link_hash_entry));
1933 return (struct bfd_hash_entry *) ret;
1935 /* Call the allocation method of the superclass. */
1936 ret = ((struct elf_aarch64_link_hash_entry *)
1937 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
1941 ret->dyn_relocs = NULL;
1942 ret->got_type = GOT_UNKNOWN;
1943 ret->plt_got_offset = (bfd_vma) - 1;
1944 ret->stub_cache = NULL;
1945 ret->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
1948 return (struct bfd_hash_entry *) ret;
1951 /* Initialize an entry in the stub hash table. */
1953 static struct bfd_hash_entry *
1954 stub_hash_newfunc (struct bfd_hash_entry *entry,
1955 struct bfd_hash_table *table, const char *string)
1957 /* Allocate the structure if it has not already been allocated by a subclass.  */
1961 entry = bfd_hash_allocate (table,
1963 elf_aarch64_stub_hash_entry));
1968 /* Call the allocation method of the superclass. */
1969 entry = bfd_hash_newfunc (entry, table, string);
1972 struct elf_aarch64_stub_hash_entry *eh;
1974 /* Initialize the local fields. */
1975 eh = (struct elf_aarch64_stub_hash_entry *) entry;
1976 eh->adrp_offset = 0;
1977 eh->stub_sec = NULL;
1978 eh->stub_offset = 0;
1979 eh->target_value = 0;
1980 eh->target_section = NULL;
1981 eh->stub_type = aarch64_stub_none;
1989 /* Compute a hash of a local hash entry.  We use elf_link_hash_entry
1990 for local symbols so that we can handle local STT_GNU_IFUNC symbols
1991 as global symbols.  We reuse indx and dynstr_index for the local symbol
1992 hash since they aren't used by global symbols in this backend.  */
1995 elfNN_aarch64_local_htab_hash (const void *ptr)
1997 struct elf_link_hash_entry *h
1998 = (struct elf_link_hash_entry *) ptr;
1999 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index);
2002 /* Compare local hash entries. */
2005 elfNN_aarch64_local_htab_eq (const void *ptr1, const void *ptr2)
2007 struct elf_link_hash_entry *h1
2008 = (struct elf_link_hash_entry *) ptr1;
2009 struct elf_link_hash_entry *h2
2010 = (struct elf_link_hash_entry *) ptr2;
2012 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index;
2015 /* Find and/or create a hash entry for a local symbol.  */
2017 static struct elf_link_hash_entry *
2018 elfNN_aarch64_get_local_sym_hash (struct elf_aarch64_link_hash_table *htab,
2019 bfd *abfd, const Elf_Internal_Rela *rel,
2022 struct elf_aarch64_link_hash_entry e, *ret;
2023 asection *sec = abfd->sections;
2024 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id,
2025 ELFNN_R_SYM (rel->r_info));
2028 e.root.indx = sec->id;
2029 e.root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2030 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h,
2031 create ? INSERT : NO_INSERT);
2038 ret = (struct elf_aarch64_link_hash_entry *) *slot;
2042 ret = (struct elf_aarch64_link_hash_entry *)
2043 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory,
2044 sizeof (struct elf_aarch64_link_hash_entry));
2047 memset (ret, 0, sizeof (*ret));
2048 ret->root.indx = sec->id;
2049 ret->root.dynstr_index = ELFNN_R_SYM (rel->r_info);
2050 ret->root.dynindx = -1;
2056 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2059 elfNN_aarch64_copy_indirect_symbol (struct bfd_link_info *info,
2060 struct elf_link_hash_entry *dir,
2061 struct elf_link_hash_entry *ind)
2063 struct elf_aarch64_link_hash_entry *edir, *eind;
2065 edir = (struct elf_aarch64_link_hash_entry *) dir;
2066 eind = (struct elf_aarch64_link_hash_entry *) ind;
2068 if (eind->dyn_relocs != NULL)
2070 if (edir->dyn_relocs != NULL)
2072 struct elf_dyn_relocs **pp;
2073 struct elf_dyn_relocs *p;
2075 /* Add reloc counts against the indirect sym to the direct sym
2076 list. Merge any entries against the same section. */
2077 for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
2079 struct elf_dyn_relocs *q;
2081 for (q = edir->dyn_relocs; q != NULL; q = q->next)
2082 if (q->sec == p->sec)
2084 q->pc_count += p->pc_count;
2085 q->count += p->count;
2092 *pp = edir->dyn_relocs;
2095 edir->dyn_relocs = eind->dyn_relocs;
2096 eind->dyn_relocs = NULL;
2099 if (ind->root.type == bfd_link_hash_indirect)
2101 /* Copy over the GOT entry type.  */
2102 if (dir->got.refcount <= 0)
2104 edir->got_type = eind->got_type;
2105 eind->got_type = GOT_UNKNOWN;
2109 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2112 /* Destroy an AArch64 elf linker hash table. */
2115 elfNN_aarch64_link_hash_table_free (bfd *obfd)
2117 struct elf_aarch64_link_hash_table *ret
2118 = (struct elf_aarch64_link_hash_table *) obfd->link.hash;
2120 if (ret->loc_hash_table)
2121 htab_delete (ret->loc_hash_table);
2122 if (ret->loc_hash_memory)
2123 objalloc_free ((struct objalloc *) ret->loc_hash_memory);
2125 bfd_hash_table_free (&ret->stub_hash_table);
2126 _bfd_elf_link_hash_table_free (obfd);
2129 /* Create an AArch64 elf linker hash table. */
2131 static struct bfd_link_hash_table *
2132 elfNN_aarch64_link_hash_table_create (bfd *abfd)
2134 struct elf_aarch64_link_hash_table *ret;
2135 bfd_size_type amt = sizeof (struct elf_aarch64_link_hash_table);
2137 ret = bfd_zmalloc (amt);
2141 if (!_bfd_elf_link_hash_table_init
2142 (&ret->root, abfd, elfNN_aarch64_link_hash_newfunc,
2143 sizeof (struct elf_aarch64_link_hash_entry), AARCH64_ELF_DATA))
2149 ret->plt_header_size = PLT_ENTRY_SIZE;
2150 ret->plt_entry_size = PLT_SMALL_ENTRY_SIZE;
2152 ret->dt_tlsdesc_got = (bfd_vma) - 1;
2154 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2155 sizeof (struct elf_aarch64_stub_hash_entry)))
2157 _bfd_elf_link_hash_table_free (abfd);
2161 ret->loc_hash_table = htab_try_create (1024,
2162 elfNN_aarch64_local_htab_hash,
2163 elfNN_aarch64_local_htab_eq,
2165 ret->loc_hash_memory = objalloc_create ();
2166 if (!ret->loc_hash_table || !ret->loc_hash_memory)
2168 elfNN_aarch64_link_hash_table_free (abfd);
2171 ret->root.root.hash_table_free = elfNN_aarch64_link_hash_table_free;
2173 return &ret->root.root;
2177 aarch64_relocate (unsigned int r_type, bfd *input_bfd, asection *input_section,
2178 bfd_vma offset, bfd_vma value)
2180 reloc_howto_type *howto;
2183 howto = elfNN_aarch64_howto_from_type (r_type);
2184 place = (input_section->output_section->vma + input_section->output_offset
2187 r_type = elfNN_aarch64_bfd_reloc_from_type (r_type);
2188 value = _bfd_aarch64_elf_resolve_relocation (r_type, place, value, 0, FALSE);
2189 return _bfd_aarch64_elf_put_addend (input_bfd,
2190 input_section->contents + offset, r_type,
2194 static enum elf_aarch64_stub_type
2195 aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
2197 if (aarch64_valid_for_adrp_p (value, place))
2198 return aarch64_stub_adrp_branch;
2199 return aarch64_stub_long_branch;
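/* Note: the choice above is driven by aarch64_valid_for_adrp_p; an
   ADRP based veneer is only usable when the destination page is within
   the +/-4GB range an ADRP immediate (a signed 21-bit page offset) can
   express, otherwise the literal based long branch stub is kept.  */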
2202 /* Determine the type of stub needed, if any, for a call. */
2204 static enum elf_aarch64_stub_type
2205 aarch64_type_of_stub (struct bfd_link_info *info,
2206 asection *input_sec,
2207 const Elf_Internal_Rela *rel,
2208 unsigned char st_type,
2209 struct elf_aarch64_link_hash_entry *hash,
2210 bfd_vma destination)
2213 bfd_signed_vma branch_offset;
2214 unsigned int r_type;
2215 struct elf_aarch64_link_hash_table *globals;
2216 enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
2217 bfd_boolean via_plt_p;
2219 if (st_type != STT_FUNC)
2222 globals = elf_aarch64_hash_table (info);
2223 via_plt_p = (globals->root.splt != NULL && hash != NULL
2224 && hash->root.plt.offset != (bfd_vma) - 1);
2229 /* Determine where the call point is. */
2230 location = (input_sec->output_offset
2231 + input_sec->output_section->vma + rel->r_offset);
2233 branch_offset = (bfd_signed_vma) (destination - location);
2235 r_type = ELFNN_R_TYPE (rel->r_info);
2237 /* We don't want to redirect any old unconditional jump in this way,
2238 only one which is being used for a sibcall, where it is
2239 acceptable for the IP0 and IP1 registers to be clobbered. */
2240 if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
2241 && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
2242 || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
2244 stub_type = aarch64_stub_long_branch;
2250 /* Build a name for an entry in the stub hash table. */
2253 elfNN_aarch64_stub_name (const asection *input_section,
2254 const asection *sym_sec,
2255 const struct elf_aarch64_link_hash_entry *hash,
2256 const Elf_Internal_Rela *rel)
2263 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
2264 stub_name = bfd_malloc (len);
2265 if (stub_name != NULL)
2266 snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
2267 (unsigned int) input_section->id,
2268 hash->root.root.root.string,
2273 len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
2274 stub_name = bfd_malloc (len);
2275 if (stub_name != NULL)
2276 snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
2277 (unsigned int) input_section->id,
2278 (unsigned int) sym_sec->id,
2279 (unsigned int) ELFNN_R_SYM (rel->r_info),
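/* For illustration (the section ids and addend used here are arbitrary):
   with the formats above, a branch from the input section with id 0x2a to
   the global symbol "printf" with addend 0 gets the stub name
   "0000002a_printf+0", while a branch to local symbol index 5 in the
   section with id 0x1f gets "0000002a_1f:5+0".  */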
2286 /* Look up an entry in the stub hash. Stub entries are cached because
2287 creating the stub name takes a bit of time. */
2289 static struct elf_aarch64_stub_hash_entry *
2290 elfNN_aarch64_get_stub_entry (const asection *input_section,
2291 const asection *sym_sec,
2292 struct elf_link_hash_entry *hash,
2293 const Elf_Internal_Rela *rel,
2294 struct elf_aarch64_link_hash_table *htab)
2296 struct elf_aarch64_stub_hash_entry *stub_entry;
2297 struct elf_aarch64_link_hash_entry *h =
2298 (struct elf_aarch64_link_hash_entry *) hash;
2299 const asection *id_sec;
2301 if ((input_section->flags & SEC_CODE) == 0)
2304 /* If this input section is part of a group of sections sharing one
2305 stub section, then use the id of the first section in the group.
2306 Stub names need to include a section id, as there may well be
2307 more than one stub used to reach say, printf, and we need to
2308 distinguish between them. */
2309 id_sec = htab->stub_group[input_section->id].link_sec;
2311 if (h != NULL && h->stub_cache != NULL
2312 && h->stub_cache->h == h && h->stub_cache->id_sec == id_sec)
2314 stub_entry = h->stub_cache;
2320 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
2321 if (stub_name == NULL)
2324 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table,
2325 stub_name, FALSE, FALSE);
2327 h->stub_cache = stub_entry;
2336 /* Create a stub section. */
2339 _bfd_aarch64_create_stub_section (asection *section,
2340 struct elf_aarch64_link_hash_table *htab)
2346 namelen = strlen (section->name);
2347 len = namelen + sizeof (STUB_SUFFIX);
2348 s_name = bfd_alloc (htab->stub_bfd, len);
2352 memcpy (s_name, section->name, namelen);
2353 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
2354 return (*htab->add_stub_section) (s_name, section);
2358 /* Find or create a stub section for a link section.
2360 Find or create the stub section used to collect stubs attached to
2361 the specified link section. */
2364 _bfd_aarch64_get_stub_for_link_section (asection *link_section,
2365 struct elf_aarch64_link_hash_table *htab)
2367 if (htab->stub_group[link_section->id].stub_sec == NULL)
2368 htab->stub_group[link_section->id].stub_sec
2369 = _bfd_aarch64_create_stub_section (link_section, htab);
2370 return htab->stub_group[link_section->id].stub_sec;
2374 /* Find or create a stub section in the stub group for an input section.  */
2378 _bfd_aarch64_create_or_find_stub_sec (asection *section,
2379 struct elf_aarch64_link_hash_table *htab)
2381 asection *link_sec = htab->stub_group[section->id].link_sec;
2382 return _bfd_aarch64_get_stub_for_link_section (link_sec, htab);
2386 /* Add a new stub entry in the stub group associated with an input
2387 section to the stub hash.  Not all fields of the new stub entry are initialised.  */
2390 static struct elf_aarch64_stub_hash_entry *
2391 _bfd_aarch64_add_stub_entry_in_group (const char *stub_name,
2393 struct elf_aarch64_link_hash_table *htab)
2397 struct elf_aarch64_stub_hash_entry *stub_entry;
2399 link_sec = htab->stub_group[section->id].link_sec;
2400 stub_sec = _bfd_aarch64_create_or_find_stub_sec (section, htab);
2402 /* Enter this entry into the linker stub hash table. */
2403 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2405 if (stub_entry == NULL)
2407 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
2408 section->owner, stub_name);
2412 stub_entry->stub_sec = stub_sec;
2413 stub_entry->stub_offset = 0;
2414 stub_entry->id_sec = link_sec;
2419 /* Add a new stub entry in the final stub section to the stub hash.
2420 Not all fields of the new stub entry are initialised. */
2422 static struct elf_aarch64_stub_hash_entry *
2423 _bfd_aarch64_add_stub_entry_after (const char *stub_name,
2424 asection *link_section,
2425 struct elf_aarch64_link_hash_table *htab)
2428 struct elf_aarch64_stub_hash_entry *stub_entry;
2430 stub_sec = _bfd_aarch64_get_stub_for_link_section (link_section, htab);
2431 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
2433 if (stub_entry == NULL)
2435 (*_bfd_error_handler) (_("cannot create stub entry %s"), stub_name);
2439 stub_entry->stub_sec = stub_sec;
2440 stub_entry->stub_offset = 0;
2441 stub_entry->id_sec = link_section;
2448 aarch64_build_one_stub (struct bfd_hash_entry *gen_entry,
2449 void *in_arg ATTRIBUTE_UNUSED)
2451 struct elf_aarch64_stub_hash_entry *stub_entry;
2456 bfd_vma veneered_insn_loc;
2457 bfd_vma veneer_entry_loc;
2458 bfd_signed_vma branch_offset = 0;
2459 unsigned int template_size;
2460 const uint32_t *template;
2463 /* Massage our args to the form they really have. */
2464 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2466 stub_sec = stub_entry->stub_sec;
2468 /* Make a note of the offset within the stubs for this entry. */
2469 stub_entry->stub_offset = stub_sec->size;
2470 loc = stub_sec->contents + stub_entry->stub_offset;
2472 stub_bfd = stub_sec->owner;
2474 /* This is the address of the stub destination. */
2475 sym_value = (stub_entry->target_value
2476 + stub_entry->target_section->output_offset
2477 + stub_entry->target_section->output_section->vma);
2479 if (stub_entry->stub_type == aarch64_stub_long_branch)
2481 bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
2482 + stub_sec->output_offset);
2484 /* See if we can relax the stub. */
2485 if (aarch64_valid_for_adrp_p (sym_value, place))
2486 stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
2489 switch (stub_entry->stub_type)
2491 case aarch64_stub_adrp_branch:
2492 template = aarch64_adrp_branch_stub;
2493 template_size = sizeof (aarch64_adrp_branch_stub);
2495 case aarch64_stub_long_branch:
2496 template = aarch64_long_branch_stub;
2497 template_size = sizeof (aarch64_long_branch_stub);
2499 case aarch64_stub_erratum_835769_veneer:
2500 template = aarch64_erratum_835769_stub;
2501 template_size = sizeof (aarch64_erratum_835769_stub);
2503 case aarch64_stub_erratum_843419_veneer:
2504 template = aarch64_erratum_843419_stub;
2505 template_size = sizeof (aarch64_erratum_843419_stub);
2511 for (i = 0; i < (template_size / sizeof template[0]); i++)
2513 bfd_putl32 (template[i], loc);
2517 template_size = (template_size + 7) & ~7;
2518 stub_sec->size += template_size;
2520 switch (stub_entry->stub_type)
2522 case aarch64_stub_adrp_branch:
2523 if (aarch64_relocate (AARCH64_R (ADR_PREL_PG_HI21), stub_bfd, stub_sec,
2524 stub_entry->stub_offset, sym_value))
2525 /* The stub would not have been relaxed if the offset was out of range.  */
2529 if (aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
2530 stub_entry->stub_offset + 4, sym_value))
2534 case aarch64_stub_long_branch:
2535 /* We want the value relative to the address 12 bytes back from the value itself.  */
2537 if (aarch64_relocate (AARCH64_R (PRELNN), stub_bfd, stub_sec,
2538 stub_entry->stub_offset + 16, sym_value + 12))
2542 case aarch64_stub_erratum_835769_veneer:
2543 veneered_insn_loc = stub_entry->target_section->output_section->vma
2544 + stub_entry->target_section->output_offset
2545 + stub_entry->target_value;
2546 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
2547 + stub_entry->stub_sec->output_offset
2548 + stub_entry->stub_offset;
2549 branch_offset = veneered_insn_loc - veneer_entry_loc;
2550 branch_offset >>= 2;
2551 branch_offset &= 0x3ffffff;
2552 bfd_putl32 (stub_entry->veneered_insn,
2553 stub_sec->contents + stub_entry->stub_offset);
2554 bfd_putl32 (template[1] | branch_offset,
2555 stub_sec->contents + stub_entry->stub_offset + 4);
2558 case aarch64_stub_erratum_843419_veneer:
2559 if (aarch64_relocate (AARCH64_R (JUMP26), stub_bfd, stub_sec,
2560 stub_entry->stub_offset + 4, sym_value + 4))
2571 /* As above, but don't actually build the stub. Just bump offset so
2572 we know stub section sizes. */
2575 aarch64_size_one_stub (struct bfd_hash_entry *gen_entry,
2576 void *in_arg ATTRIBUTE_UNUSED)
2578 struct elf_aarch64_stub_hash_entry *stub_entry;
2581 /* Massage our args to the form they really have. */
2582 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
2584 switch (stub_entry->stub_type)
2586 case aarch64_stub_adrp_branch:
2587 size = sizeof (aarch64_adrp_branch_stub);
2589 case aarch64_stub_long_branch:
2590 size = sizeof (aarch64_long_branch_stub);
2592 case aarch64_stub_erratum_835769_veneer:
2593 size = sizeof (aarch64_erratum_835769_stub);
2595 case aarch64_stub_erratum_843419_veneer:
2596 size = sizeof (aarch64_erratum_843419_stub);
2602 size = (size + 7) & ~7;
2603 stub_entry->stub_sec->size += size;
2607 /* External entry points for sizing and building linker stubs. */
2609 /* Set up various things so that we can make a list of input sections
2610 for each output section included in the link. Returns -1 on error,
2611 0 when no stubs will be needed, and 1 on success. */
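/* The routines below are expected to be driven from the linker's AArch64
   emulation code which, in the usual arrangement (mirroring the ARM port),
   calls elfNN_aarch64_setup_section_lists once, feeds each input section to
   elfNN_aarch64_next_input_section, and then invokes
   elfNN_aarch64_size_stubs followed by elfNN_aarch64_build_stubs.  */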
2614 elfNN_aarch64_setup_section_lists (bfd *output_bfd,
2615 struct bfd_link_info *info)
2618 unsigned int bfd_count;
2619 int top_id, top_index;
2621 asection **input_list, **list;
2623 struct elf_aarch64_link_hash_table *htab =
2624 elf_aarch64_hash_table (info);
2626 if (!is_elf_hash_table (htab))
2629 /* Count the number of input BFDs and find the top input section id. */
2630 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
2631 input_bfd != NULL; input_bfd = input_bfd->link.next)
2634 for (section = input_bfd->sections;
2635 section != NULL; section = section->next)
2637 if (top_id < section->id)
2638 top_id = section->id;
2641 htab->bfd_count = bfd_count;
2643 amt = sizeof (struct map_stub) * (top_id + 1);
2644 htab->stub_group = bfd_zmalloc (amt);
2645 if (htab->stub_group == NULL)
2648 /* We can't use output_bfd->section_count here to find the top output
2649 section index as some sections may have been removed, and
2650 _bfd_strip_section_from_output doesn't renumber the indices. */
2651 for (section = output_bfd->sections, top_index = 0;
2652 section != NULL; section = section->next)
2654 if (top_index < section->index)
2655 top_index = section->index;
2658 htab->top_index = top_index;
2659 amt = sizeof (asection *) * (top_index + 1);
2660 input_list = bfd_malloc (amt);
2661 htab->input_list = input_list;
2662 if (input_list == NULL)
2665 /* For sections we aren't interested in, mark their entries with a
2666 value we can check later. */
2667 list = input_list + top_index;
2669 *list = bfd_abs_section_ptr;
2670 while (list-- != input_list);
2672 for (section = output_bfd->sections;
2673 section != NULL; section = section->next)
2675 if ((section->flags & SEC_CODE) != 0)
2676 input_list[section->index] = NULL;
2682 /* Used by elfNN_aarch64_next_input_section and group_sections. */
2683 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
2685 /* The linker repeatedly calls this function for each input section,
2686 in the order that input sections are linked into output sections.
2687 Build lists of input sections to determine groupings between which
2688 we may insert linker stubs. */
2691 elfNN_aarch64_next_input_section (struct bfd_link_info *info, asection *isec)
2693 struct elf_aarch64_link_hash_table *htab =
2694 elf_aarch64_hash_table (info);
2696 if (isec->output_section->index <= htab->top_index)
2698 asection **list = htab->input_list + isec->output_section->index;
2700 if (*list != bfd_abs_section_ptr)
2702 /* Steal the link_sec pointer for our list. */
2703 /* This happens to make the list in reverse order,
2704 which is what we want. */
2705 PREV_SEC (isec) = *list;
2711 /* See whether we can group stub sections together. Grouping stub
2712 sections may result in fewer stubs. More importantly, we need to
2713 put all .init* and .fini* stubs at the beginning of the .init or
2714 .fini output sections respectively, because glibc splits the
2715 _init and _fini functions into multiple parts. Putting a stub in
2716 the middle of a function is not a good idea. */
2719 group_sections (struct elf_aarch64_link_hash_table *htab,
2720 bfd_size_type stub_group_size,
2721 bfd_boolean stubs_always_before_branch)
2723 asection **list = htab->input_list + htab->top_index;
2727 asection *tail = *list;
2729 if (tail == bfd_abs_section_ptr)
2732 while (tail != NULL)
2736 bfd_size_type total;
2740 while ((prev = PREV_SEC (curr)) != NULL
2741 && ((total += curr->output_offset - prev->output_offset)
2745 /* OK, the size from the start of CURR to the end is less
2746 than stub_group_size and thus can be handled by one stub
2747 section. (Or the tail section is itself larger than
2748 stub_group_size, in which case we may be toast.)
2749 We should really be keeping track of the total size of
2750 stubs added here, as stubs contribute to the final output section size.  */
2754 prev = PREV_SEC (tail);
2755 /* Set up this stub group. */
2756 htab->stub_group[tail->id].link_sec = curr;
2758 while (tail != curr && (tail = prev) != NULL);
2760 /* But wait, there's more! Input sections up to stub_group_size
2761 bytes before the stub section can be handled by it too. */
2762 if (!stubs_always_before_branch)
2766 && ((total += tail->output_offset - prev->output_offset)
2770 prev = PREV_SEC (tail);
2771 htab->stub_group[tail->id].link_sec = curr;
2777 while (list-- != htab->input_list);
2779 free (htab->input_list);
2784 #define AARCH64_BITS(x, pos, n) (((x) >> (pos)) & ((1 << (n)) - 1))
2786 #define AARCH64_RT(insn) AARCH64_BITS (insn, 0, 5)
2787 #define AARCH64_RT2(insn) AARCH64_BITS (insn, 10, 5)
2788 #define AARCH64_RA(insn) AARCH64_BITS (insn, 10, 5)
2789 #define AARCH64_RD(insn) AARCH64_BITS (insn, 0, 5)
2790 #define AARCH64_RN(insn) AARCH64_BITS (insn, 5, 5)
2791 #define AARCH64_RM(insn) AARCH64_BITS (insn, 16, 5)
2793 #define AARCH64_MAC(insn) (((insn) & 0xff000000) == 0x9b000000)
2794 #define AARCH64_BIT(insn, n) AARCH64_BITS (insn, n, 1)
2795 #define AARCH64_OP31(insn) AARCH64_BITS (insn, 21, 3)
2796 #define AARCH64_ZR 0x1f
2798 /* All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
2799 LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops. */
2801 #define AARCH64_LD(insn) (AARCH64_BIT (insn, 22) == 1)
2802 #define AARCH64_LDST(insn) (((insn) & 0x0a000000) == 0x08000000)
2803 #define AARCH64_LDST_EX(insn) (((insn) & 0x3f000000) == 0x08000000)
2804 #define AARCH64_LDST_PCREL(insn) (((insn) & 0x3b000000) == 0x18000000)
2805 #define AARCH64_LDST_NAP(insn) (((insn) & 0x3b800000) == 0x28000000)
2806 #define AARCH64_LDSTP_PI(insn) (((insn) & 0x3b800000) == 0x28800000)
2807 #define AARCH64_LDSTP_O(insn) (((insn) & 0x3b800000) == 0x29000000)
2808 #define AARCH64_LDSTP_PRE(insn) (((insn) & 0x3b800000) == 0x29800000)
2809 #define AARCH64_LDST_UI(insn) (((insn) & 0x3b200c00) == 0x38000000)
2810 #define AARCH64_LDST_PIIMM(insn) (((insn) & 0x3b200c00) == 0x38000400)
2811 #define AARCH64_LDST_U(insn) (((insn) & 0x3b200c00) == 0x38000800)
2812 #define AARCH64_LDST_PREIMM(insn) (((insn) & 0x3b200c00) == 0x38000c00)
2813 #define AARCH64_LDST_RO(insn) (((insn) & 0x3b200c00) == 0x38200800)
2814 #define AARCH64_LDST_UIMM(insn) (((insn) & 0x3b000000) == 0x39000000)
2815 #define AARCH64_LDST_SIMD_M(insn) (((insn) & 0xbfbf0000) == 0x0c000000)
2816 #define AARCH64_LDST_SIMD_M_PI(insn) (((insn) & 0xbfa00000) == 0x0c800000)
2817 #define AARCH64_LDST_SIMD_S(insn) (((insn) & 0xbf9f0000) == 0x0d000000)
2818 #define AARCH64_LDST_SIMD_S_PI(insn) (((insn) & 0xbf800000) == 0x0d800000)
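/* Worked example for the classification macros above (the instruction is
   just an illustration): 0xf9400001, i.e. "ldr x1, [x0]", satisfies
   AARCH64_LDST and AARCH64_LDST_UIMM; AARCH64_LD is 1 (a load),
   AARCH64_RT extracts register 1 and AARCH64_RN extracts register 0.  */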
2820 /* Classify an INSN if it is indeed a load/store.
2822 Return TRUE if INSN is a LD/ST instruction otherwise return FALSE.
2824 For scalar LD/ST instructions PAIR is FALSE, RT is returned and RT2 is set equal to RT.
2827 For LD/ST pair instructions PAIR is TRUE, RT and RT2 are returned.
2832 aarch64_mem_op_p (uint32_t insn, unsigned int *rt, unsigned int *rt2,
2833 bfd_boolean *pair, bfd_boolean *load)
2841 /* Bail out quickly if INSN doesn't fall into the load-store encoding space.
2843 if (!AARCH64_LDST (insn))
2848 if (AARCH64_LDST_EX (insn))
2850 *rt = AARCH64_RT (insn);
2852 if (AARCH64_BIT (insn, 21) == 1)
2855 *rt2 = AARCH64_RT2 (insn);
2857 *load = AARCH64_LD (insn);
2860 else if (AARCH64_LDST_NAP (insn)
2861 || AARCH64_LDSTP_PI (insn)
2862 || AARCH64_LDSTP_O (insn)
2863 || AARCH64_LDSTP_PRE (insn))
2866 *rt = AARCH64_RT (insn);
2867 *rt2 = AARCH64_RT2 (insn);
2868 *load = AARCH64_LD (insn);
2871 else if (AARCH64_LDST_PCREL (insn)
2872 || AARCH64_LDST_UI (insn)
2873 || AARCH64_LDST_PIIMM (insn)
2874 || AARCH64_LDST_U (insn)
2875 || AARCH64_LDST_PREIMM (insn)
2876 || AARCH64_LDST_RO (insn)
2877 || AARCH64_LDST_UIMM (insn))
2879 *rt = AARCH64_RT (insn);
2881 if (AARCH64_LDST_PCREL (insn))
2883 opc = AARCH64_BITS (insn, 22, 2);
2884 v = AARCH64_BIT (insn, 26);
2885 opc_v = opc | (v << 2);
2886 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
2887 || opc_v == 5 || opc_v == 7);
2890 else if (AARCH64_LDST_SIMD_M (insn)
2891 || AARCH64_LDST_SIMD_M_PI (insn))
2893 *rt = AARCH64_RT (insn);
2894 *load = AARCH64_BIT (insn, 22);
2895 opcode = (insn >> 12) & 0xf;
2922 else if (AARCH64_LDST_SIMD_S (insn)
2923 || AARCH64_LDST_SIMD_S_PI (insn))
2925 *rt = AARCH64_RT (insn);
2926 r = (insn >> 21) & 1;
2927 *load = AARCH64_BIT (insn, 22);
2928 opcode = (insn >> 13) & 0x7;
2940 *rt2 = *rt + (r == 0 ? 2 : 3);
2948 *rt2 = *rt + (r == 0 ? 2 : 3);
2960 /* Return TRUE if INSN is multiply-accumulate. */
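/* Illustrative example (register choices arbitrary): 0x9b020c20,
   i.e. "madd x0, x1, x2, x3", is accepted below because AARCH64_MAC
   matches, op31 is 0 and RA is x3 rather than XZR; "mul x0, x1, x2",
   which is MADD with RA = XZR, is rejected.  */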
2963 aarch64_mlxl_p (uint32_t insn)
2965 uint32_t op31 = AARCH64_OP31 (insn);
2967 if (AARCH64_MAC (insn)
2968 && (op31 == 0 || op31 == 1 || op31 == 5)
2969 /* Exclude MUL instructions which are encoded as a multiply-accumulate with RA = XZR.
2971 && AARCH64_RA (insn) != AARCH64_ZR)
2977 /* Some early revisions of the Cortex-A53 have an erratum (835769) whereby
2978 it is possible for a 64-bit multiply-accumulate instruction to generate an
2979 incorrect result. The details are quite complex and hard to
2980 determine statically, since branches in the code may exist in some
2981 circumstances, but all cases end with a memory (load, store, or
2982 prefetch) instruction followed immediately by the multiply-accumulate
2983 operation. We employ a linker patching technique, by moving the potentially
2984 affected multiply-accumulate instruction into a patch region and replacing
2985 the original instruction with a branch to the patch. This function checks
2986 if INSN_1 is the memory operation followed by a multiply-accumulate
2987 operation (INSN_2). Return TRUE if an erratum sequence is found, FALSE
2988 if INSN_1 and INSN_2 are safe. */
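/* For example (an illustrative sequence, not taken from real code):

   str x1, [x2]
   madd x3, x4, x5, x6

   is flagged by aarch64_erratum_sequence below: the store is immediately
   followed by a 64-bit multiply-accumulate and, being a store, it cannot
   benefit from the true-dependency escape described for loads.  */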
2991 aarch64_erratum_sequence (uint32_t insn_1, uint32_t insn_2)
3001 if (aarch64_mlxl_p (insn_2)
3002 && aarch64_mem_op_p (insn_1, &rt, &rt2, &pair, &load))
3004 /* Any SIMD memory op is independent of the subsequent MLA
3005 by definition of the erratum. */
3006 if (AARCH64_BIT (insn_1, 26))
3009 /* If not SIMD, check for integer memory ops and MLA relationship. */
3010 rn = AARCH64_RN (insn_2);
3011 ra = AARCH64_RA (insn_2);
3012 rm = AARCH64_RM (insn_2);
3014 /* If this is a load and there's a true(RAW) dependency, we are safe
3015 and this is not an erratum sequence. */
3017 (rt == rn || rt == rm || rt == ra
3018 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
3021 /* We conservatively put out stubs for all other cases (including non-loads).  */
3029 /* Used to order a list of mapping symbols by address. */
3032 elf_aarch64_compare_mapping (const void *a, const void *b)
3034 const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
3035 const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
3037 if (amap->vma > bmap->vma)
3039 else if (amap->vma < bmap->vma)
3041 else if (amap->type > bmap->type)
3042 /* Ensure results do not depend on the host qsort for objects with
3043 multiple mapping symbols at the same address by sorting on type after vma.  */
3046 else if (amap->type < bmap->type)
3054 _bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
3056 char *stub_name = (char *) bfd_malloc
3057 (strlen ("__erratum_835769_veneer_") + 16);
3058 sprintf (stub_name,"__erratum_835769_veneer_%d", num_fixes);
3062 /* Scan for Cortex-A53 erratum 835769 sequence.
3064 Return TRUE on success, FALSE on abnormal termination.  */
3067 _bfd_aarch64_erratum_835769_scan (bfd *input_bfd,
3068 struct bfd_link_info *info,
3069 unsigned int *num_fixes_p)
3072 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3073 unsigned int num_fixes = *num_fixes_p;
3078 for (section = input_bfd->sections;
3080 section = section->next)
3082 bfd_byte *contents = NULL;
3083 struct _aarch64_elf_section_data *sec_data;
3086 if (elf_section_type (section) != SHT_PROGBITS
3087 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3088 || (section->flags & SEC_EXCLUDE) != 0
3089 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3090 || (section->output_section == bfd_abs_section_ptr))
3093 if (elf_section_data (section)->this_hdr.contents != NULL)
3094 contents = elf_section_data (section)->this_hdr.contents;
3095 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3098 sec_data = elf_aarch64_section_data (section);
3100 qsort (sec_data->map, sec_data->mapcount,
3101 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3103 for (span = 0; span < sec_data->mapcount; span++)
3105 unsigned int span_start = sec_data->map[span].vma;
3106 unsigned int span_end = ((span == sec_data->mapcount - 1)
3107 ? sec_data->map[0].vma + section->size
3108 : sec_data->map[span + 1].vma);
3110 char span_type = sec_data->map[span].type;
3112 if (span_type == 'd')
3115 for (i = span_start; i + 4 < span_end; i += 4)
3117 uint32_t insn_1 = bfd_getl32 (contents + i);
3118 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3120 if (aarch64_erratum_sequence (insn_1, insn_2))
3122 struct elf_aarch64_stub_hash_entry *stub_entry;
3123 char *stub_name = _bfd_aarch64_erratum_835769_stub_name (num_fixes);
3127 stub_entry = _bfd_aarch64_add_stub_entry_in_group (stub_name,
3133 stub_entry->stub_type = aarch64_stub_erratum_835769_veneer;
3134 stub_entry->target_section = section;
3135 stub_entry->target_value = i + 4;
3136 stub_entry->veneered_insn = insn_2;
3137 stub_entry->output_name = stub_name;
3142 if (elf_section_data (section)->this_hdr.contents == NULL)
3146 *num_fixes_p = num_fixes;
3152 /* Test if instruction INSN is ADRP. */
3155 _bfd_aarch64_adrp_p (uint32_t insn)
3157 return ((insn & 0x9f000000) == 0x90000000);
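/* For example, 0x90000000 ("adrp x0, <current page>") matches the test
   above, while 0x10000000 ("adr x0, .") does not, because ADR has bit 31
   clear.  */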
3161 /* Helper predicate to look for cortex-a53 erratum 843419 sequence 1. */
3164 _bfd_aarch64_erratum_843419_sequence_p (uint32_t insn_1, uint32_t insn_2,
3172 return (aarch64_mem_op_p (insn_2, &rt, &rt2, &pair, &load)
3175 && AARCH64_LDST_UIMM (insn_3)
3176 && AARCH64_RN (insn_3) == AARCH64_RD (insn_1));
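/* The shape being matched here (and further constrained by the page-offset
   check in _bfd_aarch64_erratum_843419_p below) is, illustratively:

   adrp x0, sym                  (at an address ending in 0xff8 or 0xffc)
   ldr  x1, [x2, #8]             (any intervening load/store)
   ldr  x3, [x0, #:lo12:sym]     (unsigned-immediate ld/st based on x0)

   i.e. the base register of the final access is the ADRP destination.  */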
3180 /* Test for the presence of Cortex-A53 erratum 843419 instruction sequence.
3182 Return TRUE if section CONTENTS at offset I contains one of the
3183 erratum 843419 sequences, otherwise return FALSE. If a sequence is
3184 seen set P_VENEER_I to the offset of the final LOAD/STORE
3185 instruction in the sequence.
3189 _bfd_aarch64_erratum_843419_p (bfd_byte *contents, bfd_vma vma,
3190 bfd_vma i, bfd_vma span_end,
3191 bfd_vma *p_veneer_i)
3193 uint32_t insn_1 = bfd_getl32 (contents + i);
3195 if (!_bfd_aarch64_adrp_p (insn_1))
3198 if (span_end < i + 12)
3201 uint32_t insn_2 = bfd_getl32 (contents + i + 4);
3202 uint32_t insn_3 = bfd_getl32 (contents + i + 8);
3204 if ((vma & 0xfff) != 0xff8 && (vma & 0xfff) != 0xffc)
3207 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_3))
3209 *p_veneer_i = i + 8;
3213 if (span_end < i + 16)
3216 uint32_t insn_4 = bfd_getl32 (contents + i + 12);
3218 if (_bfd_aarch64_erratum_843419_sequence_p (insn_1, insn_2, insn_4))
3220 *p_veneer_i = i + 12;
3228 /* Resize all stub sections. */
3231 _bfd_aarch64_resize_stubs (struct elf_aarch64_link_hash_table *htab)
3235 /* OK, we've added some stubs.  Find out the new size of the stub sections.  */
3237 for (section = htab->stub_bfd->sections;
3238 section != NULL; section = section->next)
3240 /* Ignore non-stub sections. */
3241 if (!strstr (section->name, STUB_SUFFIX))
3246 bfd_hash_traverse (&htab->stub_hash_table, aarch64_size_one_stub, htab);
3248 for (section = htab->stub_bfd->sections;
3249 section != NULL; section = section->next)
3251 if (!strstr (section->name, STUB_SUFFIX))
3257 /* Ensure all stub sections have a size which is a multiple of
3258 4096. This is important in order to ensure that the insertion
3259 of stub sections does not in itself move existing code around
3260 in such a way that new errata sequences are created. */
3261 if (htab->fix_erratum_843419)
3263 section->size = BFD_ALIGN (section->size, 0x1000);
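/* Note that BFD_ALIGN rounds up, so a stub section of, say, 0x1234 bytes
   is padded to 0x2000 here; growing a stub section therefore never changes
   the address modulo 4096 of the code that follows it, which is what the
   comment above relies on.  */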
3268 /* Construct an erratum 843419 workaround stub name.
3272 _bfd_aarch64_erratum_843419_stub_name (asection *input_section,
3275 const bfd_size_type len = 8 + 4 + 1 + 8 + 1 + 16 + 1;
3276 char *stub_name = bfd_malloc (len);
3278 if (stub_name != NULL)
3279 snprintf (stub_name, len, "e843419@%04x_%08x_%" BFD_VMA_FMT "x",
3280 input_section->owner->id,
3286 /* Build a stub_entry structure describing an 843419 fixup.
3288 The stub_entry constructed is populated with the bit pattern INSN
3289 of the instruction located at OFFSET within input SECTION.
3291 Returns TRUE on success. */
3294 _bfd_aarch64_erratum_843419_fixup (uint32_t insn,
3295 bfd_vma adrp_offset,
3296 bfd_vma ldst_offset,
3298 struct bfd_link_info *info)
3300 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3302 struct elf_aarch64_stub_hash_entry *stub_entry;
3304 stub_name = _bfd_aarch64_erratum_843419_stub_name (section, ldst_offset);
3305 stub_entry = aarch64_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3313 /* We always place an 843419 workaround veneer in the stub section
3314 attached to the input section in which an erratum sequence has
3315 been found. This ensures that later in the link process (in
3316 elfNN_aarch64_write_section) when we copy the veneered
3317 instruction from the input section into the stub section the
3318 copied instruction will have had any relocations applied to it.
3319 If we placed workaround veneers in any other stub section then we
3320 could not assume that all relocations have been processed on the
3321 corresponding input section at the point we output the stub section.  */
3325 stub_entry = _bfd_aarch64_add_stub_entry_after (stub_name, section, htab);
3326 if (stub_entry == NULL)
3332 stub_entry->adrp_offset = adrp_offset;
3333 stub_entry->target_value = ldst_offset;
3334 stub_entry->target_section = section;
3335 stub_entry->stub_type = aarch64_stub_erratum_843419_veneer;
3336 stub_entry->veneered_insn = insn;
3337 stub_entry->output_name = stub_name;
3343 /* Scan an input section looking for the signature of erratum 843419.
3345 Scans input SECTION in INPUT_BFD looking for erratum 843419
3346 signatures, for each signature found a stub_entry is created
3347 describing the location of the erratum for subsequent fixup.
3349 Return TRUE on successful scan, FALSE on failure to scan.
3353 _bfd_aarch64_erratum_843419_scan (bfd *input_bfd, asection *section,
3354 struct bfd_link_info *info)
3356 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3361 if (elf_section_type (section) != SHT_PROGBITS
3362 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3363 || (section->flags & SEC_EXCLUDE) != 0
3364 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
3365 || (section->output_section == bfd_abs_section_ptr))
3370 bfd_byte *contents = NULL;
3371 struct _aarch64_elf_section_data *sec_data;
3374 if (elf_section_data (section)->this_hdr.contents != NULL)
3375 contents = elf_section_data (section)->this_hdr.contents;
3376 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3379 sec_data = elf_aarch64_section_data (section);
3381 qsort (sec_data->map, sec_data->mapcount,
3382 sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
3384 for (span = 0; span < sec_data->mapcount; span++)
3386 unsigned int span_start = sec_data->map[span].vma;
3387 unsigned int span_end = ((span == sec_data->mapcount - 1)
3388 ? sec_data->map[0].vma + section->size
3389 : sec_data->map[span + 1].vma);
3391 char span_type = sec_data->map[span].type;
3393 if (span_type == 'd')
3396 for (i = span_start; i + 8 < span_end; i += 4)
3398 bfd_vma vma = (section->output_section->vma
3399 + section->output_offset
3403 if (_bfd_aarch64_erratum_843419_p
3404 (contents, vma, i, span_end, &veneer_i))
3406 uint32_t insn = bfd_getl32 (contents + veneer_i);
3408 if (!_bfd_aarch64_erratum_843419_fixup (insn, i, veneer_i,
3415 if (elf_section_data (section)->this_hdr.contents == NULL)
3424 /* Determine and set the size of the stub section for a final link.
3426 The basic idea here is to examine all the relocations looking for
3427 PC-relative calls to a target that is unreachable with a "bl" instruction.  */
3431 elfNN_aarch64_size_stubs (bfd *output_bfd,
3433 struct bfd_link_info *info,
3434 bfd_signed_vma group_size,
3435 asection * (*add_stub_section) (const char *,
3437 void (*layout_sections_again) (void))
3439 bfd_size_type stub_group_size;
3440 bfd_boolean stubs_always_before_branch;
3441 bfd_boolean stub_changed = FALSE;
3442 struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
3443 unsigned int num_erratum_835769_fixes = 0;
3445 /* Propagate mach to stub bfd, because it may not have been
3446 finalized when we created stub_bfd. */
3447 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
3448 bfd_get_mach (output_bfd));
3450 /* Stash our params away. */
3451 htab->stub_bfd = stub_bfd;
3452 htab->add_stub_section = add_stub_section;
3453 htab->layout_sections_again = layout_sections_again;
3454 stubs_always_before_branch = group_size < 0;
3456 stub_group_size = -group_size;
3458 stub_group_size = group_size;
3460 if (stub_group_size == 1)
3462 /* Default values. */
3463 /* AArch64 branch range is +-128MB. The value used is 1MB less. */
3464 stub_group_size = 127 * 1024 * 1024;
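/* B and BL encode a signed 26-bit word offset, i.e. a reach of +/-2^27
   bytes = +/-128MB, which is where the figure above comes from; keeping
   groups 1MB short of that presumably leaves headroom for the stub
   section itself.  */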
3467 group_sections (htab, stub_group_size, stubs_always_before_branch);
3469 (*htab->layout_sections_again) ();
3471 if (htab->fix_erratum_835769)
3475 for (input_bfd = info->input_bfds;
3476 input_bfd != NULL; input_bfd = input_bfd->link.next)
3477 if (!_bfd_aarch64_erratum_835769_scan (input_bfd, info,
3478 &num_erratum_835769_fixes))
3481 _bfd_aarch64_resize_stubs (htab);
3482 (*htab->layout_sections_again) ();
3485 if (htab->fix_erratum_843419)
3489 for (input_bfd = info->input_bfds;
3491 input_bfd = input_bfd->link.next)
3495 for (section = input_bfd->sections;
3497 section = section->next)
3498 if (!_bfd_aarch64_erratum_843419_scan (input_bfd, section, info))
3502 _bfd_aarch64_resize_stubs (htab);
3503 (*htab->layout_sections_again) ();
3510 for (input_bfd = info->input_bfds;
3511 input_bfd != NULL; input_bfd = input_bfd->link.next)
3513 Elf_Internal_Shdr *symtab_hdr;
3515 Elf_Internal_Sym *local_syms = NULL;
3517 /* We'll need the symbol table in a second. */
3518 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
3519 if (symtab_hdr->sh_info == 0)
3522 /* Walk over each section attached to the input bfd. */
3523 for (section = input_bfd->sections;
3524 section != NULL; section = section->next)
3526 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
3528 /* If there aren't any relocs, then there's nothing more to do.  */
3530 if ((section->flags & SEC_RELOC) == 0
3531 || section->reloc_count == 0
3532 || (section->flags & SEC_CODE) == 0)
3535 /* If this section is a link-once section that will be
3536 discarded, then don't create any stubs. */
3537 if (section->output_section == NULL
3538 || section->output_section->owner != output_bfd)
3541 /* Get the relocs. */
3543 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
3544 NULL, info->keep_memory);
3545 if (internal_relocs == NULL)
3546 goto error_ret_free_local;
3548 /* Now examine each relocation. */
3549 irela = internal_relocs;
3550 irelaend = irela + section->reloc_count;
3551 for (; irela < irelaend; irela++)
3553 unsigned int r_type, r_indx;
3554 enum elf_aarch64_stub_type stub_type;
3555 struct elf_aarch64_stub_hash_entry *stub_entry;
3558 bfd_vma destination;
3559 struct elf_aarch64_link_hash_entry *hash;
3560 const char *sym_name;
3562 const asection *id_sec;
3563 unsigned char st_type;
3566 r_type = ELFNN_R_TYPE (irela->r_info);
3567 r_indx = ELFNN_R_SYM (irela->r_info);
3569 if (r_type >= (unsigned int) R_AARCH64_end)
3571 bfd_set_error (bfd_error_bad_value);
3572 error_ret_free_internal:
3573 if (elf_section_data (section)->relocs == NULL)
3574 free (internal_relocs);
3575 goto error_ret_free_local;
3578 /* Only look for stubs on unconditional branch and
3579 branch and link instructions. */
3580 if (r_type != (unsigned int) AARCH64_R (CALL26)
3581 && r_type != (unsigned int) AARCH64_R (JUMP26))
3584 /* Now determine the call target, its name, value and section.  */
3591 if (r_indx < symtab_hdr->sh_info)
3593 /* It's a local symbol. */
3594 Elf_Internal_Sym *sym;
3595 Elf_Internal_Shdr *hdr;
3597 if (local_syms == NULL)
3600 = (Elf_Internal_Sym *) symtab_hdr->contents;
3601 if (local_syms == NULL)
3603 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
3604 symtab_hdr->sh_info, 0,
3606 if (local_syms == NULL)
3607 goto error_ret_free_internal;
3610 sym = local_syms + r_indx;
3611 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
3612 sym_sec = hdr->bfd_section;
3614 /* This is an undefined symbol.  It can never be resolved.  */
3618 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
3619 sym_value = sym->st_value;
3620 destination = (sym_value + irela->r_addend
3621 + sym_sec->output_offset
3622 + sym_sec->output_section->vma);
3623 st_type = ELF_ST_TYPE (sym->st_info);
3625 = bfd_elf_string_from_elf_section (input_bfd,
3626 symtab_hdr->sh_link,
3633 e_indx = r_indx - symtab_hdr->sh_info;
3634 hash = ((struct elf_aarch64_link_hash_entry *)
3635 elf_sym_hashes (input_bfd)[e_indx]);
3637 while (hash->root.root.type == bfd_link_hash_indirect
3638 || hash->root.root.type == bfd_link_hash_warning)
3639 hash = ((struct elf_aarch64_link_hash_entry *)
3640 hash->root.root.u.i.link);
3642 if (hash->root.root.type == bfd_link_hash_defined
3643 || hash->root.root.type == bfd_link_hash_defweak)
3645 struct elf_aarch64_link_hash_table *globals =
3646 elf_aarch64_hash_table (info);
3647 sym_sec = hash->root.root.u.def.section;
3648 sym_value = hash->root.root.u.def.value;
3649 /* For a destination in a shared library,
3650 use the PLT stub as target address to
3651 decide whether a branch stub is needed.  */
3653 if (globals->root.splt != NULL && hash != NULL
3654 && hash->root.plt.offset != (bfd_vma) - 1)
3656 sym_sec = globals->root.splt;
3657 sym_value = hash->root.plt.offset;
3658 if (sym_sec->output_section != NULL)
3659 destination = (sym_value
3660 + sym_sec->output_offset
3662 sym_sec->output_section->vma);
3664 else if (sym_sec->output_section != NULL)
3665 destination = (sym_value + irela->r_addend
3666 + sym_sec->output_offset
3667 + sym_sec->output_section->vma);
3669 else if (hash->root.root.type == bfd_link_hash_undefined
3670 || (hash->root.root.type
3671 == bfd_link_hash_undefweak))
3673 /* For a shared library, use the PLT stub as
3674 target address to decide whether a long
3675 branch stub is needed.
3676 For absolute code, they cannot be handled. */
3677 struct elf_aarch64_link_hash_table *globals =
3678 elf_aarch64_hash_table (info);
3680 if (globals->root.splt != NULL && hash != NULL
3681 && hash->root.plt.offset != (bfd_vma) - 1)
3683 sym_sec = globals->root.splt;
3684 sym_value = hash->root.plt.offset;
3685 if (sym_sec->output_section != NULL)
3686 destination = (sym_value
3687 + sym_sec->output_offset
3689 sym_sec->output_section->vma);
3696 bfd_set_error (bfd_error_bad_value);
3697 goto error_ret_free_internal;
3699 st_type = ELF_ST_TYPE (hash->root.type);
3700 sym_name = hash->root.root.root.string;
3703 /* Determine what (if any) linker stub is needed. */
3704 stub_type = aarch64_type_of_stub
3705 (info, section, irela, st_type, hash, destination);
3706 if (stub_type == aarch64_stub_none)
3709 /* Support for grouping stub sections. */
3710 id_sec = htab->stub_group[section->id].link_sec;
3712 /* Get the name of this stub. */
3713 stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
3716 goto error_ret_free_internal;
3719 aarch64_stub_hash_lookup (&htab->stub_hash_table,
3720 stub_name, FALSE, FALSE);
3721 if (stub_entry != NULL)
3723 /* The proper stub has already been created. */
3728 stub_entry = _bfd_aarch64_add_stub_entry_in_group
3729 (stub_name, section, htab);
3730 if (stub_entry == NULL)
3733 goto error_ret_free_internal;
3736 stub_entry->target_value = sym_value;
3737 stub_entry->target_section = sym_sec;
3738 stub_entry->stub_type = stub_type;
3739 stub_entry->h = hash;
3740 stub_entry->st_type = st_type;
3742 if (sym_name == NULL)
3743 sym_name = "unnamed";
3744 len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
3745 stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
3746 if (stub_entry->output_name == NULL)
3749 goto error_ret_free_internal;
3752 snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
3755 stub_changed = TRUE;
3758 /* We're done with the internal relocs, free them. */
3759 if (elf_section_data (section)->relocs == NULL)
3760 free (internal_relocs);
3767 _bfd_aarch64_resize_stubs (htab);
3769 /* Ask the linker to do its stuff. */
3770 (*htab->layout_sections_again) ();
3771 stub_changed = FALSE;
3776 error_ret_free_local:
3780 /* Build all the stubs associated with the current output file. The
3781 stubs are kept in a hash table attached to the main linker hash
3782 table. We also set up the .plt entries for statically linked PIC
3783 functions here.  This function is called via aarch64_elf_finish in the linker.  */
3787 elfNN_aarch64_build_stubs (struct bfd_link_info *info)
3790 struct bfd_hash_table *table;
3791 struct elf_aarch64_link_hash_table *htab;
3793 htab = elf_aarch64_hash_table (info);
3795 for (stub_sec = htab->stub_bfd->sections;
3796 stub_sec != NULL; stub_sec = stub_sec->next)
3800 /* Ignore non-stub sections. */
3801 if (!strstr (stub_sec->name, STUB_SUFFIX))
3804 /* Allocate memory to hold the linker stubs. */
3805 size = stub_sec->size;
3806 stub_sec->contents = bfd_zalloc (htab->stub_bfd, size);
3807 if (stub_sec->contents == NULL && size != 0)
3811 bfd_putl32 (0x14000000 | (size >> 2), stub_sec->contents);
3812 stub_sec->size += 4;
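/* 0x14000000 is an unconditional B with a zero offset; OR-ing in
   (size >> 2) sets its imm26 field to the previously computed section size
   in words, i.e. a branch over the stubs that follow, presumably so that
   execution which falls into the stub section does not run the veneers.  */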
3815 /* Build the stubs as directed by the stub hash table. */
3816 table = &htab->stub_hash_table;
3817 bfd_hash_traverse (table, aarch64_build_one_stub, info);
3823 /* Add an entry to the code/data map for section SEC. */
3826 elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
3828 struct _aarch64_elf_section_data *sec_data =
3829 elf_aarch64_section_data (sec);
3830 unsigned int newidx;
3832 if (sec_data->map == NULL)
3834 sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
3835 sec_data->mapcount = 0;
3836 sec_data->mapsize = 1;
3839 newidx = sec_data->mapcount++;
3841 if (sec_data->mapcount > sec_data->mapsize)
3843 sec_data->mapsize *= 2;
3844 sec_data->map = bfd_realloc_or_free
3845 (sec_data->map, sec_data->mapsize * sizeof (elf_aarch64_section_map));
3850 sec_data->map[newidx].vma = vma;
3851 sec_data->map[newidx].type = type;
3856 /* Initialise maps of insn/data for input BFDs. */
3858 bfd_elfNN_aarch64_init_maps (bfd *abfd)
3860 Elf_Internal_Sym *isymbuf;
3861 Elf_Internal_Shdr *hdr;
3862 unsigned int i, localsyms;
3864 /* Make sure that we are dealing with an AArch64 elf binary. */
3865 if (!is_aarch64_elf (abfd))
3868 if ((abfd->flags & DYNAMIC) != 0)
3871 hdr = &elf_symtab_hdr (abfd);
3872 localsyms = hdr->sh_info;
3874 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
3875 should contain the number of local symbols, which should come before any
3876 global symbols. Mapping symbols are always local. */
3877 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL, NULL);
3879 /* No internal symbols read? Skip this BFD. */
3880 if (isymbuf == NULL)
3883 for (i = 0; i < localsyms; i++)
3885 Elf_Internal_Sym *isym = &isymbuf[i];
3886 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
3889 if (sec != NULL && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
3891 name = bfd_elf_string_from_elf_section (abfd,
3895 if (bfd_is_aarch64_special_symbol_name
3896 (name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
3897 elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
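/* name[1] is the character following the '$' of an AArch64 mapping symbol,
   so the map records 'x' for code spans ("$x") and 'd' for data spans
   ("$d"); the erratum scanners above skip the 'd' spans.  */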
3902 /* Set option values needed during linking. */
3904 bfd_elfNN_aarch64_set_options (struct bfd *output_bfd,
3905 struct bfd_link_info *link_info,
3907 int no_wchar_warn, int pic_veneer,
3908 int fix_erratum_835769,
3909 int fix_erratum_843419)
3911 struct elf_aarch64_link_hash_table *globals;
3913 globals = elf_aarch64_hash_table (link_info);
3914 globals->pic_veneer = pic_veneer;
3915 globals->fix_erratum_835769 = fix_erratum_835769;
3916 globals->fix_erratum_843419 = fix_erratum_843419;
3917 globals->fix_erratum_843419_adr = TRUE;
3919 BFD_ASSERT (is_aarch64_elf (output_bfd));
3920 elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
3921 elf_aarch64_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
3925 aarch64_calculate_got_entry_vma (struct elf_link_hash_entry *h,
3926 struct elf_aarch64_link_hash_table
3927 *globals, struct bfd_link_info *info,
3928 bfd_vma value, bfd *output_bfd,
3929 bfd_boolean *unresolved_reloc_p)
3931 bfd_vma off = (bfd_vma) - 1;
3932 asection *basegot = globals->root.sgot;
3933 bfd_boolean dyn = globals->root.dynamic_sections_created;
3937 BFD_ASSERT (basegot != NULL);
3938 off = h->got.offset;
3939 BFD_ASSERT (off != (bfd_vma) - 1);
3940 if (!WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
3942 && SYMBOL_REFERENCES_LOCAL (info, h))
3943 || (ELF_ST_VISIBILITY (h->other)
3944 && h->root.type == bfd_link_hash_undefweak))
3946 /* This is actually a static link, or it is a -Bsymbolic link
3947 and the symbol is defined locally. We must initialize this
3948 entry in the global offset table. Since the offset must
3949 always be a multiple of 8 (4 in the case of ILP32), we use
3950 the least significant bit to record whether we have
3951 initialized it already.
3952 When doing a dynamic link, we create a .rel(a).got relocation
3953 entry to initialize the value. This is done in the
3954 finish_dynamic_symbol routine. */
3959 bfd_put_NN (output_bfd, value, basegot->contents + off);
3964 *unresolved_reloc_p = FALSE;
3966 off = off + basegot->output_section->vma + basegot->output_offset;
3972 /* Change R_TYPE to a more efficient access model where possible,
3973 return the new reloc type. */
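/* For example, with the mapping below a general-dynamic access to a symbol
   that turns out to be local (h == NULL) is retargeted at the local-exec
   model: BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21 becomes
   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1, while for a symbol with a hash
   entry it becomes the initial-exec
   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 instead.  */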
3975 static bfd_reloc_code_real_type
3976 aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
3977 struct elf_link_hash_entry *h)
3979 bfd_boolean is_local = h == NULL;
3983 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
3984 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
3986 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3987 : BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21);
3989 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
3991 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
3994 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
3996 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
3997 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
3999 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4000 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
4002 ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC
4003 : BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC);
4005 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4006 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1 : r_type;
4008 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
4009 return is_local ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type;
4011 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4014 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4016 ? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
4017 : BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
4019 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4020 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4021 /* Instructions with these relocations will become NOPs. */
4022 return BFD_RELOC_AARCH64_NONE;
4032 aarch64_reloc_got_type (bfd_reloc_code_real_type r_type)
4036 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4037 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4038 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4039 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4042 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4043 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4044 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4047 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4048 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4049 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4050 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4051 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4052 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4053 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4054 return GOT_TLSDESC_GD;
4056 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4057 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4058 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4059 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4062 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4063 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4064 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4065 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4066 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4067 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4068 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4069 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4079 aarch64_can_relax_tls (bfd *input_bfd,
4080 struct bfd_link_info *info,
4081 bfd_reloc_code_real_type r_type,
4082 struct elf_link_hash_entry *h,
4083 unsigned long r_symndx)
4085 unsigned int symbol_got_type;
4086 unsigned int reloc_got_type;
4088 if (! IS_AARCH64_TLS_RELOC (r_type))
4091 symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
4092 reloc_got_type = aarch64_reloc_got_type (r_type);
4094 if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
4100 if (h && h->root.type == bfd_link_hash_undefweak)
4106 /* Given the relocation code R_TYPE, return the relaxed bfd reloc
4109 static bfd_reloc_code_real_type
4110 aarch64_tls_transition (bfd *input_bfd,
4111 struct bfd_link_info *info,
4112 unsigned int r_type,
4113 struct elf_link_hash_entry *h,
4114 unsigned long r_symndx)
4116 bfd_reloc_code_real_type bfd_r_type
4117 = elfNN_aarch64_bfd_reloc_from_type (r_type);
4119 if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
4122 return aarch64_tls_transition_without_check (bfd_r_type, h);
4125 /* Return the base VMA address which should be subtracted from real addresses
4126 when resolving R_AARCH64_TLS_DTPREL relocation. */
4129 dtpoff_base (struct bfd_link_info *info)
4131 /* If tls_sec is NULL, we should have signalled an error already. */
4132 BFD_ASSERT (elf_hash_table (info)->tls_sec != NULL);
4133 return elf_hash_table (info)->tls_sec->vma;
4136 /* Return the base VMA address which should be subtracted from real addresses
4137 when resolving R_AARCH64_TLS_GOTTPREL64 relocations. */
4140 tpoff_base (struct bfd_link_info *info)
4142 struct elf_link_hash_table *htab = elf_hash_table (info);
4144 /* If tls_sec is NULL, we should have signalled an error already. */
4145 BFD_ASSERT (htab->tls_sec != NULL);
4147 bfd_vma base = align_power ((bfd_vma) TCB_SIZE,
4148 htab->tls_sec->alignment_power);
4149 return htab->tls_sec->vma - base;
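/* For example, with the usual 16 byte AArch64 TCB_SIZE and a TLS segment
   aligned to 16 bytes or less, tpoff_base is tls_sec->vma - 16, so the
   first byte of the TLS block ends up at thread-pointer offset 16,
   immediately after the TCB.  */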
4153 symbol_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4154 unsigned long r_symndx)
4156 /* Calculate the address of the GOT entry for symbol
4157 referred to in h. */
4159 return &h->got.offset;
4163 struct elf_aarch64_local_symbol *l;
4165 l = elf_aarch64_locals (input_bfd);
4166 return &l[r_symndx].got_offset;
4171 symbol_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4172 unsigned long r_symndx)
4175 p = symbol_got_offset_ref (input_bfd, h, r_symndx);
4180 symbol_got_offset_mark_p (bfd *input_bfd, struct elf_link_hash_entry *h,
4181 unsigned long r_symndx)
4184 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4189 symbol_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4190 unsigned long r_symndx)
4193 value = * symbol_got_offset_ref (input_bfd, h, r_symndx);
4199 symbol_tlsdesc_got_offset_ref (bfd *input_bfd, struct elf_link_hash_entry *h,
4200 unsigned long r_symndx)
4202 /* Calculate the address of the GOT entry for symbol
4203 referred to in h. */
4206 struct elf_aarch64_link_hash_entry *eh;
4207 eh = (struct elf_aarch64_link_hash_entry *) h;
4208 return &eh->tlsdesc_got_jump_table_offset;
4213 struct elf_aarch64_local_symbol *l;
4215 l = elf_aarch64_locals (input_bfd);
4216 return &l[r_symndx].tlsdesc_got_jump_table_offset;
4221 symbol_tlsdesc_got_offset_mark (bfd *input_bfd, struct elf_link_hash_entry *h,
4222 unsigned long r_symndx)
4225 p = symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4230 symbol_tlsdesc_got_offset_mark_p (bfd *input_bfd,
4231 struct elf_link_hash_entry *h,
4232 unsigned long r_symndx)
4235 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
4240 symbol_tlsdesc_got_offset (bfd *input_bfd, struct elf_link_hash_entry *h,
4241 unsigned long r_symndx)
4244 value = * symbol_tlsdesc_got_offset_ref (input_bfd, h, r_symndx);
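/* Note on the helpers above: GOT and TLSDESC GOT offsets are always
   multiples of the entry size, so the least significant bit of the stored
   offset is free to serve as an "already processed" flag.  The *_mark
   functions set that bit, the *_mark_p functions test it, and the plain
   *_offset accessors are expected to return the offset with the mark bit
   masked off, which lets several relocations against the same symbol share
   a single GOT slot initialisation.  */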
4249 /* Data for make_branch_to_erratum_835769_stub(). */
4251 struct erratum_835769_branch_to_stub_data
4253 struct bfd_link_info *info;
4254 asection *output_section;
4258 /* Helper to insert branches to erratum 835769 stubs in the right
4259 places for a particular section. */
4262 make_branch_to_erratum_835769_stub (struct bfd_hash_entry *gen_entry,
4265 struct elf_aarch64_stub_hash_entry *stub_entry;
4266 struct erratum_835769_branch_to_stub_data *data;
4268 unsigned long branch_insn = 0;
4269 bfd_vma veneered_insn_loc, veneer_entry_loc;
4270 bfd_signed_vma branch_offset;
4271 unsigned int target;
4274 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4275 data = (struct erratum_835769_branch_to_stub_data *) in_arg;
4277 if (stub_entry->target_section != data->output_section
4278 || stub_entry->stub_type != aarch64_stub_erratum_835769_veneer)
4281 contents = data->contents;
4282 veneered_insn_loc = stub_entry->target_section->output_section->vma
4283 + stub_entry->target_section->output_offset
4284 + stub_entry->target_value;
4285 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4286 + stub_entry->stub_sec->output_offset
4287 + stub_entry->stub_offset;
4288 branch_offset = veneer_entry_loc - veneered_insn_loc;
4290 abfd = stub_entry->target_section->owner;
4291 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4292 (*_bfd_error_handler)
4293 (_("%B: error: Erratum 835769 stub out "
4294 "of range (input file too large)"), abfd);
4296 target = stub_entry->target_value;
4297 branch_insn = 0x14000000;
4298 branch_offset >>= 2;
4299 branch_offset &= 0x3ffffff;
4300 branch_insn |= branch_offset;
4301 bfd_putl32 (branch_insn, &contents[target]);
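/* For reference: 0x14000000 is the AArch64 unconditional branch (B)
   opcode.  Its imm26 field holds (target - pc) / 4, so the shift and mask
   above pack a byte offset of up to +/-128MB into the low 26 bits; e.g. a
   forward offset of 0x1000 bytes becomes imm26 == 0x400.  */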
4308 _bfd_aarch64_erratum_843419_branch_to_stub (struct bfd_hash_entry *gen_entry,
4311 struct elf_aarch64_stub_hash_entry *stub_entry
4312 = (struct elf_aarch64_stub_hash_entry *) gen_entry;
4313 struct erratum_835769_branch_to_stub_data *data
4314 = (struct erratum_835769_branch_to_stub_data *) in_arg;
4315 struct bfd_link_info *info;
4316 struct elf_aarch64_link_hash_table *htab;
4324 contents = data->contents;
4325 section = data->output_section;
4327 htab = elf_aarch64_hash_table (info);
4329 if (stub_entry->target_section != section
4330 || stub_entry->stub_type != aarch64_stub_erratum_843419_veneer)
4333 insn = bfd_getl32 (contents + stub_entry->target_value);
4335 stub_entry->stub_sec->contents + stub_entry->stub_offset);
4337 place = (section->output_section->vma + section->output_offset
4338 + stub_entry->adrp_offset);
4339 insn = bfd_getl32 (contents + stub_entry->adrp_offset);
4341 if ((insn & AARCH64_ADRP_OP_MASK) != AARCH64_ADRP_OP)
4344 bfd_signed_vma imm =
4345 (_bfd_aarch64_sign_extend
4346 ((bfd_vma) _bfd_aarch64_decode_adrp_imm (insn) << 12, 33)
4349 if (htab->fix_erratum_843419_adr
4350 && (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
4352 insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
4353 | AARCH64_RT (insn));
4354 bfd_putl32 (insn, contents + stub_entry->adrp_offset);
4358 bfd_vma veneered_insn_loc;
4359 bfd_vma veneer_entry_loc;
4360 bfd_signed_vma branch_offset;
4361 uint32_t branch_insn;
4363 veneered_insn_loc = stub_entry->target_section->output_section->vma
4364 + stub_entry->target_section->output_offset
4365 + stub_entry->target_value;
4366 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
4367 + stub_entry->stub_sec->output_offset
4368 + stub_entry->stub_offset;
4369 branch_offset = veneer_entry_loc - veneered_insn_loc;
4371 abfd = stub_entry->target_section->owner;
4372 if (!aarch64_valid_branch_p (veneer_entry_loc, veneered_insn_loc))
4373 (*_bfd_error_handler)
4374 (_("%B: error: Erratum 843419 stub out "
4375 "of range (input file too large)"), abfd);
4377 branch_insn = 0x14000000;
4378 branch_offset >>= 2;
4379 branch_offset &= 0x3ffffff;
4380 branch_insn |= branch_offset;
4381 bfd_putl32 (branch_insn, contents + stub_entry->target_value);
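/* Summary of the two erratum 843419 strategies above: when the ADR form of
   the fix is enabled (fix_erratum_843419_adr) and the decoded page offset
   fits the +/-1MB ADR immediate range, the offending ADRP is rewritten in
   place as an ADR with the same destination register; otherwise the
   veneered instruction is replaced by a branch to an out-of-line stub,
   encoded with the same B/imm26 scheme as the erratum 835769 case above.  */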
4388 elfNN_aarch64_write_section (bfd *output_bfd ATTRIBUTE_UNUSED,
4389 struct bfd_link_info *link_info,
4394 struct elf_aarch64_link_hash_table *globals =
4395 elf_aarch64_hash_table (link_info);
4397 if (globals == NULL)
4400 /* Fix code to point to erratum 835769 stubs. */
4401 if (globals->fix_erratum_835769)
4403 struct erratum_835769_branch_to_stub_data data;
4405 data.info = link_info;
4406 data.output_section = sec;
4407 data.contents = contents;
4408 bfd_hash_traverse (&globals->stub_hash_table,
4409 make_branch_to_erratum_835769_stub, &data);
4412 if (globals->fix_erratum_843419)
4414 struct erratum_835769_branch_to_stub_data data;
4416 data.info = link_info;
4417 data.output_section = sec;
4418 data.contents = contents;
4419 bfd_hash_traverse (&globals->stub_hash_table,
4420 _bfd_aarch64_erratum_843419_branch_to_stub, &data);
4426 /* Perform a relocation as part of a final link. */
4427 static bfd_reloc_status_type
4428 elfNN_aarch64_final_link_relocate (reloc_howto_type *howto,
4431 asection *input_section,
4433 Elf_Internal_Rela *rel,
4435 struct bfd_link_info *info,
4437 struct elf_link_hash_entry *h,
4438 bfd_boolean *unresolved_reloc_p,
4439 bfd_boolean save_addend,
4440 bfd_vma *saved_addend,
4441 Elf_Internal_Sym *sym)
4443 Elf_Internal_Shdr *symtab_hdr;
4444 unsigned int r_type = howto->type;
4445 bfd_reloc_code_real_type bfd_r_type
4446 = elfNN_aarch64_bfd_reloc_from_howto (howto);
4447 bfd_reloc_code_real_type new_bfd_r_type;
4448 unsigned long r_symndx;
4449 bfd_byte *hit_data = contents + rel->r_offset;
4451 bfd_signed_vma signed_addend;
4452 struct elf_aarch64_link_hash_table *globals;
4453 bfd_boolean weak_undef_p;
4456 globals = elf_aarch64_hash_table (info);
4458 symtab_hdr = &elf_symtab_hdr (input_bfd);
4460 BFD_ASSERT (is_aarch64_elf (input_bfd));
4462 r_symndx = ELFNN_R_SYM (rel->r_info);
4464 /* It is possible to have linker relaxations on some TLS access
4465 models. Update our information here. */
4466 new_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type, h, r_symndx);
4467 if (new_bfd_r_type != bfd_r_type)
4469 bfd_r_type = new_bfd_r_type;
4470 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
4471 BFD_ASSERT (howto != NULL);
4472 r_type = howto->type;
4475 place = input_section->output_section->vma
4476 + input_section->output_offset + rel->r_offset;
4478 /* Get addend, accumulating the addend for consecutive relocs
4479 which refer to the same offset. */
4480 signed_addend = saved_addend ? *saved_addend : 0;
4481 signed_addend += rel->r_addend;
4483 weak_undef_p = (h ? h->root.type == bfd_link_hash_undefweak
4484 : bfd_is_und_section (sym_sec));
4486 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
4487 it here if it is defined in a non-shared object. */
4489 && h->type == STT_GNU_IFUNC
4495 if ((input_section->flags & SEC_ALLOC) == 0
4496 || h->plt.offset == (bfd_vma) -1)
4499 /* STT_GNU_IFUNC symbol must go through PLT. */
4500 plt = globals->root.splt ? globals->root.splt : globals->root.iplt;
4501 value = (plt->output_section->vma + plt->output_offset + h->plt.offset);
4506 if (h->root.root.string)
4507 name = h->root.root.string;
4509 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
4511 (*_bfd_error_handler)
4512 (_("%B: relocation %s against STT_GNU_IFUNC "
4513 "symbol `%s' isn't handled by %s"), input_bfd,
4514 howto->name, name, __FUNCTION__);
4515 bfd_set_error (bfd_error_bad_value);
4518 case BFD_RELOC_AARCH64_NN:
4519 if (rel->r_addend != 0)
4521 if (h->root.root.string)
4522 name = h->root.root.string;
4524 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
4526 (*_bfd_error_handler)
4527 (_("%B: relocation %s against STT_GNU_IFUNC "
4528 "symbol `%s' has non-zero addend: %d"),
4529 input_bfd, howto->name, name, rel->r_addend);
4530 bfd_set_error (bfd_error_bad_value);
4534 /* Generate dynamic relocation only when there is a
4535 non-GOT reference in a shared object. */
4536 if (info->shared && h->non_got_ref)
4538 Elf_Internal_Rela outrel;
4541 /* Need a dynamic relocation to get the real function address.  */
4543 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
4547 if (outrel.r_offset == (bfd_vma) -1
4548 || outrel.r_offset == (bfd_vma) -2)
4551 outrel.r_offset += (input_section->output_section->vma
4552 + input_section->output_offset);
4554 if (h->dynindx == -1
4556 || info->executable)
4558 /* This symbol is resolved locally. */
4559 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
4560 outrel.r_addend = (h->root.u.def.value
4561 + h->root.u.def.section->output_section->vma
4562 + h->root.u.def.section->output_offset);
4566 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4567 outrel.r_addend = 0;
4570 sreloc = globals->root.irelifunc;
4571 elf_append_rela (output_bfd, sreloc, &outrel);
4573 /* If this reloc is against an external symbol, we
4574 do not want to fiddle with the addend. Otherwise,
4575 we need to include the symbol value so that it
4576 becomes an addend for the dynamic reloc. For an
4577 internal symbol, we have already updated the addend.  */
4578 return bfd_reloc_ok;
4581 case BFD_RELOC_AARCH64_JUMP26:
4582 case BFD_RELOC_AARCH64_CALL26:
4583 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4586 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4588 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4589 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4590 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4591 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4592 base_got = globals->root.sgot;
4593 off = h->got.offset;
4595 if (base_got == NULL)
4598 if (off == (bfd_vma) -1)
4602 /* We can't use h->got.offset here to save state, or
4603 even just remember the offset, as finish_dynamic_symbol
4604 would use that as offset into .got. */
4606 if (globals->root.splt != NULL)
4608 plt_index = ((h->plt.offset - globals->plt_header_size) /
4609 globals->plt_entry_size);
4610 off = (plt_index + 3) * GOT_ENTRY_SIZE;
4611 base_got = globals->root.sgotplt;
4615 plt_index = h->plt.offset / globals->plt_entry_size;
4616 off = plt_index * GOT_ENTRY_SIZE;
4617 base_got = globals->root.igotplt;
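/* The "+ 3" above skips the reserved entries at the start of .got.plt
   (conventionally _DYNAMIC plus the two slots used by the dynamic linker
   for lazy resolution), so the value picked up here is the .got.plt slot
   that the symbol's PLT entry refers to.  .igotplt has no such header,
   hence the plain plt_index * GOT_ENTRY_SIZE in the static/ifunc case.  */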
4620 if (h->dynindx == -1
4624 /* This references the local definition. We must
4625 initialize this entry in the global offset table.
4626 Since the offset must always be a multiple of 8,
4627 we use the least significant bit to record
4628 whether we have initialized it already.
4630 When doing a dynamic link, we create a .rela.got
4631 relocation entry to initialize the value. This
4632 is done in the finish_dynamic_symbol routine. */
4637 bfd_put_NN (output_bfd, value,
4638 base_got->contents + off);
4639 /* Note that this is harmless as -1 | 1 still is -1. */
4643 value = (base_got->output_section->vma
4644 + base_got->output_offset + off);
4647 value = aarch64_calculate_got_entry_vma (h, globals, info,
4649 unresolved_reloc_p);
4650 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4652 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
4653 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4654 case BFD_RELOC_AARCH64_ADD_LO12:
4661 case BFD_RELOC_AARCH64_NONE:
4662 case BFD_RELOC_AARCH64_TLSDESC_CALL:
4663 *unresolved_reloc_p = FALSE;
4664 return bfd_reloc_ok;
4666 case BFD_RELOC_AARCH64_NN:
4668 /* When generating a shared object or relocatable executable, these
4669 relocations are copied into the output file to be resolved at run time.  */
4671 if (((info->shared == TRUE) || globals->root.is_relocatable_executable)
4672 && (input_section->flags & SEC_ALLOC)
4674 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
4675 || h->root.type != bfd_link_hash_undefweak))
4677 Elf_Internal_Rela outrel;
4679 bfd_boolean skip, relocate;
4682 *unresolved_reloc_p = FALSE;
4687 outrel.r_addend = signed_addend;
4689 _bfd_elf_section_offset (output_bfd, info, input_section,
4691 if (outrel.r_offset == (bfd_vma) - 1)
4693 else if (outrel.r_offset == (bfd_vma) - 2)
4699 outrel.r_offset += (input_section->output_section->vma
4700 + input_section->output_offset);
4703 memset (&outrel, 0, sizeof outrel);
4706 && (!info->shared || !SYMBOLIC_BIND (info, h) || !h->def_regular))
4707 outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
4712 /* On SVR4-ish systems, the dynamic loader cannot
4713 relocate the text and data segments independently,
4714 so the symbol does not matter. */
4716 outrel.r_info = ELFNN_R_INFO (symbol, AARCH64_R (RELATIVE));
4717 outrel.r_addend += value;
4720 sreloc = elf_section_data (input_section)->sreloc;
4721 if (sreloc == NULL || sreloc->contents == NULL)
4722 return bfd_reloc_notsupported;
4724 loc = sreloc->contents + sreloc->reloc_count++ * RELOC_SIZE (globals);
4725 bfd_elfNN_swap_reloca_out (output_bfd, &outrel, loc);
4727 if (sreloc->reloc_count * RELOC_SIZE (globals) > sreloc->size)
4729 /* Sanity check that we have previously allocated
4730 sufficient space in the relocation section for the
4731 number of relocations we actually want to emit. */
4735 /* If this reloc is against an external symbol, we do not want to
4736 fiddle with the addend. Otherwise, we need to include the symbol
4737 value so that it becomes an addend for the dynamic reloc. */
4739 return bfd_reloc_ok;
4741 return _bfd_final_link_relocate (howto, input_bfd, input_section,
4742 contents, rel->r_offset, value,
4746 value += signed_addend;
4749 case BFD_RELOC_AARCH64_JUMP26:
4750 case BFD_RELOC_AARCH64_CALL26:
4752 asection *splt = globals->root.splt;
4753 bfd_boolean via_plt_p =
4754 splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
4756 /* A call to an undefined weak symbol is converted to a jump to
4757 the next instruction unless a PLT entry will be created.
4758 The jump to the next instruction is optimized as a NOP.
4759 Do the same for local undefined symbols. */
4760 if (weak_undef_p && ! via_plt_p)
4762 bfd_putl32 (INSN_NOP, hit_data);
4763 return bfd_reloc_ok;
4766 /* If the call goes through a PLT entry, make sure to
4767 check distance to the right destination address. */
4770 value = (splt->output_section->vma
4771 + splt->output_offset + h->plt.offset);
4772 *unresolved_reloc_p = FALSE;
4775 /* If the target symbol is global and marked as a function, the
4776 relocation applies to a function call or a tail call.  In this
4777 situation we can veneer out-of-range branches.  The veneers
4778 use IP0 and IP1, hence they cannot be used for arbitrary
4779 out-of-range branches that occur within the body of a function.  */
4780 if (h && h->type == STT_FUNC)
4782 /* Check if a stub has to be inserted because the destination is out of range.  */
4784 if (! aarch64_valid_branch_p (value, place))
4786 /* The target is out of reach, so redirect the branch to
4787 the local stub for this function. */
4788 struct elf_aarch64_stub_hash_entry *stub_entry;
4789 stub_entry = elfNN_aarch64_get_stub_entry (input_section,
4792 if (stub_entry != NULL)
4793 value = (stub_entry->stub_offset
4794 + stub_entry->stub_sec->output_offset
4795 + stub_entry->stub_sec->output_section->vma);
4799 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4800 signed_addend, weak_undef_p);
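/* A B/BL instruction reaches +/-128MB (a signed 26-bit word offset), so a
   branch that resolves outside that window is redirected above to the
   long-branch stub found for this section; the stub's address is then
   substituted for the real destination before the ordinary branch
   relocation arithmetic is applied.  */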
4803 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
4804 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
4805 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
4806 case BFD_RELOC_AARCH64_LD_LO19_PCREL:
4807 case BFD_RELOC_AARCH64_16_PCREL:
4808 case BFD_RELOC_AARCH64_32_PCREL:
4809 case BFD_RELOC_AARCH64_64_PCREL:
4811 && (input_section->flags & SEC_ALLOC) != 0
4812 && (input_section->flags & SEC_READONLY) != 0
4816 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4818 (*_bfd_error_handler)
4819 (_("%B: relocation %s against external symbol `%s' can not be used"
4820 " when making a shared object; recompile with -fPIC"),
4821 input_bfd, elfNN_aarch64_howto_table[howto_index].name,
4822 h->root.root.string);
4823 bfd_set_error (bfd_error_bad_value);
4827 case BFD_RELOC_AARCH64_16:
4829 case BFD_RELOC_AARCH64_32:
4831 case BFD_RELOC_AARCH64_ADD_LO12:
4832 case BFD_RELOC_AARCH64_BRANCH19:
4833 case BFD_RELOC_AARCH64_LDST8_LO12:
4834 case BFD_RELOC_AARCH64_LDST16_LO12:
4835 case BFD_RELOC_AARCH64_LDST32_LO12:
4836 case BFD_RELOC_AARCH64_LDST64_LO12:
4837 case BFD_RELOC_AARCH64_LDST128_LO12:
4838 case BFD_RELOC_AARCH64_MOVW_G0_S:
4839 case BFD_RELOC_AARCH64_MOVW_G1_S:
4840 case BFD_RELOC_AARCH64_MOVW_G2_S:
4841 case BFD_RELOC_AARCH64_MOVW_G0:
4842 case BFD_RELOC_AARCH64_MOVW_G0_NC:
4843 case BFD_RELOC_AARCH64_MOVW_G1:
4844 case BFD_RELOC_AARCH64_MOVW_G1_NC:
4845 case BFD_RELOC_AARCH64_MOVW_G2:
4846 case BFD_RELOC_AARCH64_MOVW_G2_NC:
4847 case BFD_RELOC_AARCH64_MOVW_G3:
4848 case BFD_RELOC_AARCH64_TSTBR14:
4849 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4850 signed_addend, weak_undef_p);
4853 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
4854 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
4855 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
4856 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
4857 if (globals->root.sgot == NULL)
4858 BFD_ASSERT (h != NULL);
4862 value = aarch64_calculate_got_entry_vma (h, globals, info, value,
4864 unresolved_reloc_p);
4865 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4870 struct elf_aarch64_local_symbol *locals
4871 = elf_aarch64_locals (input_bfd);
4875 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
4876 (*_bfd_error_handler)
4877 (_("%B: Local symbol descriptor table be NULL when applying "
4878 "relocation %s against local symbol"),
4879 input_bfd, elfNN_aarch64_howto_table[howto_index].name);
4883 off = symbol_got_offset (input_bfd, h, r_symndx);
4884 base_got = globals->root.sgot;
4885 bfd_vma got_entry_addr = (base_got->output_section->vma
4886 + base_got->output_offset + off);
4888 if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
4890 bfd_put_64 (output_bfd, value, base_got->contents + off);
4895 Elf_Internal_Rela outrel;
4897 /* For a local symbol, we have done the absolute relocation at static
4898 link time.  For a shared library, however, we need to update
4899 the content of the GOT entry according to the shared object's
4900 load base address, so we generate an
4901 R_AARCH64_RELATIVE reloc for the dynamic linker.  */
4902 s = globals->root.srelgot;
4906 outrel.r_offset = got_entry_addr;
4907 outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
4908 outrel.r_addend = value;
4909 elf_append_rela (output_bfd, s, &outrel);
4912 symbol_got_offset_mark (input_bfd, h, r_symndx);
4915 /* Update the relocation value to the GOT entry address, as we have
4916 transformed the direct data access into an indirect access through the GOT.  */
4917 value = got_entry_addr;
4922 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
4923 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
4924 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
4925 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
4926 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
4927 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
4928 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
4929 if (globals->root.sgot == NULL)
4930 return bfd_reloc_notsupported;
4932 value = (symbol_got_offset (input_bfd, h, r_symndx)
4933 + globals->root.sgot->output_section->vma
4934 + globals->root.sgot->output_offset);
4936 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4938 *unresolved_reloc_p = FALSE;
4941 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
4942 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
4943 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
4944 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
4945 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
4946 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
4947 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
4948 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
4949 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4950 signed_addend - tpoff_base (info),
4952 *unresolved_reloc_p = FALSE;
4955 case BFD_RELOC_AARCH64_TLSDESC_ADD:
4956 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
4957 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
4958 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
4959 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
4960 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
4961 case BFD_RELOC_AARCH64_TLSDESC_LDR:
4962 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
4963 if (globals->root.sgot == NULL)
4964 return bfd_reloc_notsupported;
4965 value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
4966 + globals->root.sgotplt->output_section->vma
4967 + globals->root.sgotplt->output_offset
4968 + globals->sgotplt_jump_table_size);
4970 value = _bfd_aarch64_elf_resolve_relocation (bfd_r_type, place, value,
4972 *unresolved_reloc_p = FALSE;
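/* Note that the TLSDESC GOT entries resolved above do not live in .got
   proper: the descriptor slots are carved out of .got.plt, after the
   sgotplt_jump_table_size bytes of PLT-related entries, which is why the
   address is formed from sgotplt rather than sgot.  */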
4976 return bfd_reloc_notsupported;
4980 *saved_addend = value;
4982 /* Only apply the final relocation in a sequence. */
4984 return bfd_reloc_continue;
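/* Note on the addend chaining above: when SAVE_ADDEND is set, the computed
   value is stored through SAVED_ADDEND and bfd_reloc_continue is returned
   instead of patching the section contents, so the next relocation at the
   same offset picks the value up as its addend.  Only the last relocation
   in such a sequence falls through to _bfd_aarch64_elf_put_addend below.  */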
4986 return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
4990 /* Handle TLS relaxations. Relaxing is possible for symbols that use
4991 R_AARCH64_TLSDESC_{ADR_PAGE21, LD64_LO12_NC, ADD_LO12_NC} during a static link.
4994 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
4995 is to then call final_link_relocate.  Return other values in the normal way.  */
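/* For reference, the instruction words stored by the relaxations below
   decode roughly as follows (registers as fixed by the relaxation
   sequences):

     0xd2a00000  movz x0, #0x0, lsl #16   (TPREL_G1 placeholder)
     0xf2800000  movk x0, #0x0            (TPREL_G0_NC placeholder)
     0xd53bd041  mrs  x1, tpidr_el0
     0x91400020  add  x0, x1, #0x0, lsl #12
     0x91000000  add  x0, x0, #0x0
     0x8b000020  add  x0, x1, x0
     0x58000000  ldr  x0, <pc-relative literal>
     0xf9400000  ldr  x0, [x0]

   The immediate fields are left zero here; they are filled in later by
   _bfd_aarch64_elf_put_addend when the relaxed relocation is applied.  */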
4998 static bfd_reloc_status_type
4999 elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
5000 bfd *input_bfd, bfd_byte *contents,
5001 Elf_Internal_Rela *rel, struct elf_link_hash_entry *h)
5003 bfd_boolean is_local = h == NULL;
5004 unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
5007 BFD_ASSERT (globals && input_bfd && contents && rel);
5009 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5011 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5012 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5015 /* GD->LE relaxation:
5016 adrp x0, :tlsgd:var => movz x0, :tprel_g1:var
5018 adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var
5020 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5021 return bfd_reloc_continue;
5025 /* GD->IE relaxation:
5026 adrp x0, :tlsgd:var => adrp x0, :gottprel:var
5028 adrp x0, :tlsdesc:var => adrp x0, :gottprel:var
5030 return bfd_reloc_continue;
5033 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5037 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5040 /* Tiny TLSDESC->LE relaxation:
5041 ldr x1, :tlsdesc:var => movz x0, #:tprel_g1:var
5042 adr x0, :tlsdesc:var => movk x0, #:tprel_g0_nc:var
5046 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5047 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5049 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5050 AARCH64_R (TLSLE_MOVW_TPREL_G0_NC));
5051 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5053 bfd_putl32 (0xd2a00000, contents + rel->r_offset);
5054 bfd_putl32 (0xf2800000, contents + rel->r_offset + 4);
5055 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5056 return bfd_reloc_continue;
5060 /* Tiny TLSDESC->IE relaxation:
5061 ldr x1, :tlsdesc:var => ldr x0, :gottprel:var
5062 adr x0, :tlsdesc:var => nop
5066 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (TLSDESC_ADR_PREL21));
5067 BFD_ASSERT (ELFNN_R_TYPE (rel[2].r_info) == AARCH64_R (TLSDESC_CALL));
5069 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5070 rel[2].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5072 bfd_putl32 (0x58000000, contents + rel->r_offset);
5073 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 4);
5074 bfd_putl32 (INSN_NOP, contents + rel->r_offset + 8);
5075 return bfd_reloc_continue;
5078 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5081 /* Tiny GD->LE relaxation:
5082 adr x0, :tlsgd:var => mrs x1, tpidr_el0
5083 bl __tls_get_addr => add x0, x1, #:tprel_hi12:var, lsl #12
5084 nop => add x0, x0, #:tprel_lo12_nc:var
5087 /* First kill the tls_get_addr reloc on the bl instruction. */
5088 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5090 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 0);
5091 bfd_putl32 (0x91400020, contents + rel->r_offset + 4);
5092 bfd_putl32 (0x91000000, contents + rel->r_offset + 8);
5094 rel[1].r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5095 AARCH64_R (TLSLE_ADD_TPREL_LO12_NC));
5096 rel[1].r_offset = rel->r_offset + 8;
5098 /* Move the current relocation to the second instruction in
5101 rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info),
5102 AARCH64_R (TLSLE_ADD_TPREL_HI12));
5103 return bfd_reloc_continue;
5107 /* Tiny GD->IE relaxation:
5108 adr x0, :tlsgd:var => ldr x0, :gottprel:var
5109 bl __tls_get_addr => mrs x1, tpidr_el0
5110 nop => add x0, x0, x1
5113 /* First kill the tls_get_addr reloc on the bl instruction. */
5114 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5115 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5117 bfd_putl32 (0x58000000, contents + rel->r_offset);
5118 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5119 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5120 return bfd_reloc_continue;
5123 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5124 return bfd_reloc_continue;
5126 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5129 /* GD->LE relaxation:
5130 ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var
5132 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5133 return bfd_reloc_continue;
5137 /* GD->IE relaxation:
5138 ldr xd, [x0, #:tlsdesc_lo12:var] => ldr x0, [x0, #:gottprel_lo12:var]
5140 insn = bfd_getl32 (contents + rel->r_offset);
5142 bfd_putl32 (insn, contents + rel->r_offset);
5143 return bfd_reloc_continue;
5146 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5149 /* GD->LE relaxation
5150 add x0, #:tlsgd_lo12:var => movk x0, :tprel_g0_nc:var
5151 bl __tls_get_addr => mrs x1, tpidr_el0
5152 nop => add x0, x1, x0
5155 /* First kill the tls_get_addr reloc on the bl instruction. */
5156 BFD_ASSERT (rel->r_offset + 4 == rel[1].r_offset);
5157 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5159 bfd_putl32 (0xf2800000, contents + rel->r_offset);
5160 bfd_putl32 (0xd53bd041, contents + rel->r_offset + 4);
5161 bfd_putl32 (0x8b000020, contents + rel->r_offset + 8);
5162 return bfd_reloc_continue;
5166 /* GD->IE relaxation
5167 ADD x0, #:tlsgd_lo12:var => ldr x0, [x0, #:gottprel_lo12:var]
5168 BL __tls_get_addr => mrs x1, tpidr_el0
5170 NOP => add x0, x1, x0
5173 BFD_ASSERT (ELFNN_R_TYPE (rel[1].r_info) == AARCH64_R (CALL26));
5175 /* Remove the relocation on the BL instruction. */
5176 rel[1].r_info = ELFNN_R_INFO (STN_UNDEF, R_AARCH64_NONE);
5178 bfd_putl32 (0xf9400000, contents + rel->r_offset);
5180 /* We choose to fixup the BL and NOP instructions using the
5181 offset from the second relocation to allow flexibility in
5182 scheduling instructions between the ADD and BL. */
5183 bfd_putl32 (0xd53bd041, contents + rel[1].r_offset);
5184 bfd_putl32 (0x8b000020, contents + rel[1].r_offset + 4);
5185 return bfd_reloc_continue;
5188 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5189 case BFD_RELOC_AARCH64_TLSDESC_CALL:
5190 /* GD->IE/LE relaxation:
5191 add x0, x0, #:tlsdesc_lo12:var => nop
5194 bfd_putl32 (INSN_NOP, contents + rel->r_offset);
5195 return bfd_reloc_ok;
5197 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5198 /* IE->LE relaxation:
5199 adrp xd, :gottprel:var => movz xd, :tprel_g1:var
5203 insn = bfd_getl32 (contents + rel->r_offset);
5204 bfd_putl32 (0xd2a00000 | (insn & 0x1f), contents + rel->r_offset);
5206 return bfd_reloc_continue;
5208 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5209 /* IE->LE relaxation:
5210 ldr xd, [xm, #:gottprel_lo12:var] => movk xd, :tprel_g0_nc:var
5214 insn = bfd_getl32 (contents + rel->r_offset);
5215 bfd_putl32 (0xf2800000 | (insn & 0x1f), contents + rel->r_offset);
5217 return bfd_reloc_continue;
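/* In the two IE->LE rewrites above, (insn & 0x1f) extracts the destination
   register field (bits [4:0]) of the original ADRP/LDR, so the replacement
   MOVZ/MOVK keeps writing to the same register.  */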
5220 return bfd_reloc_continue;
5223 return bfd_reloc_ok;
5226 /* Relocate an AArch64 ELF section. */
5229 elfNN_aarch64_relocate_section (bfd *output_bfd,
5230 struct bfd_link_info *info,
5232 asection *input_section,
5234 Elf_Internal_Rela *relocs,
5235 Elf_Internal_Sym *local_syms,
5236 asection **local_sections)
5238 Elf_Internal_Shdr *symtab_hdr;
5239 struct elf_link_hash_entry **sym_hashes;
5240 Elf_Internal_Rela *rel;
5241 Elf_Internal_Rela *relend;
5243 struct elf_aarch64_link_hash_table *globals;
5244 bfd_boolean save_addend = FALSE;
5247 globals = elf_aarch64_hash_table (info);
5249 symtab_hdr = &elf_symtab_hdr (input_bfd);
5250 sym_hashes = elf_sym_hashes (input_bfd);
5253 relend = relocs + input_section->reloc_count;
5254 for (; rel < relend; rel++)
5256 unsigned int r_type;
5257 bfd_reloc_code_real_type bfd_r_type;
5258 bfd_reloc_code_real_type relaxed_bfd_r_type;
5259 reloc_howto_type *howto;
5260 unsigned long r_symndx;
5261 Elf_Internal_Sym *sym;
5263 struct elf_link_hash_entry *h;
5265 bfd_reloc_status_type r;
5268 bfd_boolean unresolved_reloc = FALSE;
5269 char *error_message = NULL;
5271 r_symndx = ELFNN_R_SYM (rel->r_info);
5272 r_type = ELFNN_R_TYPE (rel->r_info);
5274 bfd_reloc.howto = elfNN_aarch64_howto_from_type (r_type);
5275 howto = bfd_reloc.howto;
5279 (*_bfd_error_handler)
5280 (_("%B: unrecognized relocation (0x%x) in section `%A'"),
5281 input_bfd, input_section, r_type);
5284 bfd_r_type = elfNN_aarch64_bfd_reloc_from_howto (howto);
5290 if (r_symndx < symtab_hdr->sh_info)
5292 sym = local_syms + r_symndx;
5293 sym_type = ELFNN_ST_TYPE (sym->st_info);
5294 sec = local_sections[r_symndx];
5296 /* An object file might have a reference to a local
5297 undefined symbol. This is a daft object file, but we
5298 should at least do something about it. */
5299 if (r_type != R_AARCH64_NONE && r_type != R_AARCH64_NULL
5300 && bfd_is_und_section (sec)
5301 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
5303 if (!info->callbacks->undefined_symbol
5304 (info, bfd_elf_string_from_elf_section
5305 (input_bfd, symtab_hdr->sh_link, sym->st_name),
5306 input_bfd, input_section, rel->r_offset, TRUE))
5310 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
5312 /* Relocate against local STT_GNU_IFUNC symbol. */
5313 if (!info->relocatable
5314 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
5316 h = elfNN_aarch64_get_local_sym_hash (globals, input_bfd,
5321 /* Set STT_GNU_IFUNC symbol value. */
5322 h->root.u.def.value = sym->st_value;
5323 h->root.u.def.section = sec;
5328 bfd_boolean warned, ignored;
5330 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
5331 r_symndx, symtab_hdr, sym_hashes,
5333 unresolved_reloc, warned, ignored);
5338 if (sec != NULL && discarded_section (sec))
5339 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
5340 rel, 1, relend, howto, 0, contents);
5342 if (info->relocatable)
5346 name = h->root.root.string;
5349 name = (bfd_elf_string_from_elf_section
5350 (input_bfd, symtab_hdr->sh_link, sym->st_name));
5351 if (name == NULL || *name == '\0')
5352 name = bfd_section_name (input_bfd, sec);
5356 && r_type != R_AARCH64_NONE
5357 && r_type != R_AARCH64_NULL
5359 || h->root.type == bfd_link_hash_defined
5360 || h->root.type == bfd_link_hash_defweak)
5361 && IS_AARCH64_TLS_RELOC (bfd_r_type) != (sym_type == STT_TLS))
5363 (*_bfd_error_handler)
5364 ((sym_type == STT_TLS
5365 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
5366 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
5368 input_section, (long) rel->r_offset, howto->name, name);
5371 /* We relax only if we can see that there can be a valid transition
5372 from one reloc type to another.
5373 We call elfNN_aarch64_final_link_relocate unless we're completely
5374 done, i.e., the relaxation produced the final output we want. */
5376 relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
5378 if (relaxed_bfd_r_type != bfd_r_type)
5380 bfd_r_type = relaxed_bfd_r_type;
5381 howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
5382 BFD_ASSERT (howto != NULL);
5383 r_type = howto->type;
5384 r = elfNN_aarch64_tls_relax (globals, input_bfd, contents, rel, h);
5385 unresolved_reloc = 0;
5388 r = bfd_reloc_continue;
5390 /* There may be multiple consecutive relocations for the
5391 same offset. In that case we are supposed to treat the
5392 output of each relocation as the addend for the next. */
5393 if (rel + 1 < relend
5394 && rel->r_offset == rel[1].r_offset
5395 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NONE
5396 && ELFNN_R_TYPE (rel[1].r_info) != R_AARCH64_NULL)
5399 save_addend = FALSE;
5401 if (r == bfd_reloc_continue)
5402 r = elfNN_aarch64_final_link_relocate (howto, input_bfd, output_bfd,
5403 input_section, contents, rel,
5404 relocation, info, sec,
5405 h, &unresolved_reloc,
5406 save_addend, &addend, sym);
5408 switch (elfNN_aarch64_bfd_reloc_from_type (r_type))
5410 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5411 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5412 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5413 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5415 bfd_boolean need_relocs = FALSE;
5420 off = symbol_got_offset (input_bfd, h, r_symndx);
5421 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5424 (info->shared || indx != 0) &&
5426 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5427 || h->root.type != bfd_link_hash_undefweak);
5429 BFD_ASSERT (globals->root.srelgot != NULL);
5433 Elf_Internal_Rela rela;
5434 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPMOD));
5436 rela.r_offset = globals->root.sgot->output_section->vma +
5437 globals->root.sgot->output_offset + off;
5440 loc = globals->root.srelgot->contents;
5441 loc += globals->root.srelgot->reloc_count++
5442 * RELOC_SIZE (htab);
5443 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5447 bfd_put_NN (output_bfd,
5448 relocation - dtpoff_base (info),
5449 globals->root.sgot->contents + off
5454 /* This TLS symbol is global. We emit a
5455 relocation to fixup the tls offset at load
5458 ELFNN_R_INFO (indx, AARCH64_R (TLS_DTPREL));
5461 (globals->root.sgot->output_section->vma
5462 + globals->root.sgot->output_offset + off
5465 loc = globals->root.srelgot->contents;
5466 loc += globals->root.srelgot->reloc_count++
5467 * RELOC_SIZE (globals);
5468 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5469 bfd_put_NN (output_bfd, (bfd_vma) 0,
5470 globals->root.sgot->contents + off
5476 bfd_put_NN (output_bfd, (bfd_vma) 1,
5477 globals->root.sgot->contents + off);
5478 bfd_put_NN (output_bfd,
5479 relocation - dtpoff_base (info),
5480 globals->root.sgot->contents + off
5484 symbol_got_offset_mark (input_bfd, h, r_symndx);
5488 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5489 case BFD_RELOC_AARCH64_TLSIE_LDNN_GOTTPREL_LO12_NC:
5490 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5491 if (! symbol_got_offset_mark_p (input_bfd, h, r_symndx))
5493 bfd_boolean need_relocs = FALSE;
5498 off = symbol_got_offset (input_bfd, h, r_symndx);
5500 indx = h && h->dynindx != -1 ? h->dynindx : 0;
5503 (info->shared || indx != 0) &&
5505 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5506 || h->root.type != bfd_link_hash_undefweak);
5508 BFD_ASSERT (globals->root.srelgot != NULL);
5512 Elf_Internal_Rela rela;
5515 rela.r_addend = relocation - dtpoff_base (info);
5519 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLS_TPREL));
5520 rela.r_offset = globals->root.sgot->output_section->vma +
5521 globals->root.sgot->output_offset + off;
5523 loc = globals->root.srelgot->contents;
5524 loc += globals->root.srelgot->reloc_count++
5525 * RELOC_SIZE (htab);
5527 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5529 bfd_put_NN (output_bfd, rela.r_addend,
5530 globals->root.sgot->contents + off);
5533 bfd_put_NN (output_bfd, relocation - tpoff_base (info),
5534 globals->root.sgot->contents + off);
5536 symbol_got_offset_mark (input_bfd, h, r_symndx);
5540 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5541 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5542 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5543 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5544 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5545 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5546 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5547 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5550 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5551 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5552 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5553 case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
5554 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5555 if (! symbol_tlsdesc_got_offset_mark_p (input_bfd, h, r_symndx))
5557 bfd_boolean need_relocs = FALSE;
5558 int indx = h && h->dynindx != -1 ? h->dynindx : 0;
5559 bfd_vma off = symbol_tlsdesc_got_offset (input_bfd, h, r_symndx);
5561 need_relocs = (h == NULL
5562 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
5563 || h->root.type != bfd_link_hash_undefweak);
5565 BFD_ASSERT (globals->root.srelgot != NULL);
5566 BFD_ASSERT (globals->root.sgot != NULL);
5571 Elf_Internal_Rela rela;
5572 rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
5575 rela.r_offset = (globals->root.sgotplt->output_section->vma
5576 + globals->root.sgotplt->output_offset
5577 + off + globals->sgotplt_jump_table_size);
5580 rela.r_addend = relocation - dtpoff_base (info);
5582 /* Allocate the next available slot in the PLT reloc
5583 section to hold our R_AARCH64_TLSDESC; the next
5584 available slot is determined from reloc_count,
5585 which we step.  Note that reloc_count was
5586 artificially moved down while allocating slots for
5587 the real PLT relocs, so that all of the PLT relocs
5588 fit above the initial reloc_count and the
5589 extra stuff fits below.  */
5590 loc = globals->root.srelplt->contents;
5591 loc += globals->root.srelplt->reloc_count++
5592 * RELOC_SIZE (globals);
5594 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
5596 bfd_put_NN (output_bfd, (bfd_vma) 0,
5597 globals->root.sgotplt->contents + off +
5598 globals->sgotplt_jump_table_size);
5599 bfd_put_NN (output_bfd, (bfd_vma) 0,
5600 globals->root.sgotplt->contents + off +
5601 globals->sgotplt_jump_table_size +
5605 symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
5616 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
5617 because such sections are not SEC_ALLOC and thus ld.so will
5618 not process them. */
5619 if (unresolved_reloc
5620 && !((input_section->flags & SEC_DEBUGGING) != 0
5622 && _bfd_elf_section_offset (output_bfd, info, input_section,
5623 +rel->r_offset) != (bfd_vma) - 1)
5625 (*_bfd_error_handler)
5627 ("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
5628 input_bfd, input_section, (long) rel->r_offset, howto->name,
5629 h->root.root.string);
5633 if (r != bfd_reloc_ok && r != bfd_reloc_continue)
5637 case bfd_reloc_overflow:
5638 if (!(*info->callbacks->reloc_overflow)
5639 (info, (h ? &h->root : NULL), name, howto->name, (bfd_vma) 0,
5640 input_bfd, input_section, rel->r_offset))
5644 case bfd_reloc_undefined:
5645 if (!((*info->callbacks->undefined_symbol)
5646 (info, name, input_bfd, input_section,
5647 rel->r_offset, TRUE)))
5651 case bfd_reloc_outofrange:
5652 error_message = _("out of range");
5655 case bfd_reloc_notsupported:
5656 error_message = _("unsupported relocation");
5659 case bfd_reloc_dangerous:
5660 /* error_message should already be set. */
5664 error_message = _("unknown error");
5668 BFD_ASSERT (error_message != NULL);
5669 if (!((*info->callbacks->reloc_dangerous)
5670 (info, error_message, input_bfd, input_section,
5681 /* Set the right machine number. */
5684 elfNN_aarch64_object_p (bfd *abfd)
5687 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64_ilp32);
5689 bfd_default_set_arch_mach (abfd, bfd_arch_aarch64, bfd_mach_aarch64);
5694 /* Function to keep AArch64 specific flags in the ELF header. */
5697 elfNN_aarch64_set_private_flags (bfd *abfd, flagword flags)
5699 if (elf_flags_init (abfd) && elf_elfheader (abfd)->e_flags != flags)
5704 elf_elfheader (abfd)->e_flags = flags;
5705 elf_flags_init (abfd) = TRUE;
5711 /* Merge backend specific data from an object file to the output
5712 object file when linking. */
5715 elfNN_aarch64_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
5719 bfd_boolean flags_compatible = TRUE;
5722 /* Check if we have the same endianness.  */
5723 if (!_bfd_generic_verify_endian_match (ibfd, obfd))
5726 if (!is_aarch64_elf (ibfd) || !is_aarch64_elf (obfd))
5729 /* The input BFD must have had its flags initialised. */
5730 /* The following seems bogus to me -- The flags are initialized in
5731 the assembler but I don't think an elf_flags_init field is
5732 written into the object. */
5733 /* BFD_ASSERT (elf_flags_init (ibfd)); */
5735 in_flags = elf_elfheader (ibfd)->e_flags;
5736 out_flags = elf_elfheader (obfd)->e_flags;
5738 if (!elf_flags_init (obfd))
5740 /* If the input is the default architecture and had the default
5741 flags then do not bother setting the flags for the output
5742 architecture; instead allow future merges to do this.  If no
5743 future merges ever set these flags then they will retain their
5744 uninitialised values, which, unsurprisingly, correspond
5745 to the default values.  */
5746 if (bfd_get_arch_info (ibfd)->the_default
5747 && elf_elfheader (ibfd)->e_flags == 0)
5750 elf_flags_init (obfd) = TRUE;
5751 elf_elfheader (obfd)->e_flags = in_flags;
5753 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
5754 && bfd_get_arch_info (obfd)->the_default)
5755 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
5756 bfd_get_mach (ibfd));
5761 /* Identical flags must be compatible. */
5762 if (in_flags == out_flags)
5765 /* Check to see if the input BFD actually contains any sections. If
5766 not, its flags may not have been initialised either, but it
5767 cannot actually cause any incompatibility.  Do not short-circuit
5768 dynamic objects; their section list may be emptied by
5769 elf_link_add_object_symbols.
5771 Also check to see if there are no code sections in the input.
5772 In this case there is no need to check for code specific flags.
5773 XXX - do we need to worry about floating-point format compatibility
5774 in data sections?  */
5775 if (!(ibfd->flags & DYNAMIC))
5777 bfd_boolean null_input_bfd = TRUE;
5778 bfd_boolean only_data_sections = TRUE;
5780 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
5782 if ((bfd_get_section_flags (ibfd, sec)
5783 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5784 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
5785 only_data_sections = FALSE;
5787 null_input_bfd = FALSE;
5791 if (null_input_bfd || only_data_sections)
5795 return flags_compatible;
5798 /* Display the flags field. */
5801 elfNN_aarch64_print_private_bfd_data (bfd *abfd, void *ptr)
5803 FILE *file = (FILE *) ptr;
5804 unsigned long flags;
5806 BFD_ASSERT (abfd != NULL && ptr != NULL);
5808 /* Print normal ELF private data. */
5809 _bfd_elf_print_private_bfd_data (abfd, ptr);
5811 flags = elf_elfheader (abfd)->e_flags;
5812 /* Ignore init flag - it may not be set, despite the flags field
5813 containing valid data. */
5815 /* xgettext:c-format */
5816 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
5819 fprintf (file, _("<Unrecognised flag bits set>"));
5826 /* Update the got entry reference counts for the section being removed. */
5829 elfNN_aarch64_gc_sweep_hook (bfd *abfd,
5830 struct bfd_link_info *info,
5832 const Elf_Internal_Rela * relocs)
5834 struct elf_aarch64_link_hash_table *htab;
5835 Elf_Internal_Shdr *symtab_hdr;
5836 struct elf_link_hash_entry **sym_hashes;
5837 struct elf_aarch64_local_symbol *locals;
5838 const Elf_Internal_Rela *rel, *relend;
5840 if (info->relocatable)
5843 htab = elf_aarch64_hash_table (info);
5848 elf_section_data (sec)->local_dynrel = NULL;
5850 symtab_hdr = &elf_symtab_hdr (abfd);
5851 sym_hashes = elf_sym_hashes (abfd);
5853 locals = elf_aarch64_locals (abfd);
5855 relend = relocs + sec->reloc_count;
5856 for (rel = relocs; rel < relend; rel++)
5858 unsigned long r_symndx;
5859 unsigned int r_type;
5860 struct elf_link_hash_entry *h = NULL;
5862 r_symndx = ELFNN_R_SYM (rel->r_info);
5864 if (r_symndx >= symtab_hdr->sh_info)
5867 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
5868 while (h->root.type == bfd_link_hash_indirect
5869 || h->root.type == bfd_link_hash_warning)
5870 h = (struct elf_link_hash_entry *) h->root.u.i.link;
5874 Elf_Internal_Sym *isym;
5876 /* A local symbol. */
5877 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
5880 /* Check relocation against local STT_GNU_IFUNC symbol. */
5882 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
5884 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel, FALSE);
5892 struct elf_aarch64_link_hash_entry *eh;
5893 struct elf_dyn_relocs **pp;
5894 struct elf_dyn_relocs *p;
5896 eh = (struct elf_aarch64_link_hash_entry *) h;
5898 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
5901 /* Everything must go for SEC. */
5907 r_type = ELFNN_R_TYPE (rel->r_info);
5908 switch (aarch64_tls_transition (abfd, info, r_type, h, r_symndx))
5910 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
5911 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
5912 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
5913 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
5914 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
5915 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
5916 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
5917 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
5918 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
5919 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
5920 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
5921 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
5922 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
5923 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5924 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
5925 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5926 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5927 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5928 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
5929 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5930 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5931 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5932 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5933 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5934 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5937 if (h->got.refcount > 0)
5938 h->got.refcount -= 1;
5940 if (h->type == STT_GNU_IFUNC)
5942 if (h->plt.refcount > 0)
5943 h->plt.refcount -= 1;
5946 else if (locals != NULL)
5948 if (locals[r_symndx].got_refcount > 0)
5949 locals[r_symndx].got_refcount -= 1;
5953 case BFD_RELOC_AARCH64_CALL26:
5954 case BFD_RELOC_AARCH64_JUMP26:
5955 /* If this is a local symbol then we resolve it
5956 directly without creating a PLT entry. */
5960 if (h->plt.refcount > 0)
5961 h->plt.refcount -= 1;
5964 case BFD_RELOC_AARCH64_MOVW_G0_NC:
5965 case BFD_RELOC_AARCH64_MOVW_G1_NC:
5966 case BFD_RELOC_AARCH64_MOVW_G2_NC:
5967 case BFD_RELOC_AARCH64_MOVW_G3:
5968 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
5969 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
5970 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
5971 case BFD_RELOC_AARCH64_NN:
5972 if (h != NULL && info->executable)
5974 if (h->plt.refcount > 0)
5975 h->plt.refcount -= 1;
5987 /* Adjust a symbol defined by a dynamic object and referenced by a
5988 regular object. The current definition is in some section of the
5989 dynamic object, but we're not including those sections. We have to
5990 change the definition to something the rest of the link can understand.  */
5994 elfNN_aarch64_adjust_dynamic_symbol (struct bfd_link_info *info,
5995 struct elf_link_hash_entry *h)
5997 struct elf_aarch64_link_hash_table *htab;
6000 /* If this is a function, put it in the procedure linkage table. We
6001 will fill in the contents of the procedure linkage table later,
6002 when we know the address of the .got section. */
6003 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
6005 if (h->plt.refcount <= 0
6006 || (h->type != STT_GNU_IFUNC
6007 && (SYMBOL_CALLS_LOCAL (info, h)
6008 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
6009 && h->root.type == bfd_link_hash_undefweak))))
6011 /* This case can occur if we saw a CALL26 reloc in
6012 an input file, but the symbol wasn't referred to
6013 by a dynamic object or all references were
6014 garbage collected. In which case we can end up
6016 h->plt.offset = (bfd_vma) - 1;
6023 /* Otherwise, reset to -1. */
6024 h->plt.offset = (bfd_vma) - 1;
6027 /* If this is a weak symbol, and there is a real definition, the
6028 processor independent code will have arranged for us to see the
6029 real definition first, and we can just use the same value. */
6030 if (h->u.weakdef != NULL)
6032 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
6033 || h->u.weakdef->root.type == bfd_link_hash_defweak);
6034 h->root.u.def.section = h->u.weakdef->root.u.def.section;
6035 h->root.u.def.value = h->u.weakdef->root.u.def.value;
6036 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc)
6037 h->non_got_ref = h->u.weakdef->non_got_ref;
6041 /* If we are creating a shared library, we must presume that the
6042 only references to the symbol are via the global offset table.
6043 For such cases we need not do anything here; the relocations will
6044 be handled correctly by relocate_section. */
6048 /* If there are no references to this symbol that do not use the
6049 GOT, we don't need to generate a copy reloc. */
6050 if (!h->non_got_ref)
6053 /* If -z nocopyreloc was given, we won't generate them either. */
6054 if (info->nocopyreloc)
6060 /* We must allocate the symbol in our .dynbss section, which will
6061 become part of the .bss section of the executable. There will be
6062 an entry for this symbol in the .dynsym section. The dynamic
6063 object will contain position independent code, so all references
6064 from the dynamic object to this symbol will go through the global
6065 offset table. The dynamic linker will use the .dynsym entry to
6066 determine the address it must put in the global offset table, so
6067 both the dynamic object and the regular object will refer to the
6068 same memory location for the variable. */
6070 htab = elf_aarch64_hash_table (info);
6072 /* We must generate a R_AARCH64_COPY reloc to tell the dynamic linker
6073 to copy the initial value out of the dynamic object and into the
6074 runtime process image. */
6075 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
6077 htab->srelbss->size += RELOC_SIZE (htab);
6083 return _bfd_elf_adjust_dynamic_copy (info, h, s);
6088 elfNN_aarch64_allocate_local_symbols (bfd *abfd, unsigned number)
6090 struct elf_aarch64_local_symbol *locals;
6091 locals = elf_aarch64_locals (abfd);
6094 locals = (struct elf_aarch64_local_symbol *)
6095 bfd_zalloc (abfd, number * sizeof (struct elf_aarch64_local_symbol));
6098 elf_aarch64_locals (abfd) = locals;
6103 /* Create the .got section to hold the global offset table. */
6106 aarch64_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
6108 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
6111 struct elf_link_hash_entry *h;
6112 struct elf_link_hash_table *htab = elf_hash_table (info);
6114 /* This function may be called more than once. */
6115 s = bfd_get_linker_section (abfd, ".got");
6119 flags = bed->dynamic_sec_flags;
6121 s = bfd_make_section_anyway_with_flags (abfd,
6122 (bed->rela_plts_and_copies_p
6123 ? ".rela.got" : ".rel.got"),
6124 (bed->dynamic_sec_flags
6127 || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6131 s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
6133 || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
6136 htab->sgot->size += GOT_ENTRY_SIZE;
6138 if (bed->want_got_sym)
6140 /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
6141 (or .got.plt) section. We don't do this in the linker script
6142 because we don't want to define the symbol if we are not creating
6143 a global offset table. */
6144 h = _bfd_elf_define_linkage_sym (abfd, info, s,
6145 "_GLOBAL_OFFSET_TABLE_");
6146 elf_hash_table (info)->hgot = h;
6151 if (bed->want_got_plt)
6153 s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
6155 || !bfd_set_section_alignment (abfd, s,
6156 bed->s->log_file_align))
6161 /* The first bit of the global offset table is the header. */
6162 s->size += bed->got_header_size;
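/* The header reserved here (bed->got_header_size) covers the first few
   entries of .got.plt, conventionally used as follows on ELF targets:
   entry 0 holds the address of _DYNAMIC, and the next entries are filled
   in by the dynamic linker to support lazy PLT resolution.  */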
6167 /* Look through the relocs for a section during the first phase. */
6170 elfNN_aarch64_check_relocs (bfd *abfd, struct bfd_link_info *info,
6171 asection *sec, const Elf_Internal_Rela *relocs)
6173 Elf_Internal_Shdr *symtab_hdr;
6174 struct elf_link_hash_entry **sym_hashes;
6175 const Elf_Internal_Rela *rel;
6176 const Elf_Internal_Rela *rel_end;
6179 struct elf_aarch64_link_hash_table *htab;
6181 if (info->relocatable)
6184 BFD_ASSERT (is_aarch64_elf (abfd));
6186 htab = elf_aarch64_hash_table (info);
6189 symtab_hdr = &elf_symtab_hdr (abfd);
6190 sym_hashes = elf_sym_hashes (abfd);
6192 rel_end = relocs + sec->reloc_count;
6193 for (rel = relocs; rel < rel_end; rel++)
6195 struct elf_link_hash_entry *h;
6196 unsigned long r_symndx;
6197 unsigned int r_type;
6198 bfd_reloc_code_real_type bfd_r_type;
6199 Elf_Internal_Sym *isym;
6201 r_symndx = ELFNN_R_SYM (rel->r_info);
6202 r_type = ELFNN_R_TYPE (rel->r_info);
6204 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
6206 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
6211 if (r_symndx < symtab_hdr->sh_info)
6213 /* A local symbol. */
6214 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6219 /* Check relocation against local STT_GNU_IFUNC symbol. */
6220 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
6222 h = elfNN_aarch64_get_local_sym_hash (htab, abfd, rel,
6227 /* Fake a STT_GNU_IFUNC symbol. */
6228 h->type = STT_GNU_IFUNC;
6231 h->forced_local = 1;
6232 h->root.type = bfd_link_hash_defined;
6239 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
6240 while (h->root.type == bfd_link_hash_indirect
6241 || h->root.type == bfd_link_hash_warning)
6242 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6244 /* PR15323, ref flags aren't set for references in the same
6246 h->root.non_ir_ref = 1;
6249 /* Could be done earlier, if h were already available. */
6250 bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
6254 /* Create the ifunc sections for static executables.  If we
6255 never see an indirect function symbol and are not building
6256 a static executable, those sections will be empty and
6257 won't appear in the output.  */
6263 case BFD_RELOC_AARCH64_NN:
6264 case BFD_RELOC_AARCH64_CALL26:
6265 case BFD_RELOC_AARCH64_JUMP26:
6266 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6267 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6268 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6269 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6270 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6271 case BFD_RELOC_AARCH64_ADD_LO12:
6272 if (htab->root.dynobj == NULL)
6273 htab->root.dynobj = abfd;
6274 if (!_bfd_elf_create_ifunc_sections (htab->root.dynobj, info))
6279 /* It is referenced by a non-shared object. */
6281 h->root.non_ir_ref = 1;
6286 case BFD_RELOC_AARCH64_NN:
6288 /* We don't need to handle relocs into sections not going into
6289 the "real" output. */
6290 if ((sec->flags & SEC_ALLOC) == 0)
6298 h->plt.refcount += 1;
6299 h->pointer_equality_needed = 1;
6302 /* No need to do anything if we're not creating a shared
6308 struct elf_dyn_relocs *p;
6309 struct elf_dyn_relocs **head;
6311 /* We must copy these reloc types into the output file.
6312 Create a reloc section in dynobj and make room for
6316 if (htab->root.dynobj == NULL)
6317 htab->root.dynobj = abfd;
6319 sreloc = _bfd_elf_make_dynamic_reloc_section
6320 (sec, htab->root.dynobj, LOG_FILE_ALIGN, abfd, /*rela? */ TRUE);
6326 /* If this is a global symbol, we count the number of
6327 relocations we need for this symbol. */
6330 struct elf_aarch64_link_hash_entry *eh;
6331 eh = (struct elf_aarch64_link_hash_entry *) h;
6332 head = &eh->dyn_relocs;
6336 /* Track dynamic relocs needed for local syms too.
6337 We really need local syms available to do this
6343 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
6348 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
6352 /* Beware of type punned pointers vs strict aliasing
6354 vpp = &(elf_section_data (s)->local_dynrel);
6355 head = (struct elf_dyn_relocs **) vpp;
6359 if (p == NULL || p->sec != sec)
6361 bfd_size_type amt = sizeof *p;
6362 p = ((struct elf_dyn_relocs *)
6363 bfd_zalloc (htab->root.dynobj, amt));
6376 /* RR: We probably want to keep a consistency check that
6377 there are no dangling GOT_PAGE relocs. */
6378 case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
6379 case BFD_RELOC_AARCH64_GOT_LD_PREL19:
6380 case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
6381 case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
6382 case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
6383 case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
6384 case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
6385 case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
6386 case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
6387 case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
6388 case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
6389 case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
6390 case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
6391 case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6392 case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
6393 case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6394 case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
6395 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6396 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
6397 case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6398 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
6399 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6400 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
6401 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6402 case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
6405 unsigned old_got_type;
6407 got_type = aarch64_reloc_got_type (bfd_r_type);
6411 h->got.refcount += 1;
6412 old_got_type = elf_aarch64_hash_entry (h)->got_type;
6416 struct elf_aarch64_local_symbol *locals;
6418 if (!elfNN_aarch64_allocate_local_symbols
6419 (abfd, symtab_hdr->sh_info))
6422 locals = elf_aarch64_locals (abfd);
6423 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6424 locals[r_symndx].got_refcount += 1;
6425 old_got_type = locals[r_symndx].got_type;
6428 /* If a variable is accessed with both general dynamic TLS
6429 methods, two slots may be created. */
6430 if (GOT_TLS_GD_ANY_P (old_got_type) && GOT_TLS_GD_ANY_P (got_type))
6431 got_type |= old_got_type;
6433 /* We will already have issued an error message if there
6434 is a TLS/non-TLS mismatch, based on the symbol type.
6435 So just combine any TLS types needed. */
6436 if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
6437 && got_type != GOT_NORMAL)
6438 got_type |= old_got_type;
6440 /* If the symbol is accessed by both IE and GD methods, we
6441 are able to relax. Turn off the GD flag, without
6442 disturbing any other kind of TLS type that may be involved.  */
6444 if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
6445 got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
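/* Illustrative sketch (not part of the backend): with the one-bit-per-access-
   model encoding of got_type used in this file, a symbol referenced through
   both GD and IE sequences keeps only the IE bit after the masking above, so
   later sizing allocates a single IE GOT slot:

     unsigned got_type = GOT_TLS_GD | GOT_TLS_IE;
     if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
       got_type &= ~(GOT_TLSDESC_GD | GOT_TLS_GD);
     BFD_ASSERT (got_type == GOT_TLS_IE);
*/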
6447 if (old_got_type != got_type)
6450 elf_aarch64_hash_entry (h)->got_type = got_type;
6453 struct elf_aarch64_local_symbol *locals;
6454 locals = elf_aarch64_locals (abfd);
6455 BFD_ASSERT (r_symndx < symtab_hdr->sh_info);
6456 locals[r_symndx].got_type = got_type;
6460 if (htab->root.dynobj == NULL)
6461 htab->root.dynobj = abfd;
6462 if (! aarch64_elf_create_got_section (htab->root.dynobj, info))
6467 case BFD_RELOC_AARCH64_MOVW_G0_NC:
6468 case BFD_RELOC_AARCH64_MOVW_G1_NC:
6469 case BFD_RELOC_AARCH64_MOVW_G2_NC:
6470 case BFD_RELOC_AARCH64_MOVW_G3:
6473 int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
6474 (*_bfd_error_handler)
6475 (_("%B: relocation %s against `%s' can not be used when making "
6476 "a shared object; recompile with -fPIC"),
6477 abfd, elfNN_aarch64_howto_table[howto_index].name,
6478 (h) ? h->root.root.string : "a local symbol");
6479 bfd_set_error (bfd_error_bad_value);
6483 case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
6484 case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
6485 case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
6486 if (h != NULL && info->executable)
6488 /* If this reloc is in a read-only section, we might
6489 need a copy reloc. We can't check reliably at this
6490 stage whether the section is read-only, as input
6491 sections have not yet been mapped to output sections.
6492 Tentatively set the flag for now, and correct in
6493 adjust_dynamic_symbol. */
6495 h->plt.refcount += 1;
6496 h->pointer_equality_needed = 1;
6498 /* FIXME: RR we need to handle these in shared libraries
6499 and essentially bomb out as these are non-PIC
6500 relocations in shared libraries.  */
6503 case BFD_RELOC_AARCH64_CALL26:
6504 case BFD_RELOC_AARCH64_JUMP26:
6505 /* If this is a local symbol then we resolve it
6506 directly without creating a PLT entry. */
6511 if (h->plt.refcount <= 0)
6512 h->plt.refcount = 1;
6514 h->plt.refcount += 1;
6525 /* Treat mapping symbols as special target symbols. */
6528 elfNN_aarch64_is_target_special_symbol (bfd *abfd ATTRIBUTE_UNUSED,
6531 return bfd_is_aarch64_special_symbol_name (sym->name,
6532 BFD_AARCH64_SPECIAL_SYM_TYPE_ANY);
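/* For reference, the ABI-defined mapping symbol names matched by this
   predicate are "$x" (start of A64 code) and "$d" (start of data), each
   optionally followed by a '.' and an arbitrary suffix, e.g. "$x.42".
   A minimal stand-alone sketch of such a check (hypothetical helper, not
   the one used above):

     static bfd_boolean
     looks_like_aarch64_mapping_symbol (const char *name)
     {
       return name != NULL
              && name[0] == '$'
              && (name[1] == 'x' || name[1] == 'd')
              && (name[2] == 0 || name[2] == '.');
     }
*/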
6535 /* This is a copy of elf_find_function () from elf.c except that
6536 AArch64 mapping symbols are ignored when looking for function names. */
6539 aarch64_elf_find_function (bfd *abfd ATTRIBUTE_UNUSED,
6543 const char **filename_ptr,
6544 const char **functionname_ptr)
6546 const char *filename = NULL;
6547 asymbol *func = NULL;
6548 bfd_vma low_func = 0;
6551 for (p = symbols; *p != NULL; p++)
6555 q = (elf_symbol_type *) * p;
6557 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
6562 filename = bfd_asymbol_name (&q->symbol);
6566 /* Skip mapping symbols. */
6567 if ((q->symbol.flags & BSF_LOCAL)
6568 && (bfd_is_aarch64_special_symbol_name
6569 (q->symbol.name, BFD_AARCH64_SPECIAL_SYM_TYPE_ANY)))
6572 if (bfd_get_section (&q->symbol) == section
6573 && q->symbol.value >= low_func && q->symbol.value <= offset)
6575 func = (asymbol *) q;
6576 low_func = q->symbol.value;
6586 *filename_ptr = filename;
6587 if (functionname_ptr)
6588 *functionname_ptr = bfd_asymbol_name (func);
6594 /* Find the nearest line to a particular section and offset, for error
6595 reporting. This code is a duplicate of the code in elf.c, except
6596 that it uses aarch64_elf_find_function. */
6599 elfNN_aarch64_find_nearest_line (bfd *abfd,
6603 const char **filename_ptr,
6604 const char **functionname_ptr,
6605 unsigned int *line_ptr,
6606 unsigned int *discriminator_ptr)
6608 bfd_boolean found = FALSE;
6610 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
6611 filename_ptr, functionname_ptr,
6612 line_ptr, discriminator_ptr,
6613 dwarf_debug_sections, 0,
6614 &elf_tdata (abfd)->dwarf2_find_line_info))
6616 if (!*functionname_ptr)
6617 aarch64_elf_find_function (abfd, symbols, section, offset,
6618 *filename_ptr ? NULL : filename_ptr,
6624 /* Skip _bfd_dwarf1_find_nearest_line since no known AArch64
6625 toolchain uses DWARF1. */
6627 if (!_bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
6628 &found, filename_ptr,
6629 functionname_ptr, line_ptr,
6630 &elf_tdata (abfd)->line_info))
6633 if (found && (*functionname_ptr || *line_ptr))
6636 if (symbols == NULL)
6639 if (!aarch64_elf_find_function (abfd, symbols, section, offset,
6640 filename_ptr, functionname_ptr))
6648 elfNN_aarch64_find_inliner_info (bfd *abfd,
6649 const char **filename_ptr,
6650 const char **functionname_ptr,
6651 unsigned int *line_ptr)
6654 found = _bfd_dwarf2_find_inliner_info
6655 (abfd, filename_ptr,
6656 functionname_ptr, line_ptr, &elf_tdata (abfd)->dwarf2_find_line_info);
6662 elfNN_aarch64_post_process_headers (bfd *abfd,
6663 struct bfd_link_info *link_info)
6665 Elf_Internal_Ehdr *i_ehdrp; /* ELF file header, internal form. */
6667 i_ehdrp = elf_elfheader (abfd);
6668 i_ehdrp->e_ident[EI_ABIVERSION] = AARCH64_ELF_ABI_VERSION;
6670 _bfd_elf_post_process_headers (abfd, link_info);
6673 static enum elf_reloc_type_class
6674 elfNN_aarch64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
6675 const asection *rel_sec ATTRIBUTE_UNUSED,
6676 const Elf_Internal_Rela *rela)
6678 switch ((int) ELFNN_R_TYPE (rela->r_info))
6680 case AARCH64_R (RELATIVE):
6681 return reloc_class_relative;
6682 case AARCH64_R (JUMP_SLOT):
6683 return reloc_class_plt;
6684 case AARCH64_R (COPY):
6685 return reloc_class_copy;
6687 return reloc_class_normal;
6691 /* Handle an AArch64 specific section when reading an object file. This is
6692 called when bfd_section_from_shdr finds a section with an unknown
6696 elfNN_aarch64_section_from_shdr (bfd *abfd,
6697 Elf_Internal_Shdr *hdr,
6698 const char *name, int shindex)
6700 /* There ought to be a place to keep ELF backend specific flags, but
6701 at the moment there isn't one. We just keep track of the
6702 sections by their name, instead. Fortunately, the ABI gives
6703 names for all the AArch64 specific sections, so we will probably get away with this.  */
6705 switch (hdr->sh_type)
6707 case SHT_AARCH64_ATTRIBUTES:
6714 if (!_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
6720 /* A structure used to record a list of sections, independently
6721 of the next and prev fields in the asection structure. */
6722 typedef struct section_list
6725 struct section_list *next;
6726 struct section_list *prev;
6730 /* Unfortunately we need to keep a list of sections for which
6731 an _aarch64_elf_section_data structure has been allocated. This
6732 is because it is possible for functions like elfNN_aarch64_write_section
6733 to be called on a section which has had an elf_data_structure
6734 allocated for it (and so the used_by_bfd field is valid) but
6735 for which the AArch64 extended version of this structure - the
6736 _aarch64_elf_section_data structure - has not been allocated. */
6737 static section_list *sections_with_aarch64_elf_section_data = NULL;
6740 record_section_with_aarch64_elf_section_data (asection *sec)
6742 struct section_list *entry;
6744 entry = bfd_malloc (sizeof (*entry));
6748 entry->next = sections_with_aarch64_elf_section_data;
6750 if (entry->next != NULL)
6751 entry->next->prev = entry;
6752 sections_with_aarch64_elf_section_data = entry;
6755 static struct section_list *
6756 find_aarch64_elf_section_entry (asection *sec)
6758 struct section_list *entry;
6759 static struct section_list *last_entry = NULL;
6761 /* This is a short cut for the typical case where the sections are added
6762 to the sections_with_aarch64_elf_section_data list in forward order and
6763 then looked up here in backwards order. This makes a real difference
6764 to the ld-srec/sec64k.exp linker test. */
6765 entry = sections_with_aarch64_elf_section_data;
6766 if (last_entry != NULL)
6768 if (last_entry->sec == sec)
6770 else if (last_entry->next != NULL && last_entry->next->sec == sec)
6771 entry = last_entry->next;
6774 for (; entry; entry = entry->next)
6775 if (entry->sec == sec)
6779 /* Record the entry prior to this one - it is the entry we are
6780 most likely to want to locate next time. Also this way if we
6781 have been called from
6782 unrecord_section_with_aarch64_elf_section_data () we will not
6783 be caching a pointer that is about to be freed. */
6784 last_entry = entry->prev;
6790 unrecord_section_with_aarch64_elf_section_data (asection *sec)
6792 struct section_list *entry;
6794 entry = find_aarch64_elf_section_entry (sec);
6798 if (entry->prev != NULL)
6799 entry->prev->next = entry->next;
6800 if (entry->next != NULL)
6801 entry->next->prev = entry->prev;
6802 if (entry == sections_with_aarch64_elf_section_data)
6803 sections_with_aarch64_elf_section_data = entry->next;
6812 struct bfd_link_info *info;
6815 int (*func) (void *, const char *, Elf_Internal_Sym *,
6816 asection *, struct elf_link_hash_entry *);
6817 } output_arch_syminfo;
6819 enum map_symbol_type
6826 /* Output a single mapping symbol. */
6829 elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
6830 enum map_symbol_type type, bfd_vma offset)
6832 static const char *names[2] = { "$x", "$d" };
6833 Elf_Internal_Sym sym;
6835 sym.st_value = (osi->sec->output_section->vma
6836 + osi->sec->output_offset + offset);
6839 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6840 sym.st_shndx = osi->sec_shndx;
6841 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
6846 /* Output mapping symbols for PLT entries associated with H. */
6849 elfNN_aarch64_output_plt_map (struct elf_link_hash_entry *h, void *inf)
6851 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
6854 if (h->root.type == bfd_link_hash_indirect)
6857 if (h->root.type == bfd_link_hash_warning)
6858 /* When warning symbols are created, they **replace** the "real"
6859 entry in the hash table, thus we never get to see the real
6860 symbol in a hash traversal. So look at it now. */
6861 h = (struct elf_link_hash_entry *) h->root.u.i.link;
6863 if (h->plt.offset == (bfd_vma) - 1)
6866 addr = h->plt.offset;
6869 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6876 /* Output a single local symbol for a generated stub. */
6879 elfNN_aarch64_output_stub_sym (output_arch_syminfo *osi, const char *name,
6880 bfd_vma offset, bfd_vma size)
6882 Elf_Internal_Sym sym;
6884 sym.st_value = (osi->sec->output_section->vma
6885 + osi->sec->output_offset + offset);
6888 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6889 sym.st_shndx = osi->sec_shndx;
6890 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
6894 aarch64_map_one_stub (struct bfd_hash_entry *gen_entry, void *in_arg)
6896 struct elf_aarch64_stub_hash_entry *stub_entry;
6900 output_arch_syminfo *osi;
6902 /* Massage our args to the form they really have. */
6903 stub_entry = (struct elf_aarch64_stub_hash_entry *) gen_entry;
6904 osi = (output_arch_syminfo *) in_arg;
6906 stub_sec = stub_entry->stub_sec;
6908 /* Ensure this stub is attached to the current section being processed.  */
6910 if (stub_sec != osi->sec)
6913 addr = (bfd_vma) stub_entry->stub_offset;
6915 stub_name = stub_entry->output_name;
6917 switch (stub_entry->stub_type)
6919 case aarch64_stub_adrp_branch:
6920 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6921 sizeof (aarch64_adrp_branch_stub)))
6923 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6926 case aarch64_stub_long_branch:
6927 if (!elfNN_aarch64_output_stub_sym
6928 (osi, stub_name, addr, sizeof (aarch64_long_branch_stub)))
6930 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6932 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_DATA, addr + 16))
6935 case aarch64_stub_erratum_835769_veneer:
6936 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6937 sizeof (aarch64_erratum_835769_stub)))
6939 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6942 case aarch64_stub_erratum_843419_veneer:
6943 if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
6944 sizeof (aarch64_erratum_843419_stub)))
6946 if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
6957 /* Output mapping symbols for linker generated sections. */
6960 elfNN_aarch64_output_arch_local_syms (bfd *output_bfd,
6961 struct bfd_link_info *info,
6963 int (*func) (void *, const char *,
6966 struct elf_link_hash_entry
6969 output_arch_syminfo osi;
6970 struct elf_aarch64_link_hash_table *htab;
6972 htab = elf_aarch64_hash_table (info);
6978 /* Long call stubs.  */
6979 if (htab->stub_bfd && htab->stub_bfd->sections)
6983 for (stub_sec = htab->stub_bfd->sections;
6984 stub_sec != NULL; stub_sec = stub_sec->next)
6986 /* Ignore non-stub sections. */
6987 if (!strstr (stub_sec->name, STUB_SUFFIX))
6992 osi.sec_shndx = _bfd_elf_section_from_bfd_section
6993 (output_bfd, osi.sec->output_section);
6995 /* The first instruction in a stub is always a branch. */
6996 if (!elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0))
6999 bfd_hash_traverse (&htab->stub_hash_table, aarch64_map_one_stub,
7004 /* Finally, output mapping symbols for the PLT. */
7005 if (!htab->root.splt || htab->root.splt->size == 0)
7008 /* For now live without mapping symbols for the plt. */
7009 osi.sec_shndx = _bfd_elf_section_from_bfd_section
7010 (output_bfd, htab->root.splt->output_section);
7011 osi.sec = htab->root.splt;
7013 elf_link_hash_traverse (&htab->root, elfNN_aarch64_output_plt_map,
7020 /* Allocate target specific section data. */
7023 elfNN_aarch64_new_section_hook (bfd *abfd, asection *sec)
7025 if (!sec->used_by_bfd)
7027 _aarch64_elf_section_data *sdata;
7028 bfd_size_type amt = sizeof (*sdata);
7030 sdata = bfd_zalloc (abfd, amt);
7033 sec->used_by_bfd = sdata;
7036 record_section_with_aarch64_elf_section_data (sec);
7038 return _bfd_elf_new_section_hook (abfd, sec);
7043 unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
7045 void *ignore ATTRIBUTE_UNUSED)
7047 unrecord_section_with_aarch64_elf_section_data (sec);
7051 elfNN_aarch64_close_and_cleanup (bfd *abfd)
7054 bfd_map_over_sections (abfd,
7055 unrecord_section_via_map_over_sections, NULL);
7057 return _bfd_elf_close_and_cleanup (abfd);
7061 elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
7064 bfd_map_over_sections (abfd,
7065 unrecord_section_via_map_over_sections, NULL);
7067 return _bfd_free_cached_info (abfd);
7070 /* Create dynamic sections. This is different from the ARM backend in that
7071 the got, plt, gotplt and their relocation sections are all created in the
7072 standard part of the bfd elf backend. */
7075 elfNN_aarch64_create_dynamic_sections (bfd *dynobj,
7076 struct bfd_link_info *info)
7078 struct elf_aarch64_link_hash_table *htab;
7080 /* We need to create the .got section.  */
7081 if (!aarch64_elf_create_got_section (dynobj, info))
7084 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
7087 htab = elf_aarch64_hash_table (info);
7088 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
7090 htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
7092 if (!htab->sdynbss || (!info->shared && !htab->srelbss))
7099 /* Allocate space in .plt, .got and associated reloc sections for dynamic relocs.  */
7103 elfNN_aarch64_allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
7105 struct bfd_link_info *info;
7106 struct elf_aarch64_link_hash_table *htab;
7107 struct elf_aarch64_link_hash_entry *eh;
7108 struct elf_dyn_relocs *p;
7110 /* An example of a bfd_link_hash_indirect symbol is a versioned
7111 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7112 -> __gxx_personality_v0(bfd_link_hash_defined)
7114 There is no need to process bfd_link_hash_indirect symbols here
7115 because we will also be presented with the concrete instance of
7116 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7117 called to copy all relevant data from the generic to the concrete symbol.  */
7120 if (h->root.type == bfd_link_hash_indirect)
7123 if (h->root.type == bfd_link_hash_warning)
7124 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7126 info = (struct bfd_link_info *) inf;
7127 htab = elf_aarch64_hash_table (info);
7129 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
7130 here if it is defined and referenced in a non-shared object. */
7131 if (h->type == STT_GNU_IFUNC
7134 else if (htab->root.dynamic_sections_created && h->plt.refcount > 0)
7136 /* Make sure this symbol is output as a dynamic symbol.
7137 Undefined weak syms won't yet be marked as dynamic. */
7138 if (h->dynindx == -1 && !h->forced_local)
7140 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7144 if (info->shared || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
7146 asection *s = htab->root.splt;
7148 /* If this is the first .plt entry, make room for the special first entry.  */
7151 s->size += htab->plt_header_size;
7153 h->plt.offset = s->size;
7155 /* If this symbol is not defined in a regular file, and we are
7156 not generating a shared library, then set the symbol to this
7157 location in the .plt. This is required to make function
7158 pointers compare as equal between the normal executable and
7159 the shared library. */
7160 if (!info->shared && !h->def_regular)
7162 h->root.u.def.section = s;
7163 h->root.u.def.value = h->plt.offset;
7166 /* Make room for this entry. For now we only create the
7167 small model PLT entries. We later need to find a way
7168 of relaxing into these from the large model PLT entries. */
7169 s->size += PLT_SMALL_ENTRY_SIZE;
7171 /* We also need to make an entry in the .got.plt section, which
7172 will be placed in the .got section by the linker script. */
7173 htab->root.sgotplt->size += GOT_ENTRY_SIZE;
7175 /* We also need to make an entry in the .rela.plt section. */
7176 htab->root.srelplt->size += RELOC_SIZE (htab);
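/* Sizing sketch (illustration only): with the usual ELF64 values defined
   earlier in this file (32-byte PLT header, 16-byte small PLT entries,
   8-byte GOT entries, 24-byte RELA relocs), a link with N PLT-using symbols
   ends up reserving roughly:

     .plt      : 32 + N * 16 bytes
     .got.plt  : (3 + N) * 8 bytes   (three reserved slots plus one per entry)
     .rela.plt : N * 24 bytes
*/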
7178 /* We need to ensure that all GOT entries that serve the PLT
7179 are consecutive with the special GOT slots [0] [1] and
7180 [2].  Any additional relocations, such as
7181 R_AARCH64_TLSDESC, must be placed after the PLT related
7182 entries. We abuse the reloc_count such that during
7183 sizing we adjust reloc_count to indicate the number of
7184 PLT related reserved entries. In subsequent phases when
7185 filling in the contents of the reloc entries, PLT related
7186 entries are placed by computing their PLT index (0
7187 .. reloc_count).  While other non-PLT relocs are placed
7188 at the slot indicated by reloc_count and reloc_count is updated.  */
7191 htab->root.srelplt->reloc_count++;
7195 h->plt.offset = (bfd_vma) - 1;
7201 h->plt.offset = (bfd_vma) - 1;
7205 eh = (struct elf_aarch64_link_hash_entry *) h;
7206 eh->tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7208 if (h->got.refcount > 0)
7211 unsigned got_type = elf_aarch64_hash_entry (h)->got_type;
7213 h->got.offset = (bfd_vma) - 1;
7215 dyn = htab->root.dynamic_sections_created;
7217 /* Make sure this symbol is output as a dynamic symbol.
7218 Undefined weak syms won't yet be marked as dynamic. */
7219 if (dyn && h->dynindx == -1 && !h->forced_local)
7221 if (!bfd_elf_link_record_dynamic_symbol (info, h))
7225 if (got_type == GOT_UNKNOWN)
7228 else if (got_type == GOT_NORMAL)
7230 h->got.offset = htab->root.sgot->size;
7231 htab->root.sgot->size += GOT_ENTRY_SIZE;
7232 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7233 || h->root.type != bfd_link_hash_undefweak)
7235 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7237 htab->root.srelgot->size += RELOC_SIZE (htab);
7243 if (got_type & GOT_TLSDESC_GD)
7245 eh->tlsdesc_got_jump_table_offset =
7246 (htab->root.sgotplt->size
7247 - aarch64_compute_jump_table_size (htab));
7248 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7249 h->got.offset = (bfd_vma) - 2;
7252 if (got_type & GOT_TLS_GD)
7254 h->got.offset = htab->root.sgot->size;
7255 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7258 if (got_type & GOT_TLS_IE)
7260 h->got.offset = htab->root.sgot->size;
7261 htab->root.sgot->size += GOT_ENTRY_SIZE;
7264 indx = h && h->dynindx != -1 ? h->dynindx : 0;
7265 if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
7266 || h->root.type != bfd_link_hash_undefweak)
7269 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
7271 if (got_type & GOT_TLSDESC_GD)
7273 htab->root.srelplt->size += RELOC_SIZE (htab);
7274 /* Note reloc_count not incremented here! We have
7275 already adjusted reloc_count for this relocation type.  */
7278 /* TLSDESC PLT is now needed, but not yet determined. */
7279 htab->tlsdesc_plt = (bfd_vma) - 1;
7282 if (got_type & GOT_TLS_GD)
7283 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7285 if (got_type & GOT_TLS_IE)
7286 htab->root.srelgot->size += RELOC_SIZE (htab);
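/* Summary of the space reserved above, per TLS access model recorded in
   got_type (dynamic relocs only when the symbol needs them at all):

     GOT_TLSDESC_GD : two slots in .got.plt, one reloc  in .rela.plt
     GOT_TLS_GD     : two slots in .got,     two relocs in .rela.got
     GOT_TLS_IE     : one slot  in .got,     one reloc  in .rela.got
*/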
7292 h->got.offset = (bfd_vma) - 1;
7295 if (eh->dyn_relocs == NULL)
7298 /* In the shared -Bsymbolic case, discard space allocated for
7299 dynamic pc-relative relocs against symbols which turn out to be
7300 defined in regular objects. For the normal shared case, discard
7301 space for pc-relative relocs that have become local due to symbol
7302 visibility changes. */
7306 /* Relocs that use pc_count are those that appear on a call
7307 insn, or certain REL relocs that can be generated via assembly.
7308 We want calls to protected symbols to resolve directly to the
7309 function rather than going via the plt. If people want
7310 function pointer comparisons to work as expected then they
7311 should avoid writing weird assembly. */
7312 if (SYMBOL_CALLS_LOCAL (info, h))
7314 struct elf_dyn_relocs **pp;
7316 for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
7318 p->count -= p->pc_count;
7327 /* Also discard relocs on undefined weak syms with non-default visibility.  */
7329 if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
7331 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7332 eh->dyn_relocs = NULL;
7334 /* Make sure undefined weak symbols are output as a dynamic symbol in PIEs.  */
7336 else if (h->dynindx == -1
7338 && !bfd_elf_link_record_dynamic_symbol (info, h))
7343 else if (ELIMINATE_COPY_RELOCS)
7345 /* For the non-shared case, discard space for relocs against
7346 symbols which turn out to need copy relocs or are not dynamic.  */
7352 || (htab->root.dynamic_sections_created
7353 && (h->root.type == bfd_link_hash_undefweak
7354 || h->root.type == bfd_link_hash_undefined))))
7356 /* Make sure this symbol is output as a dynamic symbol.
7357 Undefined weak syms won't yet be marked as dynamic. */
7358 if (h->dynindx == -1
7360 && !bfd_elf_link_record_dynamic_symbol (info, h))
7363 /* If that succeeded, we know we'll be keeping all the relocs.  */
7365 if (h->dynindx != -1)
7369 eh->dyn_relocs = NULL;
7374 /* Finally, allocate space. */
7375 for (p = eh->dyn_relocs; p != NULL; p = p->next)
7379 sreloc = elf_section_data (p->sec)->sreloc;
7381 BFD_ASSERT (sreloc != NULL);
7383 sreloc->size += p->count * RELOC_SIZE (htab);
7389 /* Allocate space in .plt, .got and associated reloc sections for
7390 ifunc dynamic relocs. */
7393 elfNN_aarch64_allocate_ifunc_dynrelocs (struct elf_link_hash_entry *h,
7396 struct bfd_link_info *info;
7397 struct elf_aarch64_link_hash_table *htab;
7398 struct elf_aarch64_link_hash_entry *eh;
7400 /* An example of a bfd_link_hash_indirect symbol is a versioned
7401 symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
7402 -> __gxx_personality_v0(bfd_link_hash_defined)
7404 There is no need to process bfd_link_hash_indirect symbols here
7405 because we will also be presented with the concrete instance of
7406 the symbol and elfNN_aarch64_copy_indirect_symbol () will have been
7407 called to copy all relevant data from the generic to the concrete symbol.  */
7410 if (h->root.type == bfd_link_hash_indirect)
7413 if (h->root.type == bfd_link_hash_warning)
7414 h = (struct elf_link_hash_entry *) h->root.u.i.link;
7416 info = (struct bfd_link_info *) inf;
7417 htab = elf_aarch64_hash_table (info);
7419 eh = (struct elf_aarch64_link_hash_entry *) h;
7421 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
7422 here if it is defined and referenced in a non-shared object. */
7423 if (h->type == STT_GNU_IFUNC
7425 return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
7427 htab->plt_entry_size,
7428 htab->plt_header_size,
7433 /* Allocate space in .plt, .got and associated reloc sections for
7434 local dynamic relocs. */
7437 elfNN_aarch64_allocate_local_dynrelocs (void **slot, void *inf)
7439 struct elf_link_hash_entry *h
7440 = (struct elf_link_hash_entry *) *slot;
7442 if (h->type != STT_GNU_IFUNC
7446 || h->root.type != bfd_link_hash_defined)
7449 return elfNN_aarch64_allocate_dynrelocs (h, inf);
7452 /* Allocate space in .plt, .got and associated reloc sections for
7453 local ifunc dynamic relocs. */
7456 elfNN_aarch64_allocate_local_ifunc_dynrelocs (void **slot, void *inf)
7458 struct elf_link_hash_entry *h
7459 = (struct elf_link_hash_entry *) *slot;
7461 if (h->type != STT_GNU_IFUNC
7465 || h->root.type != bfd_link_hash_defined)
7468 return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
7471 /* This is the most important function of all.  Innocuously named elfNN_aarch64_size_dynamic_sections.  */
7474 elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
7475 struct bfd_link_info *info)
7477 struct elf_aarch64_link_hash_table *htab;
7483 htab = elf_aarch64_hash_table ((info));
7484 dynobj = htab->root.dynobj;
7486 BFD_ASSERT (dynobj != NULL);
7488 if (htab->root.dynamic_sections_created)
7490 if (info->executable)
7492 s = bfd_get_linker_section (dynobj, ".interp");
7495 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
7496 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
7500 /* Set up .got offsets for local syms, and space for local dynamic relocs.  */
7502 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7504 struct elf_aarch64_local_symbol *locals = NULL;
7505 Elf_Internal_Shdr *symtab_hdr;
7509 if (!is_aarch64_elf (ibfd))
7512 for (s = ibfd->sections; s != NULL; s = s->next)
7514 struct elf_dyn_relocs *p;
7516 for (p = (struct elf_dyn_relocs *)
7517 (elf_section_data (s)->local_dynrel); p != NULL; p = p->next)
7519 if (!bfd_is_abs_section (p->sec)
7520 && bfd_is_abs_section (p->sec->output_section))
7522 /* Input section has been discarded, either because
7523 it is a copy of a linkonce section or due to
7524 linker script /DISCARD/, so we'll be discarding its relocations.  */
7527 else if (p->count != 0)
7529 srel = elf_section_data (p->sec)->sreloc;
7530 srel->size += p->count * RELOC_SIZE (htab);
7531 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
7532 info->flags |= DF_TEXTREL;
7537 locals = elf_aarch64_locals (ibfd);
7541 symtab_hdr = &elf_symtab_hdr (ibfd);
7542 srel = htab->root.srelgot;
7543 for (i = 0; i < symtab_hdr->sh_info; i++)
7545 locals[i].got_offset = (bfd_vma) - 1;
7546 locals[i].tlsdesc_got_jump_table_offset = (bfd_vma) - 1;
7547 if (locals[i].got_refcount > 0)
7549 unsigned got_type = locals[i].got_type;
7550 if (got_type & GOT_TLSDESC_GD)
7552 locals[i].tlsdesc_got_jump_table_offset =
7553 (htab->root.sgotplt->size
7554 - aarch64_compute_jump_table_size (htab));
7555 htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
7556 locals[i].got_offset = (bfd_vma) - 2;
7559 if (got_type & GOT_TLS_GD)
7561 locals[i].got_offset = htab->root.sgot->size;
7562 htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
7565 if (got_type & GOT_TLS_IE
7566 || got_type & GOT_NORMAL)
7568 locals[i].got_offset = htab->root.sgot->size;
7569 htab->root.sgot->size += GOT_ENTRY_SIZE;
7572 if (got_type == GOT_UNKNOWN)
7578 if (got_type & GOT_TLSDESC_GD)
7580 htab->root.srelplt->size += RELOC_SIZE (htab);
7581 /* Note RELOC_COUNT not incremented here! */
7582 htab->tlsdesc_plt = (bfd_vma) - 1;
7585 if (got_type & GOT_TLS_GD)
7586 htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
7588 if (got_type & GOT_TLS_IE
7589 || got_type & GOT_NORMAL)
7590 htab->root.srelgot->size += RELOC_SIZE (htab);
7595 locals[i].got_refcount = (bfd_vma) - 1;
7601 /* Allocate global sym .plt and .got entries, and space for global
7602 sym dynamic relocs. */
7603 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_dynrelocs,
7606 /* Allocate global ifunc sym .plt and .got entries, and space for global
7607 ifunc sym dynamic relocs. */
7608 elf_link_hash_traverse (&htab->root, elfNN_aarch64_allocate_ifunc_dynrelocs,
7611 /* Allocate .plt and .got entries, and space for local symbols. */
7612 htab_traverse (htab->loc_hash_table,
7613 elfNN_aarch64_allocate_local_dynrelocs,
7616 /* Allocate .plt and .got entries, and space for local ifunc symbols. */
7617 htab_traverse (htab->loc_hash_table,
7618 elfNN_aarch64_allocate_local_ifunc_dynrelocs,
7621 /* For every jump slot reserved in the sgotplt, reloc_count is
7622 incremented. However, when we reserve space for TLS descriptors,
7623 it's not incremented, so in order to compute the space reserved
7624 for them, it suffices to multiply the reloc count by the jump slot size.  */
7627 if (htab->root.srelplt)
7628 htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
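/* A minimal sketch of the computation described by the comment above; the
   real helper is defined earlier in this file and this is only an
   illustration of what it is expected to return:

     static bfd_vma
     jump_table_size_sketch (struct elf_aarch64_link_hash_table *htab)
     {
       return htab->root.srelplt
              ? htab->root.srelplt->reloc_count * GOT_ENTRY_SIZE
              : (bfd_vma) 0;
     }
*/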
7630 if (htab->tlsdesc_plt)
7632 if (htab->root.splt->size == 0)
7633 htab->root.splt->size += PLT_ENTRY_SIZE;
7635 htab->tlsdesc_plt = htab->root.splt->size;
7636 htab->root.splt->size += PLT_TLSDESC_ENTRY_SIZE;
7638 /* If we're not using lazy TLS relocations, don't generate the
7639 GOT entry required. */
7640 if (!(info->flags & DF_BIND_NOW))
7642 htab->dt_tlsdesc_got = htab->root.sgot->size;
7643 htab->root.sgot->size += GOT_ENTRY_SIZE;
7647 /* Init mapping symbols information to use later to distinguish between
7648 code and data while scanning for errata. */
7649 if (htab->fix_erratum_835769 || htab->fix_erratum_843419)
7650 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
7652 if (!is_aarch64_elf (ibfd))
7654 bfd_elfNN_aarch64_init_maps (ibfd);
7657 /* We now have determined the sizes of the various dynamic sections.
7658 Allocate memory for them. */
7660 for (s = dynobj->sections; s != NULL; s = s->next)
7662 if ((s->flags & SEC_LINKER_CREATED) == 0)
7665 if (s == htab->root.splt
7666 || s == htab->root.sgot
7667 || s == htab->root.sgotplt
7668 || s == htab->root.iplt
7669 || s == htab->root.igotplt || s == htab->sdynbss)
7671 /* Strip this section if we don't need it; see the comment below.  */
7674 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela"))
7676 if (s->size != 0 && s != htab->root.srelplt)
7679 /* We use the reloc_count field as a counter if we need
7680 to copy relocs into the output file. */
7681 if (s != htab->root.srelplt)
7686 /* It's not one of our sections, so don't allocate space. */
7692 /* If we don't need this section, strip it from the
7693 output file. This is mostly to handle .rela.bss and
7694 .rela.plt. We must create both sections in
7695 create_dynamic_sections, because they must be created
7696 before the linker maps input sections to output
7697 sections. The linker does that before
7698 adjust_dynamic_symbol is called, and it is that
7699 function which decides whether anything needs to go
7700 into these sections. */
7702 s->flags |= SEC_EXCLUDE;
7706 if ((s->flags & SEC_HAS_CONTENTS) == 0)
7709 /* Allocate memory for the section contents. We use bfd_zalloc
7710 here in case unused entries are not reclaimed before the
7711 section's contents are written out. This should not happen,
7712 but this way if it does, we get a R_AARCH64_NONE reloc instead of garbage.  */
7714 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
7715 if (s->contents == NULL)
7719 if (htab->root.dynamic_sections_created)
7721 /* Add some entries to the .dynamic section. We fill in the
7722 values later, in elfNN_aarch64_finish_dynamic_sections, but we
7723 must add the entries now so that we get the correct size for
7724 the .dynamic section. The DT_DEBUG entry is filled in by the
7725 dynamic linker and used by the debugger. */
7726 #define add_dynamic_entry(TAG, VAL) \
7727 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
7729 if (info->executable)
7731 if (!add_dynamic_entry (DT_DEBUG, 0))
7735 if (htab->root.splt->size != 0)
7737 if (!add_dynamic_entry (DT_PLTGOT, 0)
7738 || !add_dynamic_entry (DT_PLTRELSZ, 0)
7739 || !add_dynamic_entry (DT_PLTREL, DT_RELA)
7740 || !add_dynamic_entry (DT_JMPREL, 0))
7743 if (htab->tlsdesc_plt
7744 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
7745 || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
7751 if (!add_dynamic_entry (DT_RELA, 0)
7752 || !add_dynamic_entry (DT_RELASZ, 0)
7753 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
7756 /* If any dynamic relocs apply to a read-only section,
7757 then we need a DT_TEXTREL entry. */
7758 if ((info->flags & DF_TEXTREL) != 0)
7760 if (!add_dynamic_entry (DT_TEXTREL, 0))
7765 #undef add_dynamic_entry
7771 elf_aarch64_update_plt_entry (bfd *output_bfd,
7772 bfd_reloc_code_real_type r_type,
7773 bfd_byte *plt_entry, bfd_vma value)
7775 reloc_howto_type *howto = elfNN_aarch64_howto_from_bfd_reloc (r_type);
7777 _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
7781 elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
7782 struct elf_aarch64_link_hash_table
7783 *htab, bfd *output_bfd,
7784 struct bfd_link_info *info)
7786 bfd_byte *plt_entry;
7789 bfd_vma gotplt_entry_address;
7790 bfd_vma plt_entry_address;
7791 Elf_Internal_Rela rela;
7793 asection *plt, *gotplt, *relplt;
7795 /* When building a static executable, use .iplt, .igot.plt and
7796 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7797 if (htab->root.splt != NULL)
7799 plt = htab->root.splt;
7800 gotplt = htab->root.sgotplt;
7801 relplt = htab->root.srelplt;
7805 plt = htab->root.iplt;
7806 gotplt = htab->root.igotplt;
7807 relplt = htab->root.irelplt;
7810 /* Get the index in the procedure linkage table which
7811 corresponds to this symbol. This is the index of this symbol
7812 in all the symbols for which we are making plt entries. The
7813 first entry in the procedure linkage table is reserved.
7815 Get the offset into the .got table of the entry that
7816 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
7817 bytes. The first three are reserved for the dynamic linker.
7819 For static executables, we don't reserve anything. */
7821 if (plt == htab->root.splt)
7823 plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
7824 got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
7828 plt_index = h->plt.offset / htab->plt_entry_size;
7829 got_offset = plt_index * GOT_ENTRY_SIZE;
7832 plt_entry = plt->contents + h->plt.offset;
7833 plt_entry_address = plt->output_section->vma
7834 + plt->output_offset + h->plt.offset;
7835 gotplt_entry_address = gotplt->output_section->vma +
7836 gotplt->output_offset + got_offset;
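/* Worked example (assuming the usual ELF64 sizes: 32-byte PLT header,
   16-byte small PLT entries, 8-byte GOT entries): the first symbol given a
   PLT entry has h->plt.offset == 32, so plt_index == 0 and
   got_offset == (0 + 3) * 8 == 24, i.e. the first .got.plt slot after the
   three reserved ones.  */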
7838 /* Copy in the boiler-plate for the PLTn entry. */
7839 memcpy (plt_entry, elfNN_aarch64_small_plt_entry, PLT_SMALL_ENTRY_SIZE);
7841 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
7842 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
7843 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
7845 PG (gotplt_entry_address) -
7846 PG (plt_entry_address));
7848 /* Fill in the lo12 bits for the load from the pltgot. */
7849 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
7851 PG_OFFSET (gotplt_entry_address));
7853 /* Fill in the lo12 bits for the add from the pltgot entry. */
7854 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
7856 PG_OFFSET (gotplt_entry_address));
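/* Page-arithmetic sketch: PG () and PG_OFFSET () (defined earlier in this
   file) split an address into its 4K page base and its low 12 bits.  For
   example, with gotplt_entry_address == 0x412018 and
   plt_entry_address == 0x400430:

     ADRP immediate   = (PG (0x412018) - PG (0x400430)) >> 12
                      = (0x412000 - 0x400000) >> 12 = 0x12
     lo12 for ldr/add = PG_OFFSET (0x412018) = 0x18
*/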
7858 /* All the GOTPLT Entries are essentially initialized to PLT0. */
7859 bfd_put_NN (output_bfd,
7860 plt->output_section->vma + plt->output_offset,
7861 gotplt->contents + got_offset);
7863 rela.r_offset = gotplt_entry_address;
7865 if (h->dynindx == -1
7866 || ((info->executable
7867 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
7869 && h->type == STT_GNU_IFUNC))
7871 /* If an STT_GNU_IFUNC symbol is locally defined, generate
7872 R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
7873 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
7874 rela.r_addend = (h->root.u.def.value
7875 + h->root.u.def.section->output_section->vma
7876 + h->root.u.def.section->output_offset);
7880 /* Fill in the entry in the .rela.plt section. */
7881 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
7885 /* Compute the relocation entry to use based on PLT index and do
7886 not adjust reloc_count. The reloc_count has already been adjusted
7887 to account for this entry. */
7888 loc = relplt->contents + plt_index * RELOC_SIZE (htab);
7889 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
7892 /* Size sections even though they're not dynamic. We use it to setup
7893 _TLS_MODULE_BASE_, if needed. */
7896 elfNN_aarch64_always_size_sections (bfd *output_bfd,
7897 struct bfd_link_info *info)
7901 if (info->relocatable)
7904 tls_sec = elf_hash_table (info)->tls_sec;
7908 struct elf_link_hash_entry *tlsbase;
7910 tlsbase = elf_link_hash_lookup (elf_hash_table (info),
7911 "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
7915 struct bfd_link_hash_entry *h = NULL;
7916 const struct elf_backend_data *bed =
7917 get_elf_backend_data (output_bfd);
7919 if (!(_bfd_generic_link_add_one_symbol
7920 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
7921 tls_sec, 0, NULL, FALSE, bed->collect, &h)))
7924 tlsbase->type = STT_TLS;
7925 tlsbase = (struct elf_link_hash_entry *) h;
7926 tlsbase->def_regular = 1;
7927 tlsbase->other = STV_HIDDEN;
7928 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
7935 /* Finish up dynamic symbol handling. We set the contents of various
7936 dynamic sections here. */
7938 elfNN_aarch64_finish_dynamic_symbol (bfd *output_bfd,
7939 struct bfd_link_info *info,
7940 struct elf_link_hash_entry *h,
7941 Elf_Internal_Sym *sym)
7943 struct elf_aarch64_link_hash_table *htab;
7944 htab = elf_aarch64_hash_table (info);
7946 if (h->plt.offset != (bfd_vma) - 1)
7948 asection *plt, *gotplt, *relplt;
7950 /* This symbol has an entry in the procedure linkage table.  Set it up.  */
7953 /* When building a static executable, use .iplt, .igot.plt and
7954 .rela.iplt sections for STT_GNU_IFUNC symbols. */
7955 if (htab->root.splt != NULL)
7957 plt = htab->root.splt;
7958 gotplt = htab->root.sgotplt;
7959 relplt = htab->root.srelplt;
7963 plt = htab->root.iplt;
7964 gotplt = htab->root.igotplt;
7965 relplt = htab->root.irelplt;
7968 /* This symbol has an entry in the procedure linkage table.  Set it up.  */
7970 if ((h->dynindx == -1
7971 && !((h->forced_local || info->executable)
7973 && h->type == STT_GNU_IFUNC))
7979 elfNN_aarch64_create_small_pltn_entry (h, htab, output_bfd, info);
7980 if (!h->def_regular)
7982 /* Mark the symbol as undefined, rather than as defined in
7983 the .plt section. */
7984 sym->st_shndx = SHN_UNDEF;
7985 /* If the symbol is weak we need to clear the value.
7986 Otherwise, the PLT entry would provide a definition for
7987 the symbol even if the symbol wasn't defined anywhere,
7988 and so the symbol would never be NULL. Leave the value if
7989 there were any relocations where pointer equality matters
7990 (this is a clue for the dynamic linker, to make function
7992 pointer comparisons work between an application and shared library).  */
7993 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
7998 if (h->got.offset != (bfd_vma) - 1
7999 && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL)
8001 Elf_Internal_Rela rela;
8004 /* This symbol has an entry in the global offset table.  Set it up.  */
8006 if (htab->root.sgot == NULL || htab->root.srelgot == NULL)
8009 rela.r_offset = (htab->root.sgot->output_section->vma
8010 + htab->root.sgot->output_offset
8011 + (h->got.offset & ~(bfd_vma) 1));
8014 && h->type == STT_GNU_IFUNC)
8018 /* Generate R_AARCH64_GLOB_DAT. */
8025 if (!h->pointer_equality_needed)
8028 /* For non-shared object, we can't use .got.plt, which
8029 contains the real function address if we need pointer
8030 equality. We load the GOT entry with the PLT entry. */
8031 plt = htab->root.splt ? htab->root.splt : htab->root.iplt;
8032 bfd_put_NN (output_bfd, (plt->output_section->vma
8033 + plt->output_offset
8035 htab->root.sgot->contents
8036 + (h->got.offset & ~(bfd_vma) 1));
8040 else if (info->shared && SYMBOL_REFERENCES_LOCAL (info, h))
8042 if (!h->def_regular)
8045 BFD_ASSERT ((h->got.offset & 1) != 0);
8046 rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
8047 rela.r_addend = (h->root.u.def.value
8048 + h->root.u.def.section->output_section->vma
8049 + h->root.u.def.section->output_offset);
8054 BFD_ASSERT ((h->got.offset & 1) == 0);
8055 bfd_put_NN (output_bfd, (bfd_vma) 0,
8056 htab->root.sgot->contents + h->got.offset);
8057 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
8061 loc = htab->root.srelgot->contents;
8062 loc += htab->root.srelgot->reloc_count++ * RELOC_SIZE (htab);
8063 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8068 Elf_Internal_Rela rela;
8071 /* This symbol needs a copy reloc. Set it up. */
8073 if (h->dynindx == -1
8074 || (h->root.type != bfd_link_hash_defined
8075 && h->root.type != bfd_link_hash_defweak)
8076 || htab->srelbss == NULL)
8079 rela.r_offset = (h->root.u.def.value
8080 + h->root.u.def.section->output_section->vma
8081 + h->root.u.def.section->output_offset);
8082 rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (COPY));
8084 loc = htab->srelbss->contents;
8085 loc += htab->srelbss->reloc_count++ * RELOC_SIZE (htab);
8086 bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
8089 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. SYM may
8090 be NULL for local symbols. */
8092 && (h == elf_hash_table (info)->hdynamic
8093 || h == elf_hash_table (info)->hgot))
8094 sym->st_shndx = SHN_ABS;
8099 /* Finish up local dynamic symbol handling. We set the contents of
8100 various dynamic sections here. */
8103 elfNN_aarch64_finish_local_dynamic_symbol (void **slot, void *inf)
8105 struct elf_link_hash_entry *h
8106 = (struct elf_link_hash_entry *) *slot;
8107 struct bfd_link_info *info
8108 = (struct bfd_link_info *) inf;
8110 return elfNN_aarch64_finish_dynamic_symbol (info->output_bfd,
8115 elfNN_aarch64_init_small_plt0_entry (bfd *output_bfd ATTRIBUTE_UNUSED,
8116 struct elf_aarch64_link_hash_table
8119 /* Fill in PLT0.  FIXME: RR Note this doesn't distinguish between
8120 small and large PLTs and at the minute just generates the small PLT.  */
8123 /* PLT0 of the small PLT looks like this in ELF64 -
8124 stp x16, x30, [sp, #-16]! // Save the reloc and lr on stack.
8125 adrp x16, PLT_GOT + 16 // Get the page base of the GOTPLT
8126 ldr x17, [x16, #:lo12:PLT_GOT+16] // Load the address of the
8128 add x16, x16, #:lo12:PLT_GOT+16 // Load the lo12 bits of the
8129 // GOTPLT entry for this.
8131 PLT0 will be slightly different in ELF32 due to different GOT entry size.  */
8134 bfd_vma plt_got_2nd_ent; /* Address of GOT[2]. */
8138 memcpy (htab->root.splt->contents, elfNN_aarch64_small_plt0_entry,
8140 elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
8143 plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
8144 + htab->root.sgotplt->output_offset
8145 + GOT_ENTRY_SIZE * 2);
8147 plt_base = htab->root.splt->output_section->vma +
8148 htab->root.splt->output_offset;
8150 /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
8151 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
8152 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8153 htab->root.splt->contents + 4,
8154 PG (plt_got_2nd_ent) - PG (plt_base + 4));
8156 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
8157 htab->root.splt->contents + 8,
8158 PG_OFFSET (plt_got_2nd_ent));
8160 elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
8161 htab->root.splt->contents + 12,
8162 PG_OFFSET (plt_got_2nd_ent));
8166 elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
8167 struct bfd_link_info *info)
8169 struct elf_aarch64_link_hash_table *htab;
8173 htab = elf_aarch64_hash_table (info);
8174 dynobj = htab->root.dynobj;
8175 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
8177 if (htab->root.dynamic_sections_created)
8179 ElfNN_External_Dyn *dyncon, *dynconend;
8181 if (sdyn == NULL || htab->root.sgot == NULL)
8184 dyncon = (ElfNN_External_Dyn *) sdyn->contents;
8185 dynconend = (ElfNN_External_Dyn *) (sdyn->contents + sdyn->size);
8186 for (; dyncon < dynconend; dyncon++)
8188 Elf_Internal_Dyn dyn;
8191 bfd_elfNN_swap_dyn_in (dynobj, dyncon, &dyn);
8199 s = htab->root.sgotplt;
8200 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
8204 dyn.d_un.d_ptr = htab->root.srelplt->output_section->vma;
8208 s = htab->root.srelplt;
8209 dyn.d_un.d_val = s->size;
8213 /* The procedure linkage table relocs (DT_JMPREL) should
8214 not be included in the overall relocs (DT_RELA).
8215 Therefore, we override the DT_RELASZ entry here to
8216 make it not include the JMPREL relocs. Since the
8217 linker script arranges for .rela.plt to follow all
8218 other relocation sections, we don't have to worry
8219 about changing the DT_RELA entry. */
8220 if (htab->root.srelplt != NULL)
8222 s = htab->root.srelplt;
8223 dyn.d_un.d_val -= s->size;
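/* Arithmetic example: if the combined dynamic relocations amount to 0x1f8
   bytes and .rela.plt is 0x78 bytes, the emitted DT_RELASZ becomes 0x180,
   so DT_RELA/DT_RELASZ cover only the non-PLT relocations while
   DT_JMPREL/DT_PLTRELSZ cover .rela.plt.  */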
8227 case DT_TLSDESC_PLT:
8228 s = htab->root.splt;
8229 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8230 + htab->tlsdesc_plt;
8233 case DT_TLSDESC_GOT:
8234 s = htab->root.sgot;
8235 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
8236 + htab->dt_tlsdesc_got;
8240 bfd_elfNN_swap_dyn_out (output_bfd, &dyn, dyncon);
8245 /* Fill in the special first entry in the procedure linkage table. */
8246 if (htab->root.splt && htab->root.splt->size > 0)
8248 elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
8250 elf_section_data (htab->root.splt->output_section)->
8251 this_hdr.sh_entsize = htab->plt_entry_size;
8254 if (htab->tlsdesc_plt)
8256 bfd_put_NN (output_bfd, (bfd_vma) 0,
8257 htab->root.sgot->contents + htab->dt_tlsdesc_got);
8259 memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
8260 elfNN_aarch64_tlsdesc_small_plt_entry,
8261 sizeof (elfNN_aarch64_tlsdesc_small_plt_entry));
8264 bfd_vma adrp1_addr =
8265 htab->root.splt->output_section->vma
8266 + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
8268 bfd_vma adrp2_addr = adrp1_addr + 4;
8271 htab->root.sgot->output_section->vma
8272 + htab->root.sgot->output_offset;
8274 bfd_vma pltgot_addr =
8275 htab->root.sgotplt->output_section->vma
8276 + htab->root.sgotplt->output_offset;
8278 bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
8280 bfd_byte *plt_entry =
8281 htab->root.splt->contents + htab->tlsdesc_plt;
8283 /* adrp x2, DT_TLSDESC_GOT */
8284 elf_aarch64_update_plt_entry (output_bfd,
8285 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8287 (PG (dt_tlsdesc_got)
8288 - PG (adrp1_addr)));
8291 elf_aarch64_update_plt_entry (output_bfd,
8292 BFD_RELOC_AARCH64_ADR_HI21_PCREL,
8295 - PG (adrp2_addr)));
8297 /* ldr x2, [x2, #0] */
8298 elf_aarch64_update_plt_entry (output_bfd,
8299 BFD_RELOC_AARCH64_LDSTNN_LO12,
8301 PG_OFFSET (dt_tlsdesc_got));
8304 elf_aarch64_update_plt_entry (output_bfd,
8305 BFD_RELOC_AARCH64_ADD_LO12,
8307 PG_OFFSET (pltgot_addr));
8312 if (htab->root.sgotplt)
8314 if (bfd_is_abs_section (htab->root.sgotplt->output_section))
8316 (*_bfd_error_handler)
8317 (_("discarded output section: `%A'"), htab->root.sgotplt);
8321 /* Fill in the first three entries in the global offset table. */
8322 if (htab->root.sgotplt->size > 0)
8324 bfd_put_NN (output_bfd, (bfd_vma) 0, htab->root.sgotplt->contents);
8326 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */
8327 bfd_put_NN (output_bfd,
8329 htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
8330 bfd_put_NN (output_bfd,
8332 htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
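/* These two reserved slots follow the usual ELF lazy-binding convention:
   at load time the dynamic linker stores its link-map pointer in GOT[1]
   and the address of its resolver in GOT[2], which PLT0 loads and
   branches to.  */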
8335 if (htab->root.sgot)
8337 if (htab->root.sgot->size > 0)
8340 sdyn ? sdyn->output_section->vma + sdyn->output_offset : 0;
8341 bfd_put_NN (output_bfd, addr, htab->root.sgot->contents);
8345 elf_section_data (htab->root.sgotplt->output_section)->
8346 this_hdr.sh_entsize = GOT_ENTRY_SIZE;
8349 if (htab->root.sgot && htab->root.sgot->size > 0)
8350 elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
8353 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
8354 htab_traverse (htab->loc_hash_table,
8355 elfNN_aarch64_finish_local_dynamic_symbol,
8361 /* Return address for Ith PLT stub in section PLT, for relocation REL
8362 or (bfd_vma) -1 if it should not be included. */
8365 elfNN_aarch64_plt_sym_val (bfd_vma i, const asection *plt,
8366 const arelent *rel ATTRIBUTE_UNUSED)
8368 return plt->vma + PLT_ENTRY_SIZE + i * PLT_SMALL_ENTRY_SIZE;
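/* Worked example: with the 32-byte PLT header and 16-byte small PLT
   entries used by this backend, the stub for i == 2 is reported at
   plt->vma + 32 + 2 * 16 == plt->vma + 64.  */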
8372 /* We use this so we can override certain functions
8373 (though currently we don't). */
8375 const struct elf_size_info elfNN_aarch64_size_info =
8377 sizeof (ElfNN_External_Ehdr),
8378 sizeof (ElfNN_External_Phdr),
8379 sizeof (ElfNN_External_Shdr),
8380 sizeof (ElfNN_External_Rel),
8381 sizeof (ElfNN_External_Rela),
8382 sizeof (ElfNN_External_Sym),
8383 sizeof (ElfNN_External_Dyn),
8384 sizeof (Elf_External_Note),
8385 4, /* Hash table entry size. */
8386 1, /* Internal relocs per external relocs. */
8387 ARCH_SIZE, /* Arch size. */
8388 LOG_FILE_ALIGN, /* Log_file_align. */
8389 ELFCLASSNN, EV_CURRENT,
8390 bfd_elfNN_write_out_phdrs,
8391 bfd_elfNN_write_shdrs_and_ehdr,
8392 bfd_elfNN_checksum_contents,
8393 bfd_elfNN_write_relocs,
8394 bfd_elfNN_swap_symbol_in,
8395 bfd_elfNN_swap_symbol_out,
8396 bfd_elfNN_slurp_reloc_table,
8397 bfd_elfNN_slurp_symbol_table,
8398 bfd_elfNN_swap_dyn_in,
8399 bfd_elfNN_swap_dyn_out,
8400 bfd_elfNN_swap_reloc_in,
8401 bfd_elfNN_swap_reloc_out,
8402 bfd_elfNN_swap_reloca_in,
8403 bfd_elfNN_swap_reloca_out
8406 #define ELF_ARCH bfd_arch_aarch64
8407 #define ELF_MACHINE_CODE EM_AARCH64
8408 #define ELF_MAXPAGESIZE 0x10000
8409 #define ELF_MINPAGESIZE 0x1000
8410 #define ELF_COMMONPAGESIZE 0x1000
8412 #define bfd_elfNN_close_and_cleanup \
8413 elfNN_aarch64_close_and_cleanup
8415 #define bfd_elfNN_bfd_free_cached_info \
8416 elfNN_aarch64_bfd_free_cached_info
8418 #define bfd_elfNN_bfd_is_target_special_symbol \
8419 elfNN_aarch64_is_target_special_symbol
8421 #define bfd_elfNN_bfd_link_hash_table_create \
8422 elfNN_aarch64_link_hash_table_create
8424 #define bfd_elfNN_bfd_merge_private_bfd_data \
8425 elfNN_aarch64_merge_private_bfd_data
8427 #define bfd_elfNN_bfd_print_private_bfd_data \
8428 elfNN_aarch64_print_private_bfd_data
8430 #define bfd_elfNN_bfd_reloc_type_lookup \
8431 elfNN_aarch64_reloc_type_lookup
8433 #define bfd_elfNN_bfd_reloc_name_lookup \
8434 elfNN_aarch64_reloc_name_lookup
8436 #define bfd_elfNN_bfd_set_private_flags \
8437 elfNN_aarch64_set_private_flags
8439 #define bfd_elfNN_find_inliner_info \
8440 elfNN_aarch64_find_inliner_info
8442 #define bfd_elfNN_find_nearest_line \
8443 elfNN_aarch64_find_nearest_line
8445 #define bfd_elfNN_mkobject \
8446 elfNN_aarch64_mkobject
8448 #define bfd_elfNN_new_section_hook \
8449 elfNN_aarch64_new_section_hook
8451 #define elf_backend_adjust_dynamic_symbol \
8452 elfNN_aarch64_adjust_dynamic_symbol
8454 #define elf_backend_always_size_sections \
8455 elfNN_aarch64_always_size_sections
8457 #define elf_backend_check_relocs \
8458 elfNN_aarch64_check_relocs
8460 #define elf_backend_copy_indirect_symbol \
8461 elfNN_aarch64_copy_indirect_symbol
8463 /* Create .dynbss, and .rela.bss sections in DYNOBJ, and set up shortcuts
8464 to them in our hash. */
8465 #define elf_backend_create_dynamic_sections \
8466 elfNN_aarch64_create_dynamic_sections
8468 #define elf_backend_init_index_section \
8469 _bfd_elf_init_2_index_sections
8471 #define elf_backend_finish_dynamic_sections \
8472 elfNN_aarch64_finish_dynamic_sections
8474 #define elf_backend_finish_dynamic_symbol \
8475 elfNN_aarch64_finish_dynamic_symbol
8477 #define elf_backend_gc_sweep_hook \
8478 elfNN_aarch64_gc_sweep_hook
8480 #define elf_backend_object_p \
8481 elfNN_aarch64_object_p
8483 #define elf_backend_output_arch_local_syms \
8484 elfNN_aarch64_output_arch_local_syms
8486 #define elf_backend_plt_sym_val \
8487 elfNN_aarch64_plt_sym_val
8489 #define elf_backend_post_process_headers \
8490 elfNN_aarch64_post_process_headers
8492 #define elf_backend_relocate_section \
8493 elfNN_aarch64_relocate_section
8495 #define elf_backend_reloc_type_class \
8496 elfNN_aarch64_reloc_type_class
8498 #define elf_backend_section_from_shdr \
8499 elfNN_aarch64_section_from_shdr
8501 #define elf_backend_size_dynamic_sections \
8502 elfNN_aarch64_size_dynamic_sections
8504 #define elf_backend_size_info \
8505 elfNN_aarch64_size_info
8507 #define elf_backend_write_section \
8508 elfNN_aarch64_write_section
8510 #define elf_backend_can_refcount 1
8511 #define elf_backend_can_gc_sections 1
8512 #define elf_backend_plt_readonly 1
8513 #define elf_backend_want_got_plt 1
8514 #define elf_backend_want_plt_sym 0
8515 #define elf_backend_may_use_rel_p 0
8516 #define elf_backend_may_use_rela_p 1
8517 #define elf_backend_default_use_rela_p 1
8518 #define elf_backend_rela_normal 1
8519 #define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
8520 #define elf_backend_default_execstack 0
8522 #undef elf_backend_obj_attrs_section
8523 #define elf_backend_obj_attrs_section ".ARM.attributes"
8525 #include "elfNN-target.h"