1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2018 Free Software Foundation, Inc.
4 This file is part of BFD, the Binary File Descriptor library.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
30 #include "elf-vxworks.h"
33 /* Return the relocation section associated with NAME. HTAB is the
34 bfd's elf32_arm_link_hash_entry. */
35 #define RELOC_SECTION(HTAB, NAME) \
36 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
38 /* Return size of a relocation entry. HTAB is the bfd's
39 elf32_arm_link_hash_entry. */
40 #define RELOC_SIZE(HTAB) \
42 ? sizeof (Elf32_External_Rel) \
43 : sizeof (Elf32_External_Rela))
45 /* Return function to swap relocations in. HTAB is the bfd's
46 elf32_arm_link_hash_entry. */
47 #define SWAP_RELOC_IN(HTAB) \
49 ? bfd_elf32_swap_reloc_in \
50 : bfd_elf32_swap_reloca_in)
52 /* Return function to swap relocations out. HTAB is the bfd's
53 elf32_arm_link_hash_entry. */
54 #define SWAP_RELOC_OUT(HTAB) \
56 ? bfd_elf32_swap_reloc_out \
57 : bfd_elf32_swap_reloca_out)
59 #define elf_info_to_howto NULL
60 #define elf_info_to_howto_rel elf32_arm_info_to_howto
62 #define ARM_ELF_ABI_VERSION 0
63 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
65 /* The Adjusted Place, as defined by AAELF. */
66 #define Pa(X) ((X) & 0xfffffffc)
68 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
69 struct bfd_link_info *link_info,
73 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
77 static reloc_howto_type elf32_arm_howto_table_1[] =
80 HOWTO (R_ARM_NONE, /* type */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
84 FALSE, /* pc_relative */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
92 FALSE), /* pcrel_offset */
94 HOWTO (R_ARM_PC24, /* type */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
98 TRUE, /* pc_relative */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
113 FALSE, /* pc_relative */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
128 TRUE, /* pc_relative */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
143 TRUE, /* pc_relative */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
158 FALSE, /* pc_relative */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
173 FALSE, /* pc_relative */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
183 HOWTO (R_ARM_THM_ABS5, /* type */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
187 FALSE, /* pc_relative */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
198 HOWTO (R_ARM_ABS8, /* type */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
202 FALSE, /* pc_relative */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
212 HOWTO (R_ARM_SBREL32, /* type */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
216 FALSE, /* pc_relative */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
226 HOWTO (R_ARM_THM_CALL, /* type */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
230 TRUE, /* pc_relative */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
240 HOWTO (R_ARM_THM_PC8, /* type */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
244 TRUE, /* pc_relative */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
254 HOWTO (R_ARM_BREL_ADJ, /* type */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
258 FALSE, /* pc_relative */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
268 HOWTO (R_ARM_TLS_DESC, /* type */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
272 FALSE, /* pc_relative */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
282 HOWTO (R_ARM_THM_SWI8, /* type */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
286 FALSE, /* pc_relative */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
301 TRUE, /* pc_relative */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
316 TRUE, /* pc_relative */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
326 /* Dynamic TLS relocations. */
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
332 FALSE, /* pc_relative */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
346 FALSE, /* pc_relative */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
360 FALSE, /* pc_relative */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
370 /* Relocs used in ARM Linux */
372 HOWTO (R_ARM_COPY, /* type */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
376 FALSE, /* pc_relative */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
386 HOWTO (R_ARM_GLOB_DAT, /* type */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
390 FALSE, /* pc_relative */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
404 FALSE, /* pc_relative */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
414 HOWTO (R_ARM_RELATIVE, /* type */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
418 FALSE, /* pc_relative */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
428 HOWTO (R_ARM_GOTOFF32, /* type */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
432 FALSE, /* pc_relative */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
442 HOWTO (R_ARM_GOTPC, /* type */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
446 TRUE, /* pc_relative */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
456 HOWTO (R_ARM_GOT32, /* type */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
460 FALSE, /* pc_relative */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
470 HOWTO (R_ARM_PLT32, /* type */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
474 TRUE, /* pc_relative */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
484 HOWTO (R_ARM_CALL, /* type */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
488 TRUE, /* pc_relative */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
498 HOWTO (R_ARM_JUMP24, /* type */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
502 TRUE, /* pc_relative */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
512 HOWTO (R_ARM_THM_JUMP24, /* type */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
516 TRUE, /* pc_relative */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
526 HOWTO (R_ARM_BASE_ABS, /* type */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
530 FALSE, /* pc_relative */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
544 TRUE, /* pc_relative */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
558 TRUE, /* pc_relative */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
572 TRUE, /* pc_relative */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
586 FALSE, /* pc_relative */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
600 FALSE, /* pc_relative */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
614 FALSE, /* pc_relative */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
624 HOWTO (R_ARM_TARGET1, /* type */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
628 FALSE, /* pc_relative */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
638 HOWTO (R_ARM_ROSEGREL32, /* type */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
642 FALSE, /* pc_relative */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
652 HOWTO (R_ARM_V4BX, /* type */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
656 FALSE, /* pc_relative */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
666 HOWTO (R_ARM_TARGET2, /* type */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
670 FALSE, /* pc_relative */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
680 HOWTO (R_ARM_PREL31, /* type */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
684 TRUE, /* pc_relative */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
698 FALSE, /* pc_relative */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
708 HOWTO (R_ARM_MOVT_ABS, /* type */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
712 FALSE, /* pc_relative */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
726 TRUE, /* pc_relative */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
736 HOWTO (R_ARM_MOVT_PREL, /* type */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
740 TRUE, /* pc_relative */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
754 FALSE, /* pc_relative */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
768 FALSE, /* pc_relative */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
782 TRUE, /* pc_relative */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
796 TRUE, /* pc_relative */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
806 HOWTO (R_ARM_THM_JUMP19, /* type */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
810 TRUE, /* pc_relative */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
820 HOWTO (R_ARM_THM_JUMP6, /* type */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
824 TRUE, /* pc_relative */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
841 TRUE, /* pc_relative */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
851 HOWTO (R_ARM_THM_PC12, /* type */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
855 TRUE, /* pc_relative */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
865 HOWTO (R_ARM_ABS32_NOI, /* type */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
869 FALSE, /* pc_relative */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
879 HOWTO (R_ARM_REL32_NOI, /* type */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
883 TRUE, /* pc_relative */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
893 /* Group relocations. */
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
899 TRUE, /* pc_relative */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
913 TRUE, /* pc_relative */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
927 TRUE, /* pc_relative */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
941 TRUE, /* pc_relative */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
955 TRUE, /* pc_relative */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
969 TRUE, /* pc_relative */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
983 TRUE, /* pc_relative */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
997 TRUE, /* pc_relative */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 TRUE, /* pc_relative */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 TRUE, /* pc_relative */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 TRUE, /* pc_relative */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 TRUE, /* pc_relative */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 TRUE, /* pc_relative */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 TRUE, /* pc_relative */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 TRUE, /* pc_relative */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 TRUE, /* pc_relative */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 TRUE, /* pc_relative */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 TRUE, /* pc_relative */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 TRUE, /* pc_relative */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 TRUE, /* pc_relative */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 TRUE, /* pc_relative */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 TRUE, /* pc_relative */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 TRUE, /* pc_relative */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 TRUE, /* pc_relative */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 TRUE, /* pc_relative */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 TRUE, /* pc_relative */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 TRUE, /* pc_relative */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1273 /* End of group relocations. */
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 FALSE, /* pc_relative */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 FALSE, /* pc_relative */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 FALSE, /* pc_relative */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 FALSE, /* pc_relative */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 FALSE, /* pc_relative */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 FALSE, /* pc_relative */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 FALSE, /* pc_relative */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 FALSE, /* pc_relative */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 FALSE, /* pc_relative */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 FALSE, /* pc_relative */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 FALSE, /* pc_relative */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 FALSE, /* pc_relative */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 TRUE, /* pc_relative */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 FALSE, /* pc_relative */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 FALSE, /* pc_relative */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 FALSE, /* pc_relative */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1500 FALSE), /* pcrel_offset */
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 FALSE, /* pc_relative */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1515 FALSE), /* pcrel_offset */
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 TRUE, /* pc_relative */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 TRUE, /* pc_relative */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 FALSE, /* pc_relative */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 FALSE, /* pc_relative */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 FALSE, /* pc_relative */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 FALSE, /* pc_relative */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 FALSE, /* pc_relative */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 NULL, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 FALSE, /* pc_relative */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 FALSE, /* pc_relative */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 FALSE, /* pc_relative */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1658 /* 112-127 private relocations. */
1676 /* R_ARM_ME_TOO, obsolete. */
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 FALSE, /* pc_relative */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1698 FALSE, /* pc_relative. */
1700 complain_overflow_bitfield,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1711 FALSE, /* pc_relative. */
1713 complain_overflow_bitfield,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1724 FALSE, /* pc_relative. */
1726 complain_overflow_bitfield,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1737 FALSE, /* pc_relative. */
1739 complain_overflow_bitfield,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE), /* pcrel_offset. */
1749 static reloc_howto_type elf32_arm_howto_table_2[8] =
1751 HOWTO (R_ARM_IRELATIVE, /* type */
1753 2, /* size (0 = byte, 1 = short, 2 = long) */
1755 FALSE, /* pc_relative */
1757 complain_overflow_bitfield,/* complain_on_overflow */
1758 bfd_elf_generic_reloc, /* special_function */
1759 "R_ARM_IRELATIVE", /* name */
1760 TRUE, /* partial_inplace */
1761 0xffffffff, /* src_mask */
1762 0xffffffff, /* dst_mask */
1763 FALSE), /* pcrel_offset */
1764 HOWTO (R_ARM_GOTFUNCDESC, /* type */
1766 2, /* size (0 = byte, 1 = short, 2 = long) */
1768 FALSE, /* pc_relative */
1770 complain_overflow_bitfield,/* complain_on_overflow */
1771 bfd_elf_generic_reloc, /* special_function */
1772 "R_ARM_GOTFUNCDESC", /* name */
1773 FALSE, /* partial_inplace */
1775 0xffffffff, /* dst_mask */
1776 FALSE), /* pcrel_offset */
1777 HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
1779 2, /* size (0 = byte, 1 = short, 2 = long) */
1781 FALSE, /* pc_relative */
1783 complain_overflow_bitfield,/* complain_on_overflow */
1784 bfd_elf_generic_reloc, /* special_function */
1785 "R_ARM_GOTOFFFUNCDESC",/* name */
1786 FALSE, /* partial_inplace */
1788 0xffffffff, /* dst_mask */
1789 FALSE), /* pcrel_offset */
1790 HOWTO (R_ARM_FUNCDESC, /* type */
1792 2, /* size (0 = byte, 1 = short, 2 = long) */
1794 FALSE, /* pc_relative */
1796 complain_overflow_bitfield,/* complain_on_overflow */
1797 bfd_elf_generic_reloc, /* special_function */
1798 "R_ARM_FUNCDESC", /* name */
1799 FALSE, /* partial_inplace */
1801 0xffffffff, /* dst_mask */
1802 FALSE), /* pcrel_offset */
1803 HOWTO (R_ARM_FUNCDESC_VALUE, /* type */
1805 2, /* size (0 = byte, 1 = short, 2 = long) */
1807 FALSE, /* pc_relative */
1809 complain_overflow_bitfield,/* complain_on_overflow */
1810 bfd_elf_generic_reloc, /* special_function */
1811 "R_ARM_FUNCDESC_VALUE",/* name */
1812 FALSE, /* partial_inplace */
1814 0xffffffff, /* dst_mask */
1815 FALSE), /* pcrel_offset */
1816 HOWTO (R_ARM_TLS_GD32_FDPIC, /* type */
1818 2, /* size (0 = byte, 1 = short, 2 = long) */
1820 FALSE, /* pc_relative */
1822 complain_overflow_bitfield,/* complain_on_overflow */
1823 bfd_elf_generic_reloc, /* special_function */
1824 "R_ARM_TLS_GD32_FDPIC",/* name */
1825 FALSE, /* partial_inplace */
1827 0xffffffff, /* dst_mask */
1828 FALSE), /* pcrel_offset */
1829 HOWTO (R_ARM_TLS_LDM32_FDPIC, /* type */
1831 2, /* size (0 = byte, 1 = short, 2 = long) */
1833 FALSE, /* pc_relative */
1835 complain_overflow_bitfield,/* complain_on_overflow */
1836 bfd_elf_generic_reloc, /* special_function */
1837 "R_ARM_TLS_LDM32_FDPIC",/* name */
1838 FALSE, /* partial_inplace */
1840 0xffffffff, /* dst_mask */
1841 FALSE), /* pcrel_offset */
1842 HOWTO (R_ARM_TLS_IE32_FDPIC, /* type */
1844 2, /* size (0 = byte, 1 = short, 2 = long) */
1846 FALSE, /* pc_relative */
1848 complain_overflow_bitfield,/* complain_on_overflow */
1849 bfd_elf_generic_reloc, /* special_function */
1850 "R_ARM_TLS_IE32_FDPIC",/* name */
1851 FALSE, /* partial_inplace */
1853 0xffffffff, /* dst_mask */
1854 FALSE), /* pcrel_offset */
1857 /* 249-255 extended, currently unused, relocations: */
1858 static reloc_howto_type elf32_arm_howto_table_3[4] =
1860 HOWTO (R_ARM_RREL32, /* type */
1862 0, /* size (0 = byte, 1 = short, 2 = long) */
1864 FALSE, /* pc_relative */
1866 complain_overflow_dont,/* complain_on_overflow */
1867 bfd_elf_generic_reloc, /* special_function */
1868 "R_ARM_RREL32", /* name */
1869 FALSE, /* partial_inplace */
1872 FALSE), /* pcrel_offset */
1874 HOWTO (R_ARM_RABS32, /* type */
1876 0, /* size (0 = byte, 1 = short, 2 = long) */
1878 FALSE, /* pc_relative */
1880 complain_overflow_dont,/* complain_on_overflow */
1881 bfd_elf_generic_reloc, /* special_function */
1882 "R_ARM_RABS32", /* name */
1883 FALSE, /* partial_inplace */
1886 FALSE), /* pcrel_offset */
1888 HOWTO (R_ARM_RPC24, /* type */
1890 0, /* size (0 = byte, 1 = short, 2 = long) */
1892 FALSE, /* pc_relative */
1894 complain_overflow_dont,/* complain_on_overflow */
1895 bfd_elf_generic_reloc, /* special_function */
1896 "R_ARM_RPC24", /* name */
1897 FALSE, /* partial_inplace */
1900 FALSE), /* pcrel_offset */
1902 HOWTO (R_ARM_RBASE, /* type */
1904 0, /* size (0 = byte, 1 = short, 2 = long) */
1906 FALSE, /* pc_relative */
1908 complain_overflow_dont,/* complain_on_overflow */
1909 bfd_elf_generic_reloc, /* special_function */
1910 "R_ARM_RBASE", /* name */
1911 FALSE, /* partial_inplace */
1914 FALSE) /* pcrel_offset */
1917 static reloc_howto_type *
1918 elf32_arm_howto_from_type (unsigned int r_type)
1920 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1921 return &elf32_arm_howto_table_1[r_type];
1923 if (r_type >= R_ARM_IRELATIVE
1924 && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1925 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1927 if (r_type >= R_ARM_RREL32
1928 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1929 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1935 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1936 Elf_Internal_Rela * elf_reloc)
1938 unsigned int r_type;
1940 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1941 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1943 /* xgettext:c-format */
1944 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1946 bfd_set_error (bfd_error_bad_value);
1952 struct elf32_arm_reloc_map
1954 bfd_reloc_code_real_type bfd_reloc_val;
1955 unsigned char elf_reloc_val;
1958 /* All entries in this list must also be present in elf32_arm_howto_table. */
1959 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1961 {BFD_RELOC_NONE, R_ARM_NONE},
1962 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1963 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1964 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1965 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1966 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1967 {BFD_RELOC_32, R_ARM_ABS32},
1968 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1969 {BFD_RELOC_8, R_ARM_ABS8},
1970 {BFD_RELOC_16, R_ARM_ABS16},
1971 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1972 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1973 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1974 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1975 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1976 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1977 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1978 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1979 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1980 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1981 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1982 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1983 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1984 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1985 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1986 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1987 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1988 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1989 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1990 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1991 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1992 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1993 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1994 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1995 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1996 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1997 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1998 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1999 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
2000 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
2001 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
2002 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
2003 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
2004 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
2005 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
2006 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
2007 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
2008 {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
2009 {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
2010 {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
2011 {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
2012 {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
2013 {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
2014 {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
2015 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
2016 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
2017 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
2018 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
2019 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
2020 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
2021 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
2022 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
2023 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2024 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2025 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2026 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2027 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2028 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2029 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2030 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2031 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2032 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2033 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2034 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2035 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2036 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2037 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2038 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2039 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2040 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2041 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2042 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2043 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2044 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2045 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2046 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2047 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2048 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2049 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2050 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2051 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2052 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2053 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
2054 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2055 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2056 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2057 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
2060 static reloc_howto_type *
2061 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2062 bfd_reloc_code_real_type code)
2066 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2067 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2068 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2073 static reloc_howto_type *
2074 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2079 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2080 if (elf32_arm_howto_table_1[i].name != NULL
2081 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2082 return &elf32_arm_howto_table_1[i];
2084 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2085 if (elf32_arm_howto_table_2[i].name != NULL
2086 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2087 return &elf32_arm_howto_table_2[i];
2089 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2090 if (elf32_arm_howto_table_3[i].name != NULL
2091 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2092 return &elf32_arm_howto_table_3[i];
2097 /* Support for core dump NOTE sections. */
2100 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2105 switch (note->descsz)
2110 case 148: /* Linux/ARM 32-bit. */
2112 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2115 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2124 /* Make a ".reg/999" section. */
2125 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2126 size, note->descpos + offset);
2130 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2132 switch (note->descsz)
2137 case 124: /* Linux/ARM elf_prpsinfo. */
2138 elf_tdata (abfd)->core->pid
2139 = bfd_get_32 (abfd, note->descdata + 12);
2140 elf_tdata (abfd)->core->program
2141 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2142 elf_tdata (abfd)->core->command
2143 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2146 /* Note that for some reason, a spurious space is tacked
2147 onto the end of the args in some (at least one anyway)
2148 implementations, so strip it off if it exists. */
2150 char *command = elf_tdata (abfd)->core->command;
2151 int n = strlen (command);
2153 if (0 < n && command[n - 1] == ' ')
2154 command[n - 1] = '\0';
2161 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2171 char data[124] ATTRIBUTE_NONSTRING;
2174 va_start (ap, note_type);
2175 memset (data, 0, sizeof (data));
2176 strncpy (data + 28, va_arg (ap, const char *), 16);
2178 /* GCC 8.1 warns about 80 equals destination size with
2179 -Wstringop-truncation:
2180 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
2182 #if GCC_VERSION == 8001
2183 DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
2185 strncpy (data + 44, va_arg (ap, const char *), 80);
2189 return elfcore_write_note (abfd, buf, bufsiz,
2190 "CORE", note_type, data, sizeof (data));
2201 va_start (ap, note_type);
2202 memset (data, 0, sizeof (data));
2203 pid = va_arg (ap, long);
2204 bfd_put_32 (abfd, pid, data + 24);
2205 cursig = va_arg (ap, int);
2206 bfd_put_16 (abfd, cursig, data + 12);
2207 greg = va_arg (ap, const void *);
2208 memcpy (data + 72, greg, 72);
2211 return elfcore_write_note (abfd, buf, bufsiz,
2212 "CORE", note_type, data, sizeof (data));
2217 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2218 #define TARGET_LITTLE_NAME "elf32-littlearm"
2219 #define TARGET_BIG_SYM arm_elf32_be_vec
2220 #define TARGET_BIG_NAME "elf32-bigarm"
2222 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2223 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2224 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2226 typedef unsigned long int insn32;
2227 typedef unsigned short int insn16;
2229 /* In lieu of proper flags, assume all EABIv4 or later objects are
2231 #define INTERWORK_FLAG(abfd) \
2232 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2233 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2234 || ((abfd)->flags & BFD_LINKER_CREATED))
2236 /* The linker script knows the section names for placement.
2237 The entry_names are used to do simple name mangling on the stubs.
2238 Given a function name, and its type, the stub can be found. The
2239 name can be changed. The only requirement is the %s be present. */
2240 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2241 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2243 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2244 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2246 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2247 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2249 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2250 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2252 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2253 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2255 #define STUB_ENTRY_NAME "__%s_veneer"
2257 #define CMSE_PREFIX "__acle_se_"
2259 /* The name of the dynamic interpreter. This is put in the .interp
2261 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2263 /* FDPIC default stack size. */
2264 #define DEFAULT_STACK_SIZE 0x8000
2266 static const unsigned long tls_trampoline [] =
2268 0xe08e0000, /* add r0, lr, r0 */
2269 0xe5901004, /* ldr r1, [r0,#4] */
2270 0xe12fff11, /* bx r1 */
2273 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2275 0xe52d2004, /* push {r2} */
2276 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2277 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2278 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2279 0xe081100f, /* 2: add r1, pc */
2280 0xe12fff12, /* bx r2 */
2281 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2282 + dl_tlsdesc_lazy_resolver(GOT) */
2283 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2286 /* ARM FDPIC PLT entry. */
2287 /* The last 5 words contain PLT lazy fragment code and data. */
2288 static const bfd_vma elf32_arm_fdpic_plt_entry [] =
2290 0xe59fc008, /* ldr r12, .L1 */
2291 0xe08cc009, /* add r12, r12, r9 */
2292 0xe59c9004, /* ldr r9, [r12, #4] */
2293 0xe59cf000, /* ldr pc, [r12] */
2294 0x00000000, /* L1. .word foo(GOTOFFFUNCDESC) */
2295 0x00000000, /* L1. .word foo(funcdesc_value_reloc_offset) */
2296 0xe51fc00c, /* ldr r12, [pc, #-12] */
2297 0xe92d1000, /* push {r12} */
2298 0xe599c004, /* ldr r12, [r9, #4] */
2299 0xe599f000, /* ldr pc, [r9] */
2302 /* Thumb FDPIC PLT entry. */
2303 /* The last 5 words contain PLT lazy fragment code and data. */
2304 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
2306 0xc00cf8df, /* ldr.w r12, .L1 */
2307 0x0c09eb0c, /* add.w r12, r12, r9 */
2308 0x9004f8dc, /* ldr.w r9, [r12, #4] */
2309 0xf000f8dc, /* ldr.w pc, [r12] */
2310 0x00000000, /* .L1 .word foo(GOTOFFFUNCDESC) */
2311 0x00000000, /* .L2 .word foo(funcdesc_value_reloc_offset) */
2312 0xc008f85f, /* ldr.w r12, .L2 */
2313 0xcd04f84d, /* push {r12} */
2314 0xc004f8d9, /* ldr.w r12, [r9, #4] */
2315 0xf000f8d9, /* ldr.w pc, [r9] */
2318 #ifdef FOUR_WORD_PLT
2320 /* The first entry in a procedure linkage table looks like
2321 this. It is set up so that any shared library function that is
2322 called before the relocation has been set up calls the dynamic
2324 static const bfd_vma elf32_arm_plt0_entry [] =
2326 0xe52de004, /* str lr, [sp, #-4]! */
2327 0xe59fe010, /* ldr lr, [pc, #16] */
2328 0xe08fe00e, /* add lr, pc, lr */
2329 0xe5bef008, /* ldr pc, [lr, #8]! */
2332 /* Subsequent entries in a procedure linkage table look like
2334 static const bfd_vma elf32_arm_plt_entry [] =
2336 0xe28fc600, /* add ip, pc, #NN */
2337 0xe28cca00, /* add ip, ip, #NN */
2338 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2339 0x00000000, /* unused */
2342 #else /* not FOUR_WORD_PLT */
2344 /* The first entry in a procedure linkage table looks like
2345 this. It is set up so that any shared library function that is
2346 called before the relocation has been set up calls the dynamic
2348 static const bfd_vma elf32_arm_plt0_entry [] =
2350 0xe52de004, /* str lr, [sp, #-4]! */
2351 0xe59fe004, /* ldr lr, [pc, #4] */
2352 0xe08fe00e, /* add lr, pc, lr */
2353 0xe5bef008, /* ldr pc, [lr, #8]! */
2354 0x00000000, /* &GOT[0] - . */
2357 /* By default subsequent entries in a procedure linkage table look like
2358 this. Offsets that don't fit into 28 bits will cause link error. */
2359 static const bfd_vma elf32_arm_plt_entry_short [] =
2361 0xe28fc600, /* add ip, pc, #0xNN00000 */
2362 0xe28cca00, /* add ip, ip, #0xNN000 */
2363 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2366 /* When explicitly asked, we'll use this "long" entry format
2367 which can cope with arbitrary displacements. */
2368 static const bfd_vma elf32_arm_plt_entry_long [] =
2370 0xe28fc200, /* add ip, pc, #0xN0000000 */
2371 0xe28cc600, /* add ip, ip, #0xNN00000 */
2372 0xe28cca00, /* add ip, ip, #0xNN000 */
2373 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2376 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
2378 #endif /* not FOUR_WORD_PLT */
2380 /* The first entry in a procedure linkage table looks like this.
2381 It is set up so that any shared library function that is called before the
2382 relocation has been set up calls the dynamic linker first. */
2383 static const bfd_vma elf32_thumb2_plt0_entry [] =
2385 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2386 an instruction maybe encoded to one or two array elements. */
2387 0xf8dfb500, /* push {lr} */
2388 0x44fee008, /* ldr.w lr, [pc, #8] */
2390 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2391 0x00000000, /* &GOT[0] - . */
2394 /* Subsequent entries in a procedure linkage table for thumb only target
2396 static const bfd_vma elf32_thumb2_plt_entry [] =
2398 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2399 an instruction maybe encoded to one or two array elements. */
2400 0x0c00f240, /* movw ip, #0xNNNN */
2401 0x0c00f2c0, /* movt ip, #0xNNNN */
2402 0xf8dc44fc, /* add ip, pc */
2403 0xbf00f000 /* ldr.w pc, [ip] */
2407 /* The format of the first entry in the procedure linkage table
2408 for a VxWorks executable. */
2409 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2411 0xe52dc008, /* str ip,[sp,#-8]! */
2412 0xe59fc000, /* ldr ip,[pc] */
2413 0xe59cf008, /* ldr pc,[ip,#8] */
2414 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2417 /* The format of subsequent entries in a VxWorks executable. */
2418 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2420 0xe59fc000, /* ldr ip,[pc] */
2421 0xe59cf000, /* ldr pc,[ip] */
2422 0x00000000, /* .long @got */
2423 0xe59fc000, /* ldr ip,[pc] */
2424 0xea000000, /* b _PLT */
2425 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2428 /* The format of entries in a VxWorks shared library. */
2429 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2431 0xe59fc000, /* ldr ip,[pc] */
2432 0xe79cf009, /* ldr pc,[ip,r9] */
2433 0x00000000, /* .long @got */
2434 0xe59fc000, /* ldr ip,[pc] */
2435 0xe599f008, /* ldr pc,[r9,#8] */
2436 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2439 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2440 #define PLT_THUMB_STUB_SIZE 4
2441 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2447 /* The entries in a PLT when using a DLL-based target with multiple
2449 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2451 0xe51ff004, /* ldr pc, [pc, #-4] */
2452 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2455 /* The first entry in a procedure linkage table looks like
2456 this. It is set up so that any shared library function that is
2457 called before the relocation has been set up calls the dynamic
2459 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2462 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2463 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2464 0xe08cc00f, /* add ip, ip, pc */
2465 0xe52dc008, /* str ip, [sp, #-8]! */
2466 /* Second bundle: */
2467 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2468 0xe59cc000, /* ldr ip, [ip] */
2469 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2470 0xe12fff1c, /* bx ip */
2472 0xe320f000, /* nop */
2473 0xe320f000, /* nop */
2474 0xe320f000, /* nop */
2476 0xe50dc004, /* str ip, [sp, #-4] */
2477 /* Fourth bundle: */
2478 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2479 0xe59cc000, /* ldr ip, [ip] */
2480 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2481 0xe12fff1c, /* bx ip */
2483 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2485 /* Subsequent entries in a procedure linkage table look like this. */
2486 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2488 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2489 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2490 0xe08cc00f, /* add ip, ip, pc */
2491 0xea000000, /* b .Lplt_tail */
2494 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2495 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2496 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2497 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2498 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2499 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2500 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2501 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2511 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2512 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2513 is inserted in arm_build_one_stub(). */
2514 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2515 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2516 #define THUMB32_MOVT(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2517 #define THUMB32_MOVW(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2518 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2519 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2520 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2521 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2526 enum stub_insn_type type;
2527 unsigned int r_type;
2531 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2532 to reach the stub if necessary. */
2533 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2535 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2536 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2539 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2541 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2543 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2544 ARM_INSN (0xe12fff1c), /* bx ip */
2545 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2548 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2549 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2551 THUMB16_INSN (0xb401), /* push {r0} */
2552 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2553 THUMB16_INSN (0x4684), /* mov ip, r0 */
2554 THUMB16_INSN (0xbc01), /* pop {r0} */
2555 THUMB16_INSN (0x4760), /* bx ip */
2556 THUMB16_INSN (0xbf00), /* nop */
2557 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2560 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2561 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2563 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2564 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(x) */
2567 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2568 M-profile architectures. */
2569 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2571 THUMB32_MOVW (0xf2400c00), /* mov.w ip, R_ARM_MOVW_ABS_NC */
2572 THUMB32_MOVT (0xf2c00c00), /* movt ip, R_ARM_MOVT_ABS << 16 */
2573 THUMB16_INSN (0x4760), /* bx ip */
2576 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2578 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2580 THUMB16_INSN (0x4778), /* bx pc */
2581 THUMB16_INSN (0x46c0), /* nop */
2582 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2583 ARM_INSN (0xe12fff1c), /* bx ip */
2584 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2587 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2589 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2591 THUMB16_INSN (0x4778), /* bx pc */
2592 THUMB16_INSN (0x46c0), /* nop */
2593 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2594 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2597 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2598 one, when the destination is close enough. */
2599 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2601 THUMB16_INSN (0x4778), /* bx pc */
2602 THUMB16_INSN (0x46c0), /* nop */
2603 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2606 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2607 blx to reach the stub if necessary. */
2608 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2610 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2611 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2612 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2615 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2616 blx to reach the stub if necessary. We can not add into pc;
2617 it is not guaranteed to mode switch (different in ARMv6 and
2619 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2621 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2622 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2623 ARM_INSN (0xe12fff1c), /* bx ip */
2624 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2627 /* V4T ARM -> ARM long branch stub, PIC. */
2628 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2630 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2631 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2632 ARM_INSN (0xe12fff1c), /* bx ip */
2633 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2636 /* V4T Thumb -> ARM long branch stub, PIC. */
2637 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2639 THUMB16_INSN (0x4778), /* bx pc */
2640 THUMB16_INSN (0x46c0), /* nop */
2641 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2642 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2643 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2646 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2648 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2650 THUMB16_INSN (0xb401), /* push {r0} */
2651 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2652 THUMB16_INSN (0x46fc), /* mov ip, pc */
2653 THUMB16_INSN (0x4484), /* add ip, r0 */
2654 THUMB16_INSN (0xbc01), /* pop {r0} */
2655 THUMB16_INSN (0x4760), /* bx ip */
2656 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2659 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2661 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2663 THUMB16_INSN (0x4778), /* bx pc */
2664 THUMB16_INSN (0x46c0), /* nop */
2665 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2666 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2667 ARM_INSN (0xe12fff1c), /* bx ip */
2668 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2671 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2672 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2673 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2675 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2676 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2677 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2680 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2681 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2682 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2684 THUMB16_INSN (0x4778), /* bx pc */
2685 THUMB16_INSN (0x46c0), /* nop */
2686 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2687 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2688 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2691 /* NaCl ARM -> ARM long branch stub. */
2692 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2694 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2695 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2696 ARM_INSN (0xe12fff1c), /* bx ip */
2697 ARM_INSN (0xe320f000), /* nop */
2698 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2699 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2700 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2701 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2704 /* NaCl ARM -> ARM long branch stub, PIC. */
2705 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2707 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2708 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2709 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2710 ARM_INSN (0xe12fff1c), /* bx ip */
2711 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2712 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2713 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2714 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2717 /* Stub used for transition to secure state (aka SG veneer). */
2718 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
2720 THUMB32_INSN (0xe97fe97f), /* sg. */
2721 THUMB32_B_INSN (0xf000b800, -4), /* b.w original_branch_dest. */
2725 /* Cortex-A8 erratum-workaround stubs. */
2727 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2728 can't use a conditional branch to reach this stub). */
2730 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2732 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2733 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2734 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2737 /* Stub used for b.w and bl.w instructions. */
2739 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2741 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2744 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2746 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2749 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2750 instruction (which switches to ARM mode) to point to this stub. Jump to the
2751 real destination using an ARM-mode branch. */
2753 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2755 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2758 /* For each section group there can be a specially created linker section
2759 to hold the stubs for that group. The name of the stub section is based
2760 upon the name of another section within that group with the suffix below
2763 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2764 create what appeared to be a linker stub section when it actually
2765 contained user code/data. For example, consider this fragment:
2767 const char * stubborn_problems[] = { "np" };
2769 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2772 .data.rel.local.stubborn_problems
2774 This then causes problems in arm32_arm_build_stubs() as it triggers:
2776 // Ignore non-stub sections.
2777 if (!strstr (stub_sec->name, STUB_SUFFIX))
2780 And so the section would be ignored instead of being processed. Hence
2781 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2783 #define STUB_SUFFIX ".__stub"
2785 /* One entry per long/short branch stub defined above. */
2787 DEF_STUB(long_branch_any_any) \
2788 DEF_STUB(long_branch_v4t_arm_thumb) \
2789 DEF_STUB(long_branch_thumb_only) \
2790 DEF_STUB(long_branch_v4t_thumb_thumb) \
2791 DEF_STUB(long_branch_v4t_thumb_arm) \
2792 DEF_STUB(short_branch_v4t_thumb_arm) \
2793 DEF_STUB(long_branch_any_arm_pic) \
2794 DEF_STUB(long_branch_any_thumb_pic) \
2795 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2796 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2797 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2798 DEF_STUB(long_branch_thumb_only_pic) \
2799 DEF_STUB(long_branch_any_tls_pic) \
2800 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2801 DEF_STUB(long_branch_arm_nacl) \
2802 DEF_STUB(long_branch_arm_nacl_pic) \
2803 DEF_STUB(cmse_branch_thumb_only) \
2804 DEF_STUB(a8_veneer_b_cond) \
2805 DEF_STUB(a8_veneer_b) \
2806 DEF_STUB(a8_veneer_bl) \
2807 DEF_STUB(a8_veneer_blx) \
2808 DEF_STUB(long_branch_thumb2_only) \
2809 DEF_STUB(long_branch_thumb2_only_pure)
2811 #define DEF_STUB(x) arm_stub_##x,
2812 enum elf32_arm_stub_type
2820 /* Note the first a8_veneer type. */
2821 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2825 const insn_sequence* template_sequence;
2829 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2830 static const stub_def stub_definitions[] =
2836 struct elf32_arm_stub_hash_entry
2838 /* Base hash table entry structure. */
2839 struct bfd_hash_entry root;
2841 /* The stub section. */
2844 /* Offset within stub_sec of the beginning of this stub. */
2845 bfd_vma stub_offset;
2847 /* Given the symbol's value and its section we can determine its final
2848 value when building the stubs (so the stub knows where to jump). */
2849 bfd_vma target_value;
2850 asection *target_section;
2852 /* Same as above but for the source of the branch to the stub. Used for
2853 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2854 such, source section does not need to be recorded since Cortex-A8 erratum
2855 workaround stubs are only generated when both source and target are in the
2857 bfd_vma source_value;
2859 /* The instruction which caused this stub to be generated (only valid for
2860 Cortex-A8 erratum workaround stubs at present). */
2861 unsigned long orig_insn;
2863 /* The stub type. */
2864 enum elf32_arm_stub_type stub_type;
2865 /* Its encoding size in bytes. */
2868 const insn_sequence *stub_template;
2869 /* The size of the template (number of entries). */
2870 int stub_template_size;
2872 /* The symbol table entry, if any, that this was derived from. */
2873 struct elf32_arm_link_hash_entry *h;
2875 /* Type of branch. */
2876 enum arm_st_branch_type branch_type;
2878 /* Where this stub is being called from, or, in the case of combined
2879 stub sections, the first input section in the group. */
2882 /* The name for the local symbol at the start of this stub. The
2883 stub name in the hash table has to be unique; this does not, so
2884 it can be friendlier. */
2888 /* Used to build a map of a section. This is required for mixed-endian
2891 typedef struct elf32_elf_section_map
2896 elf32_arm_section_map;
2898 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2902 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2903 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2904 VFP11_ERRATUM_ARM_VENEER,
2905 VFP11_ERRATUM_THUMB_VENEER
2907 elf32_vfp11_erratum_type;
2909 typedef struct elf32_vfp11_erratum_list
2911 struct elf32_vfp11_erratum_list *next;
2917 struct elf32_vfp11_erratum_list *veneer;
2918 unsigned int vfp_insn;
2922 struct elf32_vfp11_erratum_list *branch;
2926 elf32_vfp11_erratum_type type;
2928 elf32_vfp11_erratum_list;
2930 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2934 STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2935 STM32L4XX_ERRATUM_VENEER
2937 elf32_stm32l4xx_erratum_type;
2939 typedef struct elf32_stm32l4xx_erratum_list
2941 struct elf32_stm32l4xx_erratum_list *next;
2947 struct elf32_stm32l4xx_erratum_list *veneer;
2952 struct elf32_stm32l4xx_erratum_list *branch;
2956 elf32_stm32l4xx_erratum_type type;
2958 elf32_stm32l4xx_erratum_list;
2963 INSERT_EXIDX_CANTUNWIND_AT_END
2965 arm_unwind_edit_type;
2967 /* A (sorted) list of edits to apply to an unwind table. */
2968 typedef struct arm_unwind_table_edit
2970 arm_unwind_edit_type type;
2971 /* Note: we sometimes want to insert an unwind entry corresponding to a
2972 section different from the one we're currently writing out, so record the
2973 (text) section this edit relates to here. */
2974 asection *linked_section;
2976 struct arm_unwind_table_edit *next;
2978 arm_unwind_table_edit;
2980 typedef struct _arm_elf_section_data
2982 /* Information about mapping symbols. */
2983 struct bfd_elf_section_data elf;
2984 unsigned int mapcount;
2985 unsigned int mapsize;
2986 elf32_arm_section_map *map;
2987 /* Information about CPU errata. */
2988 unsigned int erratumcount;
2989 elf32_vfp11_erratum_list *erratumlist;
2990 unsigned int stm32l4xx_erratumcount;
2991 elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
2992 unsigned int additional_reloc_count;
2993 /* Information about unwind tables. */
2996 /* Unwind info attached to a text section. */
2999 asection *arm_exidx_sec;
3002 /* Unwind info attached to an .ARM.exidx section. */
3005 arm_unwind_table_edit *unwind_edit_list;
3006 arm_unwind_table_edit *unwind_edit_tail;
3010 _arm_elf_section_data;
3012 #define elf32_arm_section_data(sec) \
3013 ((_arm_elf_section_data *) elf_section_data (sec))
3015 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3016 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3017 so may be created multiple times: we use an array of these entries whilst
3018 relaxing which we can refresh easily, then create stubs for each potentially
3019 erratum-triggering instruction once we've settled on a solution. */
3021 struct a8_erratum_fix
3026 bfd_vma target_offset;
3027 unsigned long orig_insn;
3029 enum elf32_arm_stub_type stub_type;
3030 enum arm_st_branch_type branch_type;
3033 /* A table of relocs applied to branches which might trigger Cortex-A8
3036 struct a8_erratum_reloc
3039 bfd_vma destination;
3040 struct elf32_arm_link_hash_entry *hash;
3041 const char *sym_name;
3042 unsigned int r_type;
3043 enum arm_st_branch_type branch_type;
3044 bfd_boolean non_a8_stub;
3047 /* The size of the thread control block. */
3050 /* ARM-specific information about a PLT entry, over and above the usual
3054 /* We reference count Thumb references to a PLT entry separately,
3055 so that we can emit the Thumb trampoline only if needed. */
3056 bfd_signed_vma thumb_refcount;
3058 /* Some references from Thumb code may be eliminated by BL->BLX
3059 conversion, so record them separately. */
3060 bfd_signed_vma maybe_thumb_refcount;
3062 /* How many of the recorded PLT accesses were from non-call relocations.
3063 This information is useful when deciding whether anything takes the
3064 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
3065 non-call references to the function should resolve directly to the
3066 real runtime target. */
3067 unsigned int noncall_refcount;
3069 /* Since PLT entries have variable size if the Thumb prologue is
3070 used, we need to record the index into .got.plt instead of
3071 recomputing it from the PLT offset. */
3072 bfd_signed_vma got_offset;
3075 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
3076 struct arm_local_iplt_info
3078 /* The information that is usually found in the generic ELF part of
3079 the hash table entry. */
3080 union gotplt_union root;
3082 /* The information that is usually found in the ARM-specific part of
3083 the hash table entry. */
3084 struct arm_plt_info arm;
3086 /* A list of all potential dynamic relocations against this symbol. */
3087 struct elf_dyn_relocs *dyn_relocs;
3090 /* Structure to handle FDPIC support for local functions. */
3091 struct fdpic_local {
3092 unsigned int funcdesc_cnt;
3093 unsigned int gotofffuncdesc_cnt;
3094 int funcdesc_offset;
3097 struct elf_arm_obj_tdata
3099 struct elf_obj_tdata root;
3101 /* tls_type for each local got entry. */
3102 char *local_got_tls_type;
3104 /* GOTPLT entries for TLS descriptors. */
3105 bfd_vma *local_tlsdesc_gotent;
3107 /* Information for local symbols that need entries in .iplt. */
3108 struct arm_local_iplt_info **local_iplt;
3110 /* Zero to warn when linking objects with incompatible enum sizes. */
3111 int no_enum_size_warning;
3113 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
3114 int no_wchar_size_warning;
3116 /* Maintains FDPIC counters and funcdesc info. */
3117 struct fdpic_local *local_fdpic_cnts;
3120 #define elf_arm_tdata(bfd) \
3121 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3123 #define elf32_arm_local_got_tls_type(bfd) \
3124 (elf_arm_tdata (bfd)->local_got_tls_type)
3126 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3127 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3129 #define elf32_arm_local_iplt(bfd) \
3130 (elf_arm_tdata (bfd)->local_iplt)
3132 #define elf32_arm_local_fdpic_cnts(bfd) \
3133 (elf_arm_tdata (bfd)->local_fdpic_cnts)
3135 #define is_arm_elf(bfd) \
3136 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3137 && elf_tdata (bfd) != NULL \
3138 && elf_object_id (bfd) == ARM_ELF_DATA)
3141 elf32_arm_mkobject (bfd *abfd)
3143 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3147 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3149 /* Structure to handle FDPIC support for extern functions. */
3150 struct fdpic_global {
3151 unsigned int gotofffuncdesc_cnt;
3152 unsigned int gotfuncdesc_cnt;
3153 unsigned int funcdesc_cnt;
3154 int funcdesc_offset;
3155 int gotfuncdesc_offset;
3158 /* Arm ELF linker hash entry. */
3159 struct elf32_arm_link_hash_entry
3161 struct elf_link_hash_entry root;
3163 /* Track dynamic relocs copied for this symbol. */
3164 struct elf_dyn_relocs *dyn_relocs;
3166 /* ARM-specific PLT information. */
3167 struct arm_plt_info plt;
3169 #define GOT_UNKNOWN 0
3170 #define GOT_NORMAL 1
3171 #define GOT_TLS_GD 2
3172 #define GOT_TLS_IE 4
3173 #define GOT_TLS_GDESC 8
3174 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
3175 unsigned int tls_type : 8;
3177 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
3178 unsigned int is_iplt : 1;
3180 unsigned int unused : 23;
3182 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3183 starting at the end of the jump table. */
3184 bfd_vma tlsdesc_got;
3186 /* The symbol marking the real symbol location for exported thumb
3187 symbols with Arm stubs. */
3188 struct elf_link_hash_entry *export_glue;
3190 /* A pointer to the most recently used stub hash entry against this
3192 struct elf32_arm_stub_hash_entry *stub_cache;
3194 /* Counter for FDPIC relocations against this symbol. */
3195 struct fdpic_global fdpic_cnts;
3198 /* Traverse an arm ELF linker hash table. */
3199 #define elf32_arm_link_hash_traverse(table, func, info) \
3200 (elf_link_hash_traverse \
3202 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
3205 /* Get the ARM elf linker hash table from a link_info structure. */
3206 #define elf32_arm_hash_table(info) \
3207 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
3208 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
3210 #define arm_stub_hash_lookup(table, string, create, copy) \
3211 ((struct elf32_arm_stub_hash_entry *) \
3212 bfd_hash_lookup ((table), (string), (create), (copy)))
3214 /* Array to keep track of which stub sections have been created, and
3215 information on stub grouping. */
3218 /* This is the section to which stubs in the group will be
3221 /* The stub section. */
3225 #define elf32_arm_compute_jump_table_size(htab) \
3226 ((htab)->next_tls_desc_index * 4)
3228 /* ARM ELF linker hash table. */
3229 struct elf32_arm_link_hash_table
3231 /* The main hash table. */
3232 struct elf_link_hash_table root;
3234 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3235 bfd_size_type thumb_glue_size;
3237 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3238 bfd_size_type arm_glue_size;
3240 /* The size in bytes of section containing the ARMv4 BX veneers. */
3241 bfd_size_type bx_glue_size;
3243 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3244 veneer has been populated. */
3245 bfd_vma bx_glue_offset[15];
3247 /* The size in bytes of the section containing glue for VFP11 erratum
3249 bfd_size_type vfp11_erratum_glue_size;
3251 /* The size in bytes of the section containing glue for STM32L4XX erratum
3253 bfd_size_type stm32l4xx_erratum_glue_size;
3255 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3256 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3257 elf32_arm_write_section(). */
3258 struct a8_erratum_fix *a8_erratum_fixes;
3259 unsigned int num_a8_erratum_fixes;
3261 /* An arbitrary input BFD chosen to hold the glue sections. */
3262 bfd * bfd_of_glue_owner;
3264 /* Nonzero to output a BE8 image. */
3267 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3268 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3271 /* The relocation to use for R_ARM_TARGET2 relocations. */
3274 /* 0 = Ignore R_ARM_V4BX.
3275 1 = Convert BX to MOV PC.
3276 2 = Generate v4 interworing stubs. */
3279 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3282 /* Whether we should fix the ARM1176 BLX immediate issue. */
3285 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3288 /* What sort of code sequences we should look for which may trigger the
3289 VFP11 denorm erratum. */
3290 bfd_arm_vfp11_fix vfp11_fix;
3292 /* Global counter for the number of fixes we have emitted. */
3293 int num_vfp11_fixes;
3295 /* What sort of code sequences we should look for which may trigger the
3296 STM32L4XX erratum. */
3297 bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3299 /* Global counter for the number of fixes we have emitted. */
3300 int num_stm32l4xx_fixes;
3302 /* Nonzero to force PIC branch veneers. */
3305 /* The number of bytes in the initial entry in the PLT. */
3306 bfd_size_type plt_header_size;
3308 /* The number of bytes in the subsequent PLT etries. */
3309 bfd_size_type plt_entry_size;
3311 /* True if the target system is VxWorks. */
3314 /* True if the target system is Symbian OS. */
3317 /* True if the target system is Native Client. */
3320 /* True if the target uses REL relocations. */
3321 bfd_boolean use_rel;
3323 /* Nonzero if import library must be a secure gateway import library
3324 as per ARMv8-M Security Extensions. */
3327 /* The import library whose symbols' address must remain stable in
3328 the import library generated. */
3331 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3332 bfd_vma next_tls_desc_index;
3334 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3335 bfd_vma num_tls_desc;
3337 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3340 /* The offset into splt of the PLT entry for the TLS descriptor
3341 resolver. Special values are 0, if not necessary (or not found
3342 to be necessary yet), and -1 if needed but not determined
3344 bfd_vma dt_tlsdesc_plt;
3346 /* The offset into sgot of the GOT entry used by the PLT entry
3348 bfd_vma dt_tlsdesc_got;
3350 /* Offset in .plt section of tls_arm_trampoline. */
3351 bfd_vma tls_trampoline;
3353 /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
3356 bfd_signed_vma refcount;
3360 /* Small local sym cache. */
3361 struct sym_cache sym_cache;
3363 /* For convenience in allocate_dynrelocs. */
3366 /* The amount of space used by the reserved portion of the sgotplt
3367 section, plus whatever space is used by the jump slots. */
3368 bfd_vma sgotplt_jump_table_size;
3370 /* The stub hash table. */
3371 struct bfd_hash_table stub_hash_table;
3373 /* Linker stub bfd. */
3376 /* Linker call-backs. */
3377 asection * (*add_stub_section) (const char *, asection *, asection *,
3379 void (*layout_sections_again) (void);
3381 /* Array to keep track of which stub sections have been created, and
3382 information on stub grouping. */
3383 struct map_stub *stub_group;
3385 /* Input stub section holding secure gateway veneers. */
3386 asection *cmse_stub_sec;
3388 /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3389 start to be allocated. */
3390 bfd_vma new_cmse_stub_offset;
3392 /* Number of elements in stub_group. */
3393 unsigned int top_id;
3395 /* Assorted information used by elf32_arm_size_stubs. */
3396 unsigned int bfd_count;
3397 unsigned int top_index;
3398 asection **input_list;
3400 /* True if the target system uses FDPIC. */
3403 /* Fixup section. Used for FDPIC. */
3407 /* Add an FDPIC read-only fixup. */
3409 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3411 bfd_vma fixup_offset;
3413 fixup_offset = srofixup->reloc_count++ * 4;
3414 BFD_ASSERT (fixup_offset < srofixup->size);
3415 bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
3419 ctz (unsigned int mask)
3421 #if GCC_VERSION >= 3004
3422 return __builtin_ctz (mask);
3426 for (i = 0; i < 8 * sizeof (mask); i++)
3437 elf32_arm_popcount (unsigned int mask)
3439 #if GCC_VERSION >= 3004
3440 return __builtin_popcount (mask);
3445 for (i = 0; i < 8 * sizeof (mask); i++)
3455 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3456 asection *sreloc, Elf_Internal_Rela *rel);
3459 arm_elf_fill_funcdesc(bfd *output_bfd,
3460 struct bfd_link_info *info,
3461 int *funcdesc_offset,
3465 bfd_vma dynreloc_value,
3468 if ((*funcdesc_offset & 1) == 0)
3470 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
3471 asection *sgot = globals->root.sgot;
3473 if (bfd_link_pic(info))
3475 asection *srelgot = globals->root.srelgot;
3476 Elf_Internal_Rela outrel;
3478 outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
3479 outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
3480 outrel.r_addend = 0;
3482 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
3483 bfd_put_32 (output_bfd, addr, sgot->contents + offset);
3484 bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
3488 struct elf_link_hash_entry *hgot = globals->root.hgot;
3489 bfd_vma got_value = hgot->root.u.def.value
3490 + hgot->root.u.def.section->output_section->vma
3491 + hgot->root.u.def.section->output_offset;
3493 arm_elf_add_rofixup(output_bfd, globals->srofixup,
3494 sgot->output_section->vma + sgot->output_offset
3496 arm_elf_add_rofixup(output_bfd, globals->srofixup,
3497 sgot->output_section->vma + sgot->output_offset
3499 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
3500 bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
3502 *funcdesc_offset |= 1;
3506 /* Create an entry in an ARM ELF linker hash table. */
3508 static struct bfd_hash_entry *
3509 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3510 struct bfd_hash_table * table,
3511 const char * string)
3513 struct elf32_arm_link_hash_entry * ret =
3514 (struct elf32_arm_link_hash_entry *) entry;
3516 /* Allocate the structure if it has not already been allocated by a
3519 ret = (struct elf32_arm_link_hash_entry *)
3520 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3522 return (struct bfd_hash_entry *) ret;
3524 /* Call the allocation method of the superclass. */
3525 ret = ((struct elf32_arm_link_hash_entry *)
3526 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3530 ret->dyn_relocs = NULL;
3531 ret->tls_type = GOT_UNKNOWN;
3532 ret->tlsdesc_got = (bfd_vma) -1;
3533 ret->plt.thumb_refcount = 0;
3534 ret->plt.maybe_thumb_refcount = 0;
3535 ret->plt.noncall_refcount = 0;
3536 ret->plt.got_offset = -1;
3537 ret->is_iplt = FALSE;
3538 ret->export_glue = NULL;
3540 ret->stub_cache = NULL;
3542 ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
3543 ret->fdpic_cnts.gotfuncdesc_cnt = 0;
3544 ret->fdpic_cnts.funcdesc_cnt = 0;
3545 ret->fdpic_cnts.funcdesc_offset = -1;
3546 ret->fdpic_cnts.gotfuncdesc_offset = -1;
3549 return (struct bfd_hash_entry *) ret;
3552 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3556 elf32_arm_allocate_local_sym_info (bfd *abfd)
3558 if (elf_local_got_refcounts (abfd) == NULL)
3560 bfd_size_type num_syms;
3564 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3565 size = num_syms * (sizeof (bfd_signed_vma)
3566 + sizeof (struct arm_local_iplt_info *)
3569 + sizeof (struct fdpic_local));
3570 data = bfd_zalloc (abfd, size);
3574 elf32_arm_local_fdpic_cnts (abfd) = (struct fdpic_local *) data;
3575 data += num_syms * sizeof (struct fdpic_local);
3577 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3578 data += num_syms * sizeof (bfd_signed_vma);
3580 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3581 data += num_syms * sizeof (struct arm_local_iplt_info *);
3583 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3584 data += num_syms * sizeof (bfd_vma);
3586 elf32_arm_local_got_tls_type (abfd) = data;
3591 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3592 to input bfd ABFD. Create the information if it doesn't already exist.
3593 Return null if an allocation fails. */
3595 static struct arm_local_iplt_info *
3596 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3598 struct arm_local_iplt_info **ptr;
3600 if (!elf32_arm_allocate_local_sym_info (abfd))
3603 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3604 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3606 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3610 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3611 in ABFD's symbol table. If the symbol is global, H points to its
3612 hash table entry, otherwise H is null.
3614 Return true if the symbol does have PLT information. When returning
3615 true, point *ROOT_PLT at the target-independent reference count/offset
3616 union and *ARM_PLT at the ARM-specific information. */
3619 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3620 struct elf32_arm_link_hash_entry *h,
3621 unsigned long r_symndx, union gotplt_union **root_plt,
3622 struct arm_plt_info **arm_plt)
3624 struct arm_local_iplt_info *local_iplt;
3626 if (globals->root.splt == NULL && globals->root.iplt == NULL)
3631 *root_plt = &h->root.plt;
3636 if (elf32_arm_local_iplt (abfd) == NULL)
3639 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3640 if (local_iplt == NULL)
3643 *root_plt = &local_iplt->root;
3644 *arm_plt = &local_iplt->arm;
3648 static bfd_boolean using_thumb_only (struct elf32_arm_link_hash_table *globals);
3650 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3654 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3655 struct arm_plt_info *arm_plt)
3657 struct elf32_arm_link_hash_table *htab;
3659 htab = elf32_arm_hash_table (info);
3661 return (!using_thumb_only(htab) && (arm_plt->thumb_refcount != 0
3662 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3665 /* Return a pointer to the head of the dynamic reloc list that should
3666 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3667 ABFD's symbol table. Return null if an error occurs. */
3669 static struct elf_dyn_relocs **
3670 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3671 Elf_Internal_Sym *isym)
3673 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3675 struct arm_local_iplt_info *local_iplt;
3677 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3678 if (local_iplt == NULL)
3680 return &local_iplt->dyn_relocs;
3684 /* Track dynamic relocs needed for local syms too.
3685 We really need local syms available to do this
3690 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3694 vpp = &elf_section_data (s)->local_dynrel;
3695 return (struct elf_dyn_relocs **) vpp;
3699 /* Initialize an entry in the stub hash table. */
3701 static struct bfd_hash_entry *
3702 stub_hash_newfunc (struct bfd_hash_entry *entry,
3703 struct bfd_hash_table *table,
3706 /* Allocate the structure if it has not already been allocated by a
3710 entry = (struct bfd_hash_entry *)
3711 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3716 /* Call the allocation method of the superclass. */
3717 entry = bfd_hash_newfunc (entry, table, string);
3720 struct elf32_arm_stub_hash_entry *eh;
3722 /* Initialize the local fields. */
3723 eh = (struct elf32_arm_stub_hash_entry *) entry;
3724 eh->stub_sec = NULL;
3725 eh->stub_offset = (bfd_vma) -1;
3726 eh->source_value = 0;
3727 eh->target_value = 0;
3728 eh->target_section = NULL;
3730 eh->stub_type = arm_stub_none;
3732 eh->stub_template = NULL;
3733 eh->stub_template_size = -1;
3736 eh->output_name = NULL;
3742 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3743 shortcuts to them in our hash table. */
3746 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3748 struct elf32_arm_link_hash_table *htab;
3750 htab = elf32_arm_hash_table (info);
3754 /* BPABI objects never have a GOT, or associated sections. */
3755 if (htab->symbian_p)
3758 if (! _bfd_elf_create_got_section (dynobj, info))
3761 /* Also create .rofixup. */
3764 htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3765 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3766 | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3767 if (htab->srofixup == NULL || ! bfd_set_section_alignment (dynobj, htab->srofixup, 2))
3774 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3777 create_ifunc_sections (struct bfd_link_info *info)
3779 struct elf32_arm_link_hash_table *htab;
3780 const struct elf_backend_data *bed;
3785 htab = elf32_arm_hash_table (info);
3786 dynobj = htab->root.dynobj;
3787 bed = get_elf_backend_data (dynobj);
3788 flags = bed->dynamic_sec_flags;
3790 if (htab->root.iplt == NULL)
3792 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3793 flags | SEC_READONLY | SEC_CODE);
3795 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3797 htab->root.iplt = s;
3800 if (htab->root.irelplt == NULL)
3802 s = bfd_make_section_anyway_with_flags (dynobj,
3803 RELOC_SECTION (htab, ".iplt"),
3804 flags | SEC_READONLY);
3806 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3808 htab->root.irelplt = s;
3811 if (htab->root.igotplt == NULL)
3813 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3815 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3817 htab->root.igotplt = s;
3822 /* Determine if we're dealing with a Thumb only architecture. */
3825 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3828 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3829 Tag_CPU_arch_profile);
3832 return profile == 'M';
3834 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3836 /* Force return logic to be reviewed for each new architecture. */
3837 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3839 if (arch == TAG_CPU_ARCH_V6_M
3840 || arch == TAG_CPU_ARCH_V6S_M
3841 || arch == TAG_CPU_ARCH_V7E_M
3842 || arch == TAG_CPU_ARCH_V8M_BASE
3843 || arch == TAG_CPU_ARCH_V8M_MAIN)
3849 /* Determine if we're dealing with a Thumb-2 object. */
3852 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3855 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3859 return thumb_isa == 2;
3861 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3863 /* Force return logic to be reviewed for each new architecture. */
3864 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3866 return (arch == TAG_CPU_ARCH_V6T2
3867 || arch == TAG_CPU_ARCH_V7
3868 || arch == TAG_CPU_ARCH_V7E_M
3869 || arch == TAG_CPU_ARCH_V8
3870 || arch == TAG_CPU_ARCH_V8R
3871 || arch == TAG_CPU_ARCH_V8M_MAIN);
3874 /* Determine whether Thumb-2 BL instruction is available. */
3877 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3880 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3882 /* Force return logic to be reviewed for each new architecture. */
3883 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3885 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3886 return (arch == TAG_CPU_ARCH_V6T2
3887 || arch >= TAG_CPU_ARCH_V7);
3890 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3891 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3895 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3897 struct elf32_arm_link_hash_table *htab;
3899 htab = elf32_arm_hash_table (info);
3903 if (!htab->root.sgot && !create_got_section (dynobj, info))
3906 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3909 if (htab->vxworks_p)
3911 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3914 if (bfd_link_pic (info))
3916 htab->plt_header_size = 0;
3917 htab->plt_entry_size
3918 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3922 htab->plt_header_size
3923 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3924 htab->plt_entry_size
3925 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3928 if (elf_elfheader (dynobj))
3929 elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
3934 Test for thumb only architectures. Note - we cannot just call
3935 using_thumb_only() as the attributes in the output bfd have not been
3936 initialised at this point, so instead we use the input bfd. */
3937 bfd * saved_obfd = htab->obfd;
3939 htab->obfd = dynobj;
3940 if (using_thumb_only (htab))
3942 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
3943 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
3945 htab->obfd = saved_obfd;
3948 if (htab->fdpic_p) {
3949 htab->plt_header_size = 0;
3950 if (info->flags & DF_BIND_NOW)
3951 htab->plt_entry_size = 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry) - 5);
3953 htab->plt_entry_size = 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry);
3956 if (!htab->root.splt
3957 || !htab->root.srelplt
3958 || !htab->root.sdynbss
3959 || (!bfd_link_pic (info) && !htab->root.srelbss))
3965 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3968 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
3969 struct elf_link_hash_entry *dir,
3970 struct elf_link_hash_entry *ind)
3972 struct elf32_arm_link_hash_entry *edir, *eind;
3974 edir = (struct elf32_arm_link_hash_entry *) dir;
3975 eind = (struct elf32_arm_link_hash_entry *) ind;
3977 if (eind->dyn_relocs != NULL)
3979 if (edir->dyn_relocs != NULL)
3981 struct elf_dyn_relocs **pp;
3982 struct elf_dyn_relocs *p;
3984 /* Add reloc counts against the indirect sym to the direct sym
3985 list. Merge any entries against the same section. */
3986 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
3988 struct elf_dyn_relocs *q;
3990 for (q = edir->dyn_relocs; q != NULL; q = q->next)
3991 if (q->sec == p->sec)
3993 q->pc_count += p->pc_count;
3994 q->count += p->count;
4001 *pp = edir->dyn_relocs;
4004 edir->dyn_relocs = eind->dyn_relocs;
4005 eind->dyn_relocs = NULL;
4008 if (ind->root.type == bfd_link_hash_indirect)
4010 /* Copy over PLT info. */
4011 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
4012 eind->plt.thumb_refcount = 0;
4013 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
4014 eind->plt.maybe_thumb_refcount = 0;
4015 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
4016 eind->plt.noncall_refcount = 0;
4018 /* Copy FDPIC counters. */
4019 edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
4020 edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
4021 edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;
4023 /* We should only allocate a function to .iplt once the final
4024 symbol information is known. */
4025 BFD_ASSERT (!eind->is_iplt);
4027 if (dir->got.refcount <= 0)
4029 edir->tls_type = eind->tls_type;
4030 eind->tls_type = GOT_UNKNOWN;
4034 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
4037 /* Destroy an ARM elf linker hash table. */
4040 elf32_arm_link_hash_table_free (bfd *obfd)
4042 struct elf32_arm_link_hash_table *ret
4043 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4045 bfd_hash_table_free (&ret->stub_hash_table);
4046 _bfd_elf_link_hash_table_free (obfd);
4049 /* Create an ARM elf linker hash table. */
4051 static struct bfd_link_hash_table *
4052 elf32_arm_link_hash_table_create (bfd *abfd)
4054 struct elf32_arm_link_hash_table *ret;
4055 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
4057 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4061 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4062 elf32_arm_link_hash_newfunc,
4063 sizeof (struct elf32_arm_link_hash_entry),
4070 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4071 ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
4072 #ifdef FOUR_WORD_PLT
4073 ret->plt_header_size = 16;
4074 ret->plt_entry_size = 16;
4076 ret->plt_header_size = 20;
4077 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4079 ret->use_rel = TRUE;
4083 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4084 sizeof (struct elf32_arm_stub_hash_entry)))
4086 _bfd_elf_link_hash_table_free (abfd);
4089 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4091 return &ret->root.root;
4094 /* Determine what kind of NOPs are available. */
4097 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4099 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4102 /* Force return logic to be reviewed for each new architecture. */
4103 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
4105 return (arch == TAG_CPU_ARCH_V6T2
4106 || arch == TAG_CPU_ARCH_V6K
4107 || arch == TAG_CPU_ARCH_V7
4108 || arch == TAG_CPU_ARCH_V8
4109 || arch == TAG_CPU_ARCH_V8R);
4113 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4117 case arm_stub_long_branch_thumb_only:
4118 case arm_stub_long_branch_thumb2_only:
4119 case arm_stub_long_branch_thumb2_only_pure:
4120 case arm_stub_long_branch_v4t_thumb_arm:
4121 case arm_stub_short_branch_v4t_thumb_arm:
4122 case arm_stub_long_branch_v4t_thumb_arm_pic:
4123 case arm_stub_long_branch_v4t_thumb_tls_pic:
4124 case arm_stub_long_branch_thumb_only_pic:
4125 case arm_stub_cmse_branch_thumb_only:
4136 /* Determine the type of stub needed, if any, for a call. */
4138 static enum elf32_arm_stub_type
4139 arm_type_of_stub (struct bfd_link_info *info,
4140 asection *input_sec,
4141 const Elf_Internal_Rela *rel,
4142 unsigned char st_type,
4143 enum arm_st_branch_type *actual_branch_type,
4144 struct elf32_arm_link_hash_entry *hash,
4145 bfd_vma destination,
4151 bfd_signed_vma branch_offset;
4152 unsigned int r_type;
4153 struct elf32_arm_link_hash_table * globals;
4154 bfd_boolean thumb2, thumb2_bl, thumb_only;
4155 enum elf32_arm_stub_type stub_type = arm_stub_none;
4157 enum arm_st_branch_type branch_type = *actual_branch_type;
4158 union gotplt_union *root_plt;
4159 struct arm_plt_info *arm_plt;
4163 if (branch_type == ST_BRANCH_LONG)
4166 globals = elf32_arm_hash_table (info);
4167 if (globals == NULL)
4170 thumb_only = using_thumb_only (globals);
4171 thumb2 = using_thumb2 (globals);
4172 thumb2_bl = using_thumb2_bl (globals);
4174 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
4176 /* True for architectures that implement the thumb2 movw instruction. */
4177 thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);
4179 /* Determine where the call point is. */
4180 location = (input_sec->output_offset
4181 + input_sec->output_section->vma
4184 r_type = ELF32_R_TYPE (rel->r_info);
4186 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
4187 are considering a function call relocation. */
4188 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4189 || r_type == R_ARM_THM_JUMP19)
4190 && branch_type == ST_BRANCH_TO_ARM)
4191 branch_type = ST_BRANCH_TO_THUMB;
4193 /* For TLS call relocs, it is the caller's responsibility to provide
4194 the address of the appropriate trampoline. */
4195 if (r_type != R_ARM_TLS_CALL
4196 && r_type != R_ARM_THM_TLS_CALL
4197 && elf32_arm_get_plt_info (input_bfd, globals, hash,
4198 ELF32_R_SYM (rel->r_info), &root_plt,
4200 && root_plt->offset != (bfd_vma) -1)
4204 if (hash == NULL || hash->is_iplt)
4205 splt = globals->root.iplt;
4207 splt = globals->root.splt;
4212 /* Note when dealing with PLT entries: the main PLT stub is in
4213 ARM mode, so if the branch is in Thumb mode, another
4214 Thumb->ARM stub will be inserted later just before the ARM
4215 PLT stub. If a long branch stub is needed, we'll add a
4216 Thumb->Arm one and branch directly to the ARM PLT entry.
4217 Here, we have to check if a pre-PLT Thumb->ARM stub
4218 is needed and if it will be close enough. */
4220 destination = (splt->output_section->vma
4221 + splt->output_offset
4222 + root_plt->offset);
4225 /* Thumb branch/call to PLT: it can become a branch to ARM
4226 or to Thumb. We must perform the same checks and
4227 corrections as in elf32_arm_final_link_relocate. */
4228 if ((r_type == R_ARM_THM_CALL)
4229 || (r_type == R_ARM_THM_JUMP24))
4231 if (globals->use_blx
4232 && r_type == R_ARM_THM_CALL
4235 /* If the Thumb BLX instruction is available, convert
4236 the BL to a BLX instruction to call the ARM-mode
4238 branch_type = ST_BRANCH_TO_ARM;
4243 /* Target the Thumb stub before the ARM PLT entry. */
4244 destination -= PLT_THUMB_STUB_SIZE;
4245 branch_type = ST_BRANCH_TO_THUMB;
4250 branch_type = ST_BRANCH_TO_ARM;
4254 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
4255 BFD_ASSERT (st_type != STT_GNU_IFUNC);
4257 branch_offset = (bfd_signed_vma)(destination - location);
4259 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4260 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
4262 /* Handle cases where:
4263 - this call goes too far (different Thumb/Thumb2 max
4265 - it's a Thumb->Arm call and blx is not available, or it's a
4266 Thumb->Arm branch (not bl). A stub is needed in this case,
4267 but only if this call is not through a PLT entry. Indeed,
4268 PLT stubs handle mode switching already. */
4270 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
4271 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
4273 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
4274 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
4276 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
4277 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
4278 && (r_type == R_ARM_THM_JUMP19))
4279 || (branch_type == ST_BRANCH_TO_ARM
4280 && (((r_type == R_ARM_THM_CALL
4281 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
4282 || (r_type == R_ARM_THM_JUMP24)
4283 || (r_type == R_ARM_THM_JUMP19))
4286 /* If we need to insert a Thumb-Thumb long branch stub to a
4287 PLT, use one that branches directly to the ARM PLT
4288 stub. If we pretended we'd use the pre-PLT Thumb->ARM
4289 stub, undo this now. */
4290 if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
4292 branch_type = ST_BRANCH_TO_ARM;
4293 branch_offset += PLT_THUMB_STUB_SIZE;
4296 if (branch_type == ST_BRANCH_TO_THUMB)
4298 /* Thumb to thumb. */
4301 if (input_sec->flags & SEC_ELF_PURECODE)
4303 (_("%pB(%pA): warning: long branch veneers used in"
4304 " section with SHF_ARM_PURECODE section"
4305 " attribute is only supported for M-profile"
4306 " targets that implement the movw instruction"),
4307 input_bfd, input_sec);
4309 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4311 ? ((globals->use_blx
4312 && (r_type == R_ARM_THM_CALL))
4313 /* V5T and above. Stub starts with ARM code, so
4314 we must be able to switch mode before
4315 reaching it, which is only possible for 'bl'
4316 (ie R_ARM_THM_CALL relocation). */
4317 ? arm_stub_long_branch_any_thumb_pic
4318 /* On V4T, use Thumb code only. */
4319 : arm_stub_long_branch_v4t_thumb_thumb_pic)
4321 /* non-PIC stubs. */
4322 : ((globals->use_blx
4323 && (r_type == R_ARM_THM_CALL))
4324 /* V5T and above. */
4325 ? arm_stub_long_branch_any_any
4327 : arm_stub_long_branch_v4t_thumb_thumb);
4331 if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
4332 stub_type = arm_stub_long_branch_thumb2_only_pure;
4335 if (input_sec->flags & SEC_ELF_PURECODE)
4337 (_("%pB(%pA): warning: long branch veneers used in"
4338 " section with SHF_ARM_PURECODE section"
4339 " attribute is only supported for M-profile"
4340 " targets that implement the movw instruction"),
4341 input_bfd, input_sec);
4343 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4345 ? arm_stub_long_branch_thumb_only_pic
4347 : (thumb2 ? arm_stub_long_branch_thumb2_only
4348 : arm_stub_long_branch_thumb_only);
4354 if (input_sec->flags & SEC_ELF_PURECODE)
4356 (_("%pB(%pA): warning: long branch veneers used in"
4357 " section with SHF_ARM_PURECODE section"
4358 " attribute is only supported" " for M-profile"
4359 " targets that implement the movw instruction"),
4360 input_bfd, input_sec);
4364 && sym_sec->owner != NULL
4365 && !INTERWORK_FLAG (sym_sec->owner))
4368 (_("%pB(%s): warning: interworking not enabled;"
4369 " first occurrence: %pB: %s call to %s"),
4370 sym_sec->owner, name, input_bfd, "Thumb", "ARM");
4374 (bfd_link_pic (info) | globals->pic_veneer)
4376 ? (r_type == R_ARM_THM_TLS_CALL
4377 /* TLS PIC stubs. */
4378 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
4379 : arm_stub_long_branch_v4t_thumb_tls_pic)
4380 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4381 /* V5T PIC and above. */
4382 ? arm_stub_long_branch_any_arm_pic
4384 : arm_stub_long_branch_v4t_thumb_arm_pic))
4386 /* non-PIC stubs. */
4387 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4388 /* V5T and above. */
4389 ? arm_stub_long_branch_any_any
4391 : arm_stub_long_branch_v4t_thumb_arm);
4393 /* Handle v4t short branches. */
4394 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
4395 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
4396 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4397 stub_type = arm_stub_short_branch_v4t_thumb_arm;
4401 else if (r_type == R_ARM_CALL
4402 || r_type == R_ARM_JUMP24
4403 || r_type == R_ARM_PLT32
4404 || r_type == R_ARM_TLS_CALL)
4406 if (input_sec->flags & SEC_ELF_PURECODE)
4408 (_("%pB(%pA): warning: long branch veneers used in"
4409 " section with SHF_ARM_PURECODE section"
4410 " attribute is only supported for M-profile"
4411 " targets that implement the movw instruction"),
4412 input_bfd, input_sec);
4413 if (branch_type == ST_BRANCH_TO_THUMB)
4418 && sym_sec->owner != NULL
4419 && !INTERWORK_FLAG (sym_sec->owner))
4422 (_("%pB(%s): warning: interworking not enabled;"
4423 " first occurrence: %pB: %s call to %s"),
4424 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
4427 /* We have an extra 2-bytes reach because of
4428 the mode change (bit 24 (H) of BLX encoding). */
4429 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4430 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4431 || (r_type == R_ARM_CALL && !globals->use_blx)
4432 || (r_type == R_ARM_JUMP24)
4433 || (r_type == R_ARM_PLT32))
4435 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4437 ? ((globals->use_blx)
4438 /* V5T and above. */
4439 ? arm_stub_long_branch_any_thumb_pic
4441 : arm_stub_long_branch_v4t_arm_thumb_pic)
4443 /* non-PIC stubs. */
4444 : ((globals->use_blx)
4445 /* V5T and above. */
4446 ? arm_stub_long_branch_any_any
4448 : arm_stub_long_branch_v4t_arm_thumb);
4454 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4455 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4458 (bfd_link_pic (info) | globals->pic_veneer)
4460 ? (r_type == R_ARM_TLS_CALL
4462 ? arm_stub_long_branch_any_tls_pic
4464 ? arm_stub_long_branch_arm_nacl_pic
4465 : arm_stub_long_branch_any_arm_pic))
4466 /* non-PIC stubs. */
4468 ? arm_stub_long_branch_arm_nacl
4469 : arm_stub_long_branch_any_any);
4474 /* If a stub is needed, record the actual destination type. */
4475 if (stub_type != arm_stub_none)
4476 *actual_branch_type = branch_type;
4481 /* Build a name for an entry in the stub hash table. */
4484 elf32_arm_stub_name (const asection *input_section,
4485 const asection *sym_sec,
4486 const struct elf32_arm_link_hash_entry *hash,
4487 const Elf_Internal_Rela *rel,
4488 enum elf32_arm_stub_type stub_type)
4495 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4496 stub_name = (char *) bfd_malloc (len);
4497 if (stub_name != NULL)
4498 sprintf (stub_name, "%08x_%s+%x_%d",
4499 input_section->id & 0xffffffff,
4500 hash->root.root.root.string,
4501 (int) rel->r_addend & 0xffffffff,
4506 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4507 stub_name = (char *) bfd_malloc (len);
4508 if (stub_name != NULL)
4509 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4510 input_section->id & 0xffffffff,
4511 sym_sec->id & 0xffffffff,
4512 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4513 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4514 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4515 (int) rel->r_addend & 0xffffffff,
4522 /* Look up an entry in the stub hash. Stub entries are cached because
4523 creating the stub name takes a bit of time. */
4525 static struct elf32_arm_stub_hash_entry *
4526 elf32_arm_get_stub_entry (const asection *input_section,
4527 const asection *sym_sec,
4528 struct elf_link_hash_entry *hash,
4529 const Elf_Internal_Rela *rel,
4530 struct elf32_arm_link_hash_table *htab,
4531 enum elf32_arm_stub_type stub_type)
4533 struct elf32_arm_stub_hash_entry *stub_entry;
4534 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4535 const asection *id_sec;
4537 if ((input_section->flags & SEC_CODE) == 0)
4540 /* If this input section is part of a group of sections sharing one
4541 stub section, then use the id of the first section in the group.
4542 Stub names need to include a section id, as there may well be
4543 more than one stub used to reach say, printf, and we need to
4544 distinguish between them. */
4545 BFD_ASSERT (input_section->id <= htab->top_id);
4546 id_sec = htab->stub_group[input_section->id].link_sec;
4548 if (h != NULL && h->stub_cache != NULL
4549 && h->stub_cache->h == h
4550 && h->stub_cache->id_sec == id_sec
4551 && h->stub_cache->stub_type == stub_type)
4553 stub_entry = h->stub_cache;
4559 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4560 if (stub_name == NULL)
4563 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4564 stub_name, FALSE, FALSE);
4566 h->stub_cache = stub_entry;
4574 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4578 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4580 if (stub_type >= max_stub_type)
4581 abort (); /* Should be unreachable. */
4585 case arm_stub_cmse_branch_thumb_only:
4592 abort (); /* Should be unreachable. */
4595 /* Required alignment (as a power of 2) for the dedicated section holding
4596 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4597 with input sections. */
4600 arm_dedicated_stub_output_section_required_alignment
4601 (enum elf32_arm_stub_type stub_type)
4603 if (stub_type >= max_stub_type)
4604 abort (); /* Should be unreachable. */
4608 /* Vectors of Secure Gateway veneers must be aligned on 32byte
4610 case arm_stub_cmse_branch_thumb_only:
4614 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4618 abort (); /* Should be unreachable. */
4621 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4622 NULL if veneers of this type are interspersed with input sections. */
4625 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4627 if (stub_type >= max_stub_type)
4628 abort (); /* Should be unreachable. */
4632 case arm_stub_cmse_branch_thumb_only:
4633 return ".gnu.sgstubs";
4636 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4640 abort (); /* Should be unreachable. */
4643 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4644 returns the address of the hash table field in HTAB holding a pointer to the
4645 corresponding input section. Otherwise, returns NULL. */
4648 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4649 enum elf32_arm_stub_type stub_type)
4651 if (stub_type >= max_stub_type)
4652 abort (); /* Should be unreachable. */
4656 case arm_stub_cmse_branch_thumb_only:
4657 return &htab->cmse_stub_sec;
4660 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4664 abort (); /* Should be unreachable. */
4667 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4668 is the section that branch into veneer and can be NULL if stub should go in
4669 a dedicated output section. Returns a pointer to the stub section, and the
4670 section to which the stub section will be attached (in *LINK_SEC_P).
4671 LINK_SEC_P may be NULL. */
4674 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4675 struct elf32_arm_link_hash_table *htab,
4676 enum elf32_arm_stub_type stub_type)
4678 asection *link_sec, *out_sec, **stub_sec_p;
4679 const char *stub_sec_prefix;
4680 bfd_boolean dedicated_output_section =
4681 arm_dedicated_stub_output_section_required (stub_type);
4684 if (dedicated_output_section)
4686 bfd *output_bfd = htab->obfd;
4687 const char *out_sec_name =
4688 arm_dedicated_stub_output_section_name (stub_type);
4690 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
4691 stub_sec_prefix = out_sec_name;
4692 align = arm_dedicated_stub_output_section_required_alignment (stub_type);
4693 out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
4694 if (out_sec == NULL)
4696 _bfd_error_handler (_("no address assigned to the veneers output "
4697 "section %s"), out_sec_name);
4703 BFD_ASSERT (section->id <= htab->top_id);
4704 link_sec = htab->stub_group[section->id].link_sec;
4705 BFD_ASSERT (link_sec != NULL);
4706 stub_sec_p = &htab->stub_group[section->id].stub_sec;
4707 if (*stub_sec_p == NULL)
4708 stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
4709 stub_sec_prefix = link_sec->name;
4710 out_sec = link_sec->output_section;
4711 align = htab->nacl_p ? 4 : 3;
4714 if (*stub_sec_p == NULL)
4720 namelen = strlen (stub_sec_prefix);
4721 len = namelen + sizeof (STUB_SUFFIX);
4722 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4726 memcpy (s_name, stub_sec_prefix, namelen);
4727 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4728 *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4730 if (*stub_sec_p == NULL)
4733 out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
4734 | SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
4738 if (!dedicated_output_section)
4739 htab->stub_group[section->id].stub_sec = *stub_sec_p;
4742 *link_sec_p = link_sec;
4747 /* Add a new stub entry to the stub hash. Not all fields of the new
4748 stub entry are initialised. */
4750 static struct elf32_arm_stub_hash_entry *
4751 elf32_arm_add_stub (const char *stub_name, asection *section,
4752 struct elf32_arm_link_hash_table *htab,
4753 enum elf32_arm_stub_type stub_type)
4757 struct elf32_arm_stub_hash_entry *stub_entry;
4759 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4761 if (stub_sec == NULL)
4764 /* Enter this entry into the linker stub hash table. */
4765 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4767 if (stub_entry == NULL)
4769 if (section == NULL)
4771 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4772 section->owner, stub_name);
4776 stub_entry->stub_sec = stub_sec;
4777 stub_entry->stub_offset = (bfd_vma) -1;
4778 stub_entry->id_sec = link_sec;
4783 /* Store an Arm insn into an output section not processed by
4784 elf32_arm_write_section. */
4787 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4788 bfd * output_bfd, bfd_vma val, void * ptr)
4790 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4791 bfd_putl32 (val, ptr);
4793 bfd_putb32 (val, ptr);
4796 /* Store a 16-bit Thumb insn into an output section not processed by
4797 elf32_arm_write_section. */
4800 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4801 bfd * output_bfd, bfd_vma val, void * ptr)
4803 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4804 bfd_putl16 (val, ptr);
4806 bfd_putb16 (val, ptr);
4809 /* Store a Thumb2 insn into an output section not processed by
4810 elf32_arm_write_section. */
4813 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4814 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4816 /* T2 instructions are 16-bit streamed. */
4817 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4819 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4820 bfd_putl16 ((val & 0xffff), ptr + 2);
4824 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4825 bfd_putb16 ((val & 0xffff), ptr + 2);
4829 /* If it's possible to change R_TYPE to a more efficient access
4830 model, return the new reloc type. */
4833 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4834 struct elf_link_hash_entry *h)
4836 int is_local = (h == NULL);
4838 if (bfd_link_pic (info)
4839 || (h && h->root.type == bfd_link_hash_undefweak))
4842 /* We do not support relaxations for Old TLS models. */
4845 case R_ARM_TLS_GOTDESC:
4846 case R_ARM_TLS_CALL:
4847 case R_ARM_THM_TLS_CALL:
4848 case R_ARM_TLS_DESCSEQ:
4849 case R_ARM_THM_TLS_DESCSEQ:
4850 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4856 static bfd_reloc_status_type elf32_arm_final_link_relocate
4857 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4858 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4859 const char *, unsigned char, enum arm_st_branch_type,
4860 struct elf_link_hash_entry *, bfd_boolean *, char **);
4863 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4867 case arm_stub_a8_veneer_b_cond:
4868 case arm_stub_a8_veneer_b:
4869 case arm_stub_a8_veneer_bl:
4872 case arm_stub_long_branch_any_any:
4873 case arm_stub_long_branch_v4t_arm_thumb:
4874 case arm_stub_long_branch_thumb_only:
4875 case arm_stub_long_branch_thumb2_only:
4876 case arm_stub_long_branch_thumb2_only_pure:
4877 case arm_stub_long_branch_v4t_thumb_thumb:
4878 case arm_stub_long_branch_v4t_thumb_arm:
4879 case arm_stub_short_branch_v4t_thumb_arm:
4880 case arm_stub_long_branch_any_arm_pic:
4881 case arm_stub_long_branch_any_thumb_pic:
4882 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4883 case arm_stub_long_branch_v4t_arm_thumb_pic:
4884 case arm_stub_long_branch_v4t_thumb_arm_pic:
4885 case arm_stub_long_branch_thumb_only_pic:
4886 case arm_stub_long_branch_any_tls_pic:
4887 case arm_stub_long_branch_v4t_thumb_tls_pic:
4888 case arm_stub_cmse_branch_thumb_only:
4889 case arm_stub_a8_veneer_blx:
4892 case arm_stub_long_branch_arm_nacl:
4893 case arm_stub_long_branch_arm_nacl_pic:
4897 abort (); /* Should be unreachable. */
4901 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4902 veneering (TRUE) or have their own symbol (FALSE). */
4905 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4907 if (stub_type >= max_stub_type)
4908 abort (); /* Should be unreachable. */
4912 case arm_stub_cmse_branch_thumb_only:
4919 abort (); /* Should be unreachable. */
4922 /* Returns the padding needed for the dedicated section used stubs of type
4926 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4928 if (stub_type >= max_stub_type)
4929 abort (); /* Should be unreachable. */
4933 case arm_stub_cmse_branch_thumb_only:
4940 abort (); /* Should be unreachable. */
4943 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4944 returns the address of the hash table field in HTAB holding the offset at
4945 which new veneers should be layed out in the stub section. */
4948 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
4949 enum elf32_arm_stub_type stub_type)
4953 case arm_stub_cmse_branch_thumb_only:
4954 return &htab->new_cmse_stub_offset;
4957 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4963 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4967 bfd_boolean removed_sg_veneer;
4968 struct elf32_arm_stub_hash_entry *stub_entry;
4969 struct elf32_arm_link_hash_table *globals;
4970 struct bfd_link_info *info;
4977 const insn_sequence *template_sequence;
4979 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4980 int stub_reloc_offset[MAXRELOCS] = {0, 0};
4982 int just_allocated = 0;
4984 /* Massage our args to the form they really have. */
4985 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4986 info = (struct bfd_link_info *) in_arg;
4988 globals = elf32_arm_hash_table (info);
4989 if (globals == NULL)
4992 stub_sec = stub_entry->stub_sec;
4994 if ((globals->fix_cortex_a8 < 0)
4995 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4996 /* We have to do less-strictly-aligned fixes last. */
4999 /* Assign a slot at the end of section if none assigned yet. */
5000 if (stub_entry->stub_offset == (bfd_vma) -1)
5002 stub_entry->stub_offset = stub_sec->size;
5005 loc = stub_sec->contents + stub_entry->stub_offset;
5007 stub_bfd = stub_sec->owner;
5009 /* This is the address of the stub destination. */
5010 sym_value = (stub_entry->target_value
5011 + stub_entry->target_section->output_offset
5012 + stub_entry->target_section->output_section->vma);
5014 template_sequence = stub_entry->stub_template;
5015 template_size = stub_entry->stub_template_size;
5018 for (i = 0; i < template_size; i++)
5020 switch (template_sequence[i].type)
5024 bfd_vma data = (bfd_vma) template_sequence[i].data;
5025 if (template_sequence[i].reloc_addend != 0)
5027 /* We've borrowed the reloc_addend field to mean we should
5028 insert a condition code into this (Thumb-1 branch)
5029 instruction. See THUMB16_BCOND_INSN. */
5030 BFD_ASSERT ((data & 0xff00) == 0xd000);
5031 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
5033 bfd_put_16 (stub_bfd, data, loc + size);
5039 bfd_put_16 (stub_bfd,
5040 (template_sequence[i].data >> 16) & 0xffff,
5042 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
5044 if (template_sequence[i].r_type != R_ARM_NONE)
5046 stub_reloc_idx[nrelocs] = i;
5047 stub_reloc_offset[nrelocs++] = size;
5053 bfd_put_32 (stub_bfd, template_sequence[i].data,
5055 /* Handle cases where the target is encoded within the
5057 if (template_sequence[i].r_type == R_ARM_JUMP24)
5059 stub_reloc_idx[nrelocs] = i;
5060 stub_reloc_offset[nrelocs++] = size;
5066 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
5067 stub_reloc_idx[nrelocs] = i;
5068 stub_reloc_offset[nrelocs++] = size;
5079 stub_sec->size += size;
5081 /* Stub size has already been computed in arm_size_one_stub. Check
5083 BFD_ASSERT (size == stub_entry->stub_size);
5085 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
5086 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
5089 /* Assume non empty slots have at least one and at most MAXRELOCS entries
5090 to relocate in each stub. */
5092 (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
5093 BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
5095 for (i = 0; i < nrelocs; i++)
5097 Elf_Internal_Rela rel;
5098 bfd_boolean unresolved_reloc;
5099 char *error_message;
5101 sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
5103 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
5104 rel.r_info = ELF32_R_INFO (0,
5105 template_sequence[stub_reloc_idx[i]].r_type);
5108 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
5109 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5110 template should refer back to the instruction after the original
5111 branch. We use target_section as Cortex-A8 erratum workaround stubs
5112 are only generated when both source and target are in the same
5114 points_to = stub_entry->target_section->output_section->vma
5115 + stub_entry->target_section->output_offset
5116 + stub_entry->source_value;
5118 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5119 (template_sequence[stub_reloc_idx[i]].r_type),
5120 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
5121 points_to, info, stub_entry->target_section, "", STT_FUNC,
5122 stub_entry->branch_type,
5123 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
5131 /* Calculate the template, template size and instruction size for a stub.
5132 Return value is the instruction size. */
5135 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5136 const insn_sequence **stub_template,
5137 int *stub_template_size)
5139 const insn_sequence *template_sequence = NULL;
5140 int template_size = 0, i;
5143 template_sequence = stub_definitions[stub_type].template_sequence;
5145 *stub_template = template_sequence;
5147 template_size = stub_definitions[stub_type].template_size;
5148 if (stub_template_size)
5149 *stub_template_size = template_size;
5152 for (i = 0; i < template_size; i++)
5154 switch (template_sequence[i].type)
5175 /* As above, but don't actually build the stub. Just bump offset so
5176 we know stub section sizes. */
5179 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5180 void *in_arg ATTRIBUTE_UNUSED)
5182 struct elf32_arm_stub_hash_entry *stub_entry;
5183 const insn_sequence *template_sequence;
5184 int template_size, size;
5186 /* Massage our args to the form they really have. */
5187 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5189 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
5190 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
5192 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5195 /* Initialized to -1. Null size indicates an empty slot full of zeros. */
5196 if (stub_entry->stub_template_size)
5198 stub_entry->stub_size = size;
5199 stub_entry->stub_template = template_sequence;
5200 stub_entry->stub_template_size = template_size;
5203 /* Already accounted for. */
5204 if (stub_entry->stub_offset != (bfd_vma) -1)
5207 size = (size + 7) & ~7;
5208 stub_entry->stub_sec->size += size;
5213 /* External entry points for sizing and building linker stubs. */
5215 /* Set up various things so that we can make a list of input sections
5216 for each output section included in the link. Returns -1 on error,
5217 0 when no stubs will be needed, and 1 on success. */
5220 elf32_arm_setup_section_lists (bfd *output_bfd,
5221 struct bfd_link_info *info)
5224 unsigned int bfd_count;
5225 unsigned int top_id, top_index;
5227 asection **input_list, **list;
5229 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5233 if (! is_elf_hash_table (htab))
5236 /* Count the number of input BFDs and find the top input section id. */
5237 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
5239 input_bfd = input_bfd->link.next)
5242 for (section = input_bfd->sections;
5244 section = section->next)
5246 if (top_id < section->id)
5247 top_id = section->id;
5250 htab->bfd_count = bfd_count;
5252 amt = sizeof (struct map_stub) * (top_id + 1);
5253 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
5254 if (htab->stub_group == NULL)
5256 htab->top_id = top_id;
5258 /* We can't use output_bfd->section_count here to find the top output
5259 section index as some sections may have been removed, and
5260 _bfd_strip_section_from_output doesn't renumber the indices. */
5261 for (section = output_bfd->sections, top_index = 0;
5263 section = section->next)
5265 if (top_index < section->index)
5266 top_index = section->index;
5269 htab->top_index = top_index;
5270 amt = sizeof (asection *) * (top_index + 1);
5271 input_list = (asection **) bfd_malloc (amt);
5272 htab->input_list = input_list;
5273 if (input_list == NULL)
5276 /* For sections we aren't interested in, mark their entries with a
5277 value we can check later. */
5278 list = input_list + top_index;
5280 *list = bfd_abs_section_ptr;
5281 while (list-- != input_list);
5283 for (section = output_bfd->sections;
5285 section = section->next)
5287 if ((section->flags & SEC_CODE) != 0)
5288 input_list[section->index] = NULL;
5294 /* The linker repeatedly calls this function for each input section,
5295 in the order that input sections are linked into output sections.
5296 Build lists of input sections to determine groupings between which
5297 we may insert linker stubs. */
5300 elf32_arm_next_input_section (struct bfd_link_info *info,
5303 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5308 if (isec->output_section->index <= htab->top_index)
5310 asection **list = htab->input_list + isec->output_section->index;
5312 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
5314 /* Steal the link_sec pointer for our list. */
5315 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
5316 /* This happens to make the list in reverse order,
5317 which we reverse later. */
5318 PREV_SEC (isec) = *list;
5324 /* See whether we can group stub sections together. Grouping stub
5325 sections may result in fewer stubs. More importantly, we need to
5326 put all .init* and .fini* stubs at the end of the .init or
5327 .fini output sections respectively, because glibc splits the
5328 _init and _fini functions into multiple parts. Putting a stub in
5329 the middle of a function is not a good idea. */
5332 group_sections (struct elf32_arm_link_hash_table *htab,
5333 bfd_size_type stub_group_size,
5334 bfd_boolean stubs_always_after_branch)
5336 asection **list = htab->input_list;
5340 asection *tail = *list;
5343 if (tail == bfd_abs_section_ptr)
5346 /* Reverse the list: we must avoid placing stubs at the
5347 beginning of the section because the beginning of the text
5348 section may be required for an interrupt vector in bare metal
5350 #define NEXT_SEC PREV_SEC
5352 while (tail != NULL)
5354 /* Pop from tail. */
5355 asection *item = tail;
5356 tail = PREV_SEC (item);
5359 NEXT_SEC (item) = head;
5363 while (head != NULL)
5367 bfd_vma stub_group_start = head->output_offset;
5368 bfd_vma end_of_next;
5371 while (NEXT_SEC (curr) != NULL)
5373 next = NEXT_SEC (curr);
5374 end_of_next = next->output_offset + next->size;
5375 if (end_of_next - stub_group_start >= stub_group_size)
5376 /* End of NEXT is too far from start, so stop. */
5378 /* Add NEXT to the group. */
5382 /* OK, the size from the start to the start of CURR is less
5383 than stub_group_size and thus can be handled by one stub
5384 section. (Or the head section is itself larger than
5385 stub_group_size, in which case we may be toast.)
5386 We should really be keeping track of the total size of
5387 stubs added here, as stubs contribute to the final output
5391 next = NEXT_SEC (head);
5392 /* Set up this stub group. */
5393 htab->stub_group[head->id].link_sec = curr;
5395 while (head != curr && (head = next) != NULL);
5397 /* But wait, there's more! Input sections up to stub_group_size
5398 bytes after the stub section can be handled by it too. */
5399 if (!stubs_always_after_branch)
5401 stub_group_start = curr->output_offset + curr->size;
5403 while (next != NULL)
5405 end_of_next = next->output_offset + next->size;
5406 if (end_of_next - stub_group_start >= stub_group_size)
5407 /* End of NEXT is too far from stubs, so stop. */
5409 /* Add NEXT to the stub group. */
5411 next = NEXT_SEC (head);
5412 htab->stub_group[head->id].link_sec = curr;
5418 while (list++ != htab->input_list + htab->top_index);
5420 free (htab->input_list);
5425 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5429 a8_reloc_compare (const void *a, const void *b)
5431 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5432 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5434 if (ra->from < rb->from)
5436 else if (ra->from > rb->from)
5442 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5443 const char *, char **);
5445 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5446 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5447 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5451 cortex_a8_erratum_scan (bfd *input_bfd,
5452 struct bfd_link_info *info,
5453 struct a8_erratum_fix **a8_fixes_p,
5454 unsigned int *num_a8_fixes_p,
5455 unsigned int *a8_fix_table_size_p,
5456 struct a8_erratum_reloc *a8_relocs,
5457 unsigned int num_a8_relocs,
5458 unsigned prev_num_a8_fixes,
5459 bfd_boolean *stub_changed_p)
5462 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5463 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
5464 unsigned int num_a8_fixes = *num_a8_fixes_p;
5465 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
5470 for (section = input_bfd->sections;
5472 section = section->next)
5474 bfd_byte *contents = NULL;
5475 struct _arm_elf_section_data *sec_data;
5479 if (elf_section_type (section) != SHT_PROGBITS
5480 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
5481 || (section->flags & SEC_EXCLUDE) != 0
5482 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
5483 || (section->output_section == bfd_abs_section_ptr))
5486 base_vma = section->output_section->vma + section->output_offset;
5488 if (elf_section_data (section)->this_hdr.contents != NULL)
5489 contents = elf_section_data (section)->this_hdr.contents;
5490 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
5493 sec_data = elf32_arm_section_data (section);
5495 for (span = 0; span < sec_data->mapcount; span++)
5497 unsigned int span_start = sec_data->map[span].vma;
5498 unsigned int span_end = (span == sec_data->mapcount - 1)
5499 ? section->size : sec_data->map[span + 1].vma;
5501 char span_type = sec_data->map[span].type;
5502 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
5504 if (span_type != 't')
5507 /* Span is entirely within a single 4KB region: skip scanning. */
5508 if (((base_vma + span_start) & ~0xfff)
5509 == ((base_vma + span_end) & ~0xfff))
5512 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5514 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5515 * The branch target is in the same 4KB region as the
5516 first half of the branch.
5517 * The instruction before the branch is a 32-bit
5518 length non-branch instruction. */
5519 for (i = span_start; i < span_end;)
5521 unsigned int insn = bfd_getl16 (&contents[i]);
5522 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
5523 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
5525 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5530 /* Load the rest of the insn (in manual-friendly order). */
5531 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5533 /* Encoding T4: B<c>.W. */
5534 is_b = (insn & 0xf800d000) == 0xf0009000;
5535 /* Encoding T1: BL<c>.W. */
5536 is_bl = (insn & 0xf800d000) == 0xf000d000;
5537 /* Encoding T2: BLX<c>.W. */
5538 is_blx = (insn & 0xf800d000) == 0xf000c000;
5539 /* Encoding T3: B<c>.W (not permitted in IT block). */
5540 is_bcc = (insn & 0xf800d000) == 0xf0008000
5541 && (insn & 0x07f00000) != 0x03800000;
5544 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5546 if (((base_vma + i) & 0xfff) == 0xffe
5550 && ! last_was_branch)
5552 bfd_signed_vma offset = 0;
5553 bfd_boolean force_target_arm = FALSE;
5554 bfd_boolean force_target_thumb = FALSE;
5556 enum elf32_arm_stub_type stub_type = arm_stub_none;
5557 struct a8_erratum_reloc key, *found;
5558 bfd_boolean use_plt = FALSE;
5560 key.from = base_vma + i;
5561 found = (struct a8_erratum_reloc *)
5562 bsearch (&key, a8_relocs, num_a8_relocs,
5563 sizeof (struct a8_erratum_reloc),
5568 char *error_message = NULL;
5569 struct elf_link_hash_entry *entry;
5571 /* We don't care about the error returned from this
5572 function, only if there is glue or not. */
5573 entry = find_thumb_glue (info, found->sym_name,
5577 found->non_a8_stub = TRUE;
5579 /* Keep a simpler condition, for the sake of clarity. */
5580 if (htab->root.splt != NULL && found->hash != NULL
5581 && found->hash->root.plt.offset != (bfd_vma) -1)
5584 if (found->r_type == R_ARM_THM_CALL)
5586 if (found->branch_type == ST_BRANCH_TO_ARM
5588 force_target_arm = TRUE;
5590 force_target_thumb = TRUE;
5594 /* Check if we have an offending branch instruction. */
5596 if (found && found->non_a8_stub)
5597 /* We've already made a stub for this instruction, e.g.
5598 it's a long branch or a Thumb->ARM stub. Assume that
5599 stub will suffice to work around the A8 erratum (see
5600 setting of always_after_branch above). */
5604 offset = (insn & 0x7ff) << 1;
5605 offset |= (insn & 0x3f0000) >> 4;
5606 offset |= (insn & 0x2000) ? 0x40000 : 0;
5607 offset |= (insn & 0x800) ? 0x80000 : 0;
5608 offset |= (insn & 0x4000000) ? 0x100000 : 0;
5609 if (offset & 0x100000)
5610 offset |= ~ ((bfd_signed_vma) 0xfffff);
5611 stub_type = arm_stub_a8_veneer_b_cond;
5613 else if (is_b || is_bl || is_blx)
5615 int s = (insn & 0x4000000) != 0;
5616 int j1 = (insn & 0x2000) != 0;
5617 int j2 = (insn & 0x800) != 0;
5621 offset = (insn & 0x7ff) << 1;
5622 offset |= (insn & 0x3ff0000) >> 4;
5626 if (offset & 0x1000000)
5627 offset |= ~ ((bfd_signed_vma) 0xffffff);
5630 offset &= ~ ((bfd_signed_vma) 3);
5632 stub_type = is_blx ? arm_stub_a8_veneer_blx :
5633 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5636 if (stub_type != arm_stub_none)
5638 bfd_vma pc_for_insn = base_vma + i + 4;
5640 /* The original instruction is a BL, but the target is
5641 an ARM instruction. If we were not making a stub,
5642 the BL would have been converted to a BLX. Use the
5643 BLX stub instead in that case. */
5644 if (htab->use_blx && force_target_arm
5645 && stub_type == arm_stub_a8_veneer_bl)
5647 stub_type = arm_stub_a8_veneer_blx;
5651 /* Conversely, if the original instruction was
5652 BLX but the target is Thumb mode, use the BL
5654 else if (force_target_thumb
5655 && stub_type == arm_stub_a8_veneer_blx)
5657 stub_type = arm_stub_a8_veneer_bl;
5663 pc_for_insn &= ~ ((bfd_vma) 3);
5665 /* If we found a relocation, use the proper destination,
5666 not the offset in the (unrelocated) instruction.
5667 Note this is always done if we switched the stub type
5671 (bfd_signed_vma) (found->destination - pc_for_insn);
5673 /* If the stub will use a Thumb-mode branch to a
5674 PLT target, redirect it to the preceding Thumb
5676 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5677 offset -= PLT_THUMB_STUB_SIZE;
5679 target = pc_for_insn + offset;
5681 /* The BLX stub is ARM-mode code. Adjust the offset to
5682 take the different PC value (+8 instead of +4) into
5684 if (stub_type == arm_stub_a8_veneer_blx)
5687 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5689 char *stub_name = NULL;
5691 if (num_a8_fixes == a8_fix_table_size)
5693 a8_fix_table_size *= 2;
5694 a8_fixes = (struct a8_erratum_fix *)
5695 bfd_realloc (a8_fixes,
5696 sizeof (struct a8_erratum_fix)
5697 * a8_fix_table_size);
5700 if (num_a8_fixes < prev_num_a8_fixes)
5702 /* If we're doing a subsequent scan,
5703 check if we've found the same fix as
5704 before, and try and reuse the stub
5706 stub_name = a8_fixes[num_a8_fixes].stub_name;
5707 if ((a8_fixes[num_a8_fixes].section != section)
5708 || (a8_fixes[num_a8_fixes].offset != i))
5712 *stub_changed_p = TRUE;
5718 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5719 if (stub_name != NULL)
5720 sprintf (stub_name, "%x:%x", section->id, i);
5723 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5724 a8_fixes[num_a8_fixes].section = section;
5725 a8_fixes[num_a8_fixes].offset = i;
5726 a8_fixes[num_a8_fixes].target_offset =
5728 a8_fixes[num_a8_fixes].orig_insn = insn;
5729 a8_fixes[num_a8_fixes].stub_name = stub_name;
5730 a8_fixes[num_a8_fixes].stub_type = stub_type;
5731 a8_fixes[num_a8_fixes].branch_type =
5732 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5739 i += insn_32bit ? 4 : 2;
5740 last_was_32bit = insn_32bit;
5741 last_was_branch = is_32bit_branch;
5745 if (elf_section_data (section)->this_hdr.contents == NULL)
5749 *a8_fixes_p = a8_fixes;
5750 *num_a8_fixes_p = num_a8_fixes;
5751 *a8_fix_table_size_p = a8_fix_table_size;
5756 /* Create or update a stub entry depending on whether the stub can already be
5757 found in HTAB. The stub is identified by:
5758 - its type STUB_TYPE
5759 - its source branch (note that several can share the same stub) whose
5760 section and relocation (if any) are given by SECTION and IRELA
5762 - its target symbol whose input section, hash, name, value and branch type
5763 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5766 If found, the value of the stub's target symbol is updated from SYM_VALUE
5767 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5768 TRUE and the stub entry is initialized.
5770 Returns the stub that was created or updated, or NULL if an error
5773 static struct elf32_arm_stub_hash_entry *
5774 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5775 enum elf32_arm_stub_type stub_type, asection *section,
5776 Elf_Internal_Rela *irela, asection *sym_sec,
5777 struct elf32_arm_link_hash_entry *hash, char *sym_name,
5778 bfd_vma sym_value, enum arm_st_branch_type branch_type,
5779 bfd_boolean *new_stub)
5781 const asection *id_sec;
5783 struct elf32_arm_stub_hash_entry *stub_entry;
5784 unsigned int r_type;
5785 bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);
5787 BFD_ASSERT (stub_type != arm_stub_none);
5791 stub_name = sym_name;
5795 BFD_ASSERT (section);
5796 BFD_ASSERT (section->id <= htab->top_id);
5798 /* Support for grouping stub sections. */
5799 id_sec = htab->stub_group[section->id].link_sec;
5801 /* Get the name of this stub. */
5802 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5808 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
5810 /* The proper stub has already been created, just update its value. */
5811 if (stub_entry != NULL)
5815 stub_entry->target_value = sym_value;
5819 stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5820 if (stub_entry == NULL)
5827 stub_entry->target_value = sym_value;
5828 stub_entry->target_section = sym_sec;
5829 stub_entry->stub_type = stub_type;
5830 stub_entry->h = hash;
5831 stub_entry->branch_type = branch_type;
5834 stub_entry->output_name = sym_name;
5837 if (sym_name == NULL)
5838 sym_name = "unnamed";
5839 stub_entry->output_name = (char *)
5840 bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5841 + strlen (sym_name));
5842 if (stub_entry->output_name == NULL)
5848 /* For historical reasons, use the existing names for ARM-to-Thumb and
5849 Thumb-to-ARM stubs. */
5850 r_type = ELF32_R_TYPE (irela->r_info);
5851 if ((r_type == (unsigned int) R_ARM_THM_CALL
5852 || r_type == (unsigned int) R_ARM_THM_JUMP24
5853 || r_type == (unsigned int) R_ARM_THM_JUMP19)
5854 && branch_type == ST_BRANCH_TO_ARM)
5855 sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5856 else if ((r_type == (unsigned int) R_ARM_CALL
5857 || r_type == (unsigned int) R_ARM_JUMP24)
5858 && branch_type == ST_BRANCH_TO_THUMB)
5859 sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5861 sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5868 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5869 gateway veneer to transition from non secure to secure state and create them
5872 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5873 defines the conditions that govern Secure Gateway veneer creation for a
5874 given symbol <SYM> as follows:
5875 - it has function type
5876 - it has non local binding
5877 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5878 same type, binding and value as <SYM> (called normal symbol).
5879 An entry function can handle secure state transition itself in which case
5880 its special symbol would have a different value from the normal symbol.
5882 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5883 entry mapping while HTAB gives the name to hash entry mapping.
5884 *CMSE_STUB_CREATED is increased by the number of secure gateway veneer
5887 The return value gives whether a stub failed to be allocated. */
5890 cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
5891 obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
5892 int *cmse_stub_created)
5894 const struct elf_backend_data *bed;
5895 Elf_Internal_Shdr *symtab_hdr;
5896 unsigned i, j, sym_count, ext_start;
5897 Elf_Internal_Sym *cmse_sym, *local_syms;
5898 struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
5899 enum arm_st_branch_type branch_type;
5900 char *sym_name, *lsym_name;
5903 struct elf32_arm_stub_hash_entry *stub_entry;
5904 bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;
5906 bed = get_elf_backend_data (input_bfd);
5907 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5908 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
5909 ext_start = symtab_hdr->sh_info;
5910 is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
5911 && out_attr[Tag_CPU_arch_profile].i == 'M');
5913 local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
5914 if (local_syms == NULL)
5915 local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5916 symtab_hdr->sh_info, 0, NULL, NULL,
5918 if (symtab_hdr->sh_info && local_syms == NULL)
5922 for (i = 0; i < sym_count; i++)
5924 cmse_invalid = FALSE;
5928 cmse_sym = &local_syms[i];
5929 /* Not a special symbol. */
5930 if (!ARM_GET_SYM_CMSE_SPCL (cmse_sym->st_target_internal))
5932 sym_name = bfd_elf_string_from_elf_section (input_bfd,
5933 symtab_hdr->sh_link,
5935 /* Special symbol with local binding. */
5936 cmse_invalid = TRUE;
5940 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
5941 sym_name = (char *) cmse_hash->root.root.root.string;
5943 /* Not a special symbol. */
5944 if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
5947 /* Special symbol has incorrect binding or type. */
5948 if ((cmse_hash->root.root.type != bfd_link_hash_defined
5949 && cmse_hash->root.root.type != bfd_link_hash_defweak)
5950 || cmse_hash->root.type != STT_FUNC)
5951 cmse_invalid = TRUE;
5956 _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
5957 "ARMv8-M architecture or later"),
5958 input_bfd, sym_name);
5959 is_v8m = TRUE; /* Avoid multiple warning. */
5965 _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
5966 " a global or weak function symbol"),
5967 input_bfd, sym_name);
5973 sym_name += strlen (CMSE_PREFIX);
5974 hash = (struct elf32_arm_link_hash_entry *)
5975 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
5977 /* No associated normal symbol or it is neither global nor weak. */
5979 || (hash->root.root.type != bfd_link_hash_defined
5980 && hash->root.root.type != bfd_link_hash_defweak)
5981 || hash->root.type != STT_FUNC)
5983 /* Initialize here to avoid warning about use of possibly
5984 uninitialized variable. */
5989 /* Searching for a normal symbol with local binding. */
5990 for (; j < ext_start; j++)
5993 bfd_elf_string_from_elf_section (input_bfd,
5994 symtab_hdr->sh_link,
5995 local_syms[j].st_name);
5996 if (!strcmp (sym_name, lsym_name))
6001 if (hash || j < ext_start)
6004 (_("%pB: invalid standard symbol `%s'; it must be "
6005 "a global or weak function symbol"),
6006 input_bfd, sym_name);
6010 (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
6016 sym_value = hash->root.root.u.def.value;
6017 section = hash->root.root.u.def.section;
6019 if (cmse_hash->root.root.u.def.section != section)
6022 (_("%pB: `%s' and its special symbol are in different sections"),
6023 input_bfd, sym_name);
6026 if (cmse_hash->root.root.u.def.value != sym_value)
6027 continue; /* Ignore: could be an entry function starting with SG. */
6029 /* If this section is a link-once section that will be discarded, then
6030 don't create any stubs. */
6031 if (section->output_section == NULL)
6034 (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
6038 if (hash->root.size == 0)
6041 (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
6047 branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6049 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6050 NULL, NULL, section, hash, sym_name,
6051 sym_value, branch_type, &new_stub);
6053 if (stub_entry == NULL)
6057 BFD_ASSERT (new_stub);
6058 (*cmse_stub_created)++;
6062 if (!symtab_hdr->contents)
6067 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6068 code entry function, ie can be called from non secure code without using a
6072 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6074 bfd_byte contents[4];
6075 uint32_t first_insn;
6080 /* Defined symbol of function type. */
6081 if (hash->root.root.type != bfd_link_hash_defined
6082 && hash->root.root.type != bfd_link_hash_defweak)
6084 if (hash->root.type != STT_FUNC)
6087 /* Read first instruction. */
6088 section = hash->root.root.u.def.section;
6089 abfd = section->owner;
6090 offset = hash->root.root.u.def.value - section->vma;
6091 if (!bfd_get_section_contents (abfd, section, contents, offset,
6095 first_insn = bfd_get_32 (abfd, contents);
6097 /* Starts by SG instruction. */
6098 return first_insn == 0xe97fe97f;
6101 /* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
6102 secure gateway veneers (ie. the veneers was not in the input import library)
6103 and there is no output import library (GEN_INFO->out_implib_bfd is NULL. */
6106 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6108 struct elf32_arm_stub_hash_entry *stub_entry;
6109 struct bfd_link_info *info;
6111 /* Massage our args to the form they really have. */
6112 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6113 info = (struct bfd_link_info *) gen_info;
6115 if (info->out_implib_bfd)
6118 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6121 if (stub_entry->stub_offset == (bfd_vma) -1)
6122 _bfd_error_handler (" %s", stub_entry->output_name);
6127 /* Set offset of each secure gateway veneers so that its address remain
6128 identical to the one in the input import library referred by
6129 HTAB->in_implib_bfd. A warning is issued for veneers that disappeared
6130 (present in input import library but absent from the executable being
6131 linked) or if new veneers appeared and there is no output import library
6132 (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6133 number of secure gateway veneers found in the input import library.
6135 The function returns whether an error occurred. If no error occurred,
6136 *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6137 and this function and HTAB->new_cmse_stub_offset is set to the biggest
6138 veneer observed set for new veneers to be layed out after. */
6141 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6142 struct elf32_arm_link_hash_table *htab,
6143 int *cmse_stub_created)
6150 asection *stub_out_sec;
6151 bfd_boolean ret = TRUE;
6152 Elf_Internal_Sym *intsym;
6153 const char *out_sec_name;
6154 bfd_size_type cmse_stub_size;
6155 asymbol **sympp = NULL, *sym;
6156 struct elf32_arm_link_hash_entry *hash;
6157 const insn_sequence *cmse_stub_template;
6158 struct elf32_arm_stub_hash_entry *stub_entry;
6159 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6160 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6161 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6163 /* No input secure gateway import library. */
6164 if (!htab->in_implib_bfd)
6167 in_implib_bfd = htab->in_implib_bfd;
6168 if (!htab->cmse_implib)
6170 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6171 "Gateway import libraries"), in_implib_bfd);
6175 /* Get symbol table size. */
6176 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6180 /* Read in the input secure gateway import library's symbol table. */
6181 sympp = (asymbol **) xmalloc (symsize);
6182 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6189 htab->new_cmse_stub_offset = 0;
6191 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6192 &cmse_stub_template,
6193 &cmse_stub_template_size);
6195 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6197 bfd_get_section_by_name (htab->obfd, out_sec_name);
6198 if (stub_out_sec != NULL)
6199 cmse_stub_sec_vma = stub_out_sec->vma;
6201 /* Set addresses of veneers mentionned in input secure gateway import
6202 library's symbol table. */
6203 for (i = 0; i < symcount; i++)
6207 sym_name = (char *) bfd_asymbol_name (sym);
6208 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6210 if (sym->section != bfd_abs_section_ptr
6211 || !(flags & (BSF_GLOBAL | BSF_WEAK))
6212 || (flags & BSF_FUNCTION) != BSF_FUNCTION
6213 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6214 != ST_BRANCH_TO_THUMB))
6216 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6217 "symbol should be absolute, global and "
6218 "refer to Thumb functions"),
6219 in_implib_bfd, sym_name);
6224 veneer_value = bfd_asymbol_value (sym);
6225 stub_offset = veneer_value - cmse_stub_sec_vma;
6226 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6228 hash = (struct elf32_arm_link_hash_entry *)
6229 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6231 /* Stub entry should have been created by cmse_scan or the symbol be of
6232 a secure function callable from non secure code. */
6233 if (!stub_entry && !hash)
6235 bfd_boolean new_stub;
6238 (_("entry function `%s' disappeared from secure code"), sym_name);
6239 hash = (struct elf32_arm_link_hash_entry *)
6240 elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
6242 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6243 NULL, NULL, bfd_abs_section_ptr, hash,
6244 sym_name, veneer_value,
6245 ST_BRANCH_TO_THUMB, &new_stub);
6246 if (stub_entry == NULL)
6250 BFD_ASSERT (new_stub);
6251 new_cmse_stubs_created++;
6252 (*cmse_stub_created)++;
6254 stub_entry->stub_template_size = stub_entry->stub_size = 0;
6255 stub_entry->stub_offset = stub_offset;
6257 /* Symbol found is not callable from non secure code. */
6258 else if (!stub_entry)
6260 if (!cmse_entry_fct_p (hash))
6262 _bfd_error_handler (_("`%s' refers to a non entry function"),
6270 /* Only stubs for SG veneers should have been created. */
6271 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6273 /* Check visibility hasn't changed. */
6274 if (!!(flags & BSF_GLOBAL)
6275 != (hash->root.root.type == bfd_link_hash_defined))
6277 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6280 stub_entry->stub_offset = stub_offset;
6283 /* Size should match that of a SG veneer. */
6284 if (intsym->st_size != cmse_stub_size)
6286 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6287 in_implib_bfd, sym_name);
6291 /* Previous veneer address is before current SG veneer section. */
6292 if (veneer_value < cmse_stub_sec_vma)
6294 /* Avoid offset underflow. */
6296 stub_entry->stub_offset = 0;
6301 /* Complain if stub offset not a multiple of stub size. */
6302 if (stub_offset % cmse_stub_size)
6305 (_("offset of veneer for entry function `%s' not a multiple of "
6306 "its size"), sym_name);
6313 new_cmse_stubs_created--;
6314 if (veneer_value < cmse_stub_array_start)
6315 cmse_stub_array_start = veneer_value;
6316 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6317 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6318 htab->new_cmse_stub_offset = next_cmse_stub_offset;
6321 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6323 BFD_ASSERT (new_cmse_stubs_created > 0);
6325 (_("new entry function(s) introduced but no output import library "
6327 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6330 if (cmse_stub_array_start != cmse_stub_sec_vma)
6333 (_("start address of `%s' is different from previous link"),
6343 /* Determine and set the size of the stub section for a final link.
6345 The basic idea here is to examine all the relocations looking for
6346 PC-relative calls to a target that is unreachable with a "bl"
6350 elf32_arm_size_stubs (bfd *output_bfd,
6352 struct bfd_link_info *info,
6353 bfd_signed_vma group_size,
6354 asection * (*add_stub_section) (const char *, asection *,
6357 void (*layout_sections_again) (void))
6359 bfd_boolean ret = TRUE;
6360 obj_attribute *out_attr;
6361 int cmse_stub_created = 0;
6362 bfd_size_type stub_group_size;
6363 bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
6364 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6365 struct a8_erratum_fix *a8_fixes = NULL;
6366 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6367 struct a8_erratum_reloc *a8_relocs = NULL;
6368 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
6373 if (htab->fix_cortex_a8)
6375 a8_fixes = (struct a8_erratum_fix *)
6376 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6377 a8_relocs = (struct a8_erratum_reloc *)
6378 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6381 /* Propagate mach to stub bfd, because it may not have been
6382 finalized when we created stub_bfd. */
6383 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6384 bfd_get_mach (output_bfd));
6386 /* Stash our params away. */
6387 htab->stub_bfd = stub_bfd;
6388 htab->add_stub_section = add_stub_section;
6389 htab->layout_sections_again = layout_sections_again;
6390 stubs_always_after_branch = group_size < 0;
6392 out_attr = elf_known_obj_attributes_proc (output_bfd);
6393 m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6395 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6396 as the first half of a 32-bit branch straddling two 4K pages. This is a
6397 crude way of enforcing that. */
6398 if (htab->fix_cortex_a8)
6399 stubs_always_after_branch = 1;
6402 stub_group_size = -group_size;
6404 stub_group_size = group_size;
6406 if (stub_group_size == 1)
6408 /* Default values. */
6409 /* Thumb branch range is +-4MB has to be used as the default
6410 maximum size (a given section can contain both ARM and Thumb
6411 code, so the worst case has to be taken into account).
6413 This value is 24K less than that, which allows for 2025
6414 12-byte stubs. If we exceed that, then we will fail to link.
6415 The user will have to relink with an explicit group size
6417 stub_group_size = 4170000;
6420 group_sections (htab, stub_group_size, stubs_always_after_branch);
6422 /* If we're applying the cortex A8 fix, we need to determine the
6423 program header size now, because we cannot change it later --
6424 that could alter section placements. Notice the A8 erratum fix
6425 ends up requiring the section addresses to remain unchanged
6426 modulo the page size. That's something we cannot represent
6427 inside BFD, and we don't want to force the section alignment to
6428 be the page size. */
6429 if (htab->fix_cortex_a8)
6430 (*htab->layout_sections_again) ();
6435 unsigned int bfd_indx;
6437 enum elf32_arm_stub_type stub_type;
6438 bfd_boolean stub_changed = FALSE;
6439 unsigned prev_num_a8_fixes = num_a8_fixes;
6442 for (input_bfd = info->input_bfds, bfd_indx = 0;
6444 input_bfd = input_bfd->link.next, bfd_indx++)
6446 Elf_Internal_Shdr *symtab_hdr;
6448 Elf_Internal_Sym *local_syms = NULL;
6450 if (!is_arm_elf (input_bfd))
6455 /* We'll need the symbol table in a second. */
6456 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6457 if (symtab_hdr->sh_info == 0)
6460 /* Limit scan of symbols to object file whose profile is
6461 Microcontroller to not hinder performance in the general case. */
6462 if (m_profile && first_veneer_scan)
6464 struct elf_link_hash_entry **sym_hashes;
6466 sym_hashes = elf_sym_hashes (input_bfd);
6467 if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6468 &cmse_stub_created))
6469 goto error_ret_free_local;
6471 if (cmse_stub_created != 0)
6472 stub_changed = TRUE;
6475 /* Walk over each section attached to the input bfd. */
6476 for (section = input_bfd->sections;
6478 section = section->next)
6480 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6482 /* If there aren't any relocs, then there's nothing more
6484 if ((section->flags & SEC_RELOC) == 0
6485 || section->reloc_count == 0
6486 || (section->flags & SEC_CODE) == 0)
6489 /* If this section is a link-once section that will be
6490 discarded, then don't create any stubs. */
6491 if (section->output_section == NULL
6492 || section->output_section->owner != output_bfd)
6495 /* Get the relocs. */
6497 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6498 NULL, info->keep_memory);
6499 if (internal_relocs == NULL)
6500 goto error_ret_free_local;
6502 /* Now examine each relocation. */
6503 irela = internal_relocs;
6504 irelaend = irela + section->reloc_count;
6505 for (; irela < irelaend; irela++)
6507 unsigned int r_type, r_indx;
6510 bfd_vma destination;
6511 struct elf32_arm_link_hash_entry *hash;
6512 const char *sym_name;
6513 unsigned char st_type;
6514 enum arm_st_branch_type branch_type;
6515 bfd_boolean created_stub = FALSE;
6517 r_type = ELF32_R_TYPE (irela->r_info);
6518 r_indx = ELF32_R_SYM (irela->r_info);
6520 if (r_type >= (unsigned int) R_ARM_max)
6522 bfd_set_error (bfd_error_bad_value);
6523 error_ret_free_internal:
6524 if (elf_section_data (section)->relocs == NULL)
6525 free (internal_relocs);
6527 error_ret_free_local:
6528 if (local_syms != NULL
6529 && (symtab_hdr->contents
6530 != (unsigned char *) local_syms))
6536 if (r_indx >= symtab_hdr->sh_info)
6537 hash = elf32_arm_hash_entry
6538 (elf_sym_hashes (input_bfd)
6539 [r_indx - symtab_hdr->sh_info]);
6541 /* Only look for stubs on branch instructions, or
6542 non-relaxed TLSCALL */
6543 if ((r_type != (unsigned int) R_ARM_CALL)
6544 && (r_type != (unsigned int) R_ARM_THM_CALL)
6545 && (r_type != (unsigned int) R_ARM_JUMP24)
6546 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6547 && (r_type != (unsigned int) R_ARM_THM_XPC22)
6548 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6549 && (r_type != (unsigned int) R_ARM_PLT32)
6550 && !((r_type == (unsigned int) R_ARM_TLS_CALL
6551 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6552 && r_type == elf32_arm_tls_transition
6553 (info, r_type, &hash->root)
6554 && ((hash ? hash->tls_type
6555 : (elf32_arm_local_got_tls_type
6556 (input_bfd)[r_indx]))
6557 & GOT_TLS_GDESC) != 0))
6560 /* Now determine the call target, its name, value,
6567 if (r_type == (unsigned int) R_ARM_TLS_CALL
6568 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6570 /* A non-relaxed TLS call. The target is the
6571 plt-resident trampoline and nothing to do
6573 BFD_ASSERT (htab->tls_trampoline > 0);
6574 sym_sec = htab->root.splt;
6575 sym_value = htab->tls_trampoline;
6578 branch_type = ST_BRANCH_TO_ARM;
6582 /* It's a local symbol. */
6583 Elf_Internal_Sym *sym;
6585 if (local_syms == NULL)
6588 = (Elf_Internal_Sym *) symtab_hdr->contents;
6589 if (local_syms == NULL)
6591 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6592 symtab_hdr->sh_info, 0,
6594 if (local_syms == NULL)
6595 goto error_ret_free_internal;
6598 sym = local_syms + r_indx;
6599 if (sym->st_shndx == SHN_UNDEF)
6600 sym_sec = bfd_und_section_ptr;
6601 else if (sym->st_shndx == SHN_ABS)
6602 sym_sec = bfd_abs_section_ptr;
6603 else if (sym->st_shndx == SHN_COMMON)
6604 sym_sec = bfd_com_section_ptr;
6607 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6610 /* This is an undefined symbol. It can never
6614 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6615 sym_value = sym->st_value;
6616 destination = (sym_value + irela->r_addend
6617 + sym_sec->output_offset
6618 + sym_sec->output_section->vma);
6619 st_type = ELF_ST_TYPE (sym->st_info);
6621 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6623 = bfd_elf_string_from_elf_section (input_bfd,
6624 symtab_hdr->sh_link,
6629 /* It's an external symbol. */
6630 while (hash->root.root.type == bfd_link_hash_indirect
6631 || hash->root.root.type == bfd_link_hash_warning)
6632 hash = ((struct elf32_arm_link_hash_entry *)
6633 hash->root.root.u.i.link);
6635 if (hash->root.root.type == bfd_link_hash_defined
6636 || hash->root.root.type == bfd_link_hash_defweak)
6638 sym_sec = hash->root.root.u.def.section;
6639 sym_value = hash->root.root.u.def.value;
6641 struct elf32_arm_link_hash_table *globals =
6642 elf32_arm_hash_table (info);
6644 /* For a destination in a shared library,
6645 use the PLT stub as target address to
6646 decide whether a branch stub is
6649 && globals->root.splt != NULL
6651 && hash->root.plt.offset != (bfd_vma) -1)
6653 sym_sec = globals->root.splt;
6654 sym_value = hash->root.plt.offset;
6655 if (sym_sec->output_section != NULL)
6656 destination = (sym_value
6657 + sym_sec->output_offset
6658 + sym_sec->output_section->vma);
6660 else if (sym_sec->output_section != NULL)
6661 destination = (sym_value + irela->r_addend
6662 + sym_sec->output_offset
6663 + sym_sec->output_section->vma);
6665 else if ((hash->root.root.type == bfd_link_hash_undefined)
6666 || (hash->root.root.type == bfd_link_hash_undefweak))
6668 /* For a shared library, use the PLT stub as
6669 target address to decide whether a long
6670 branch stub is needed.
6671 For absolute code, they cannot be handled. */
6672 struct elf32_arm_link_hash_table *globals =
6673 elf32_arm_hash_table (info);
6676 && globals->root.splt != NULL
6678 && hash->root.plt.offset != (bfd_vma) -1)
6680 sym_sec = globals->root.splt;
6681 sym_value = hash->root.plt.offset;
6682 if (sym_sec->output_section != NULL)
6683 destination = (sym_value
6684 + sym_sec->output_offset
6685 + sym_sec->output_section->vma);
6692 bfd_set_error (bfd_error_bad_value);
6693 goto error_ret_free_internal;
6695 st_type = hash->root.type;
6697 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6698 sym_name = hash->root.root.root.string;
6703 bfd_boolean new_stub;
6704 struct elf32_arm_stub_hash_entry *stub_entry;
6706 /* Determine what (if any) linker stub is needed. */
6707 stub_type = arm_type_of_stub (info, section, irela,
6708 st_type, &branch_type,
6709 hash, destination, sym_sec,
6710 input_bfd, sym_name);
6711 if (stub_type == arm_stub_none)
6714 /* We've either created a stub for this reloc already,
6715 or we are about to. */
6717 elf32_arm_create_stub (htab, stub_type, section, irela,
6719 (char *) sym_name, sym_value,
6720 branch_type, &new_stub);
6722 created_stub = stub_entry != NULL;
6724 goto error_ret_free_internal;
6728 stub_changed = TRUE;
6732 /* Look for relocations which might trigger Cortex-A8
6734 if (htab->fix_cortex_a8
6735 && (r_type == (unsigned int) R_ARM_THM_JUMP24
6736 || r_type == (unsigned int) R_ARM_THM_JUMP19
6737 || r_type == (unsigned int) R_ARM_THM_CALL
6738 || r_type == (unsigned int) R_ARM_THM_XPC22))
6740 bfd_vma from = section->output_section->vma
6741 + section->output_offset
6744 if ((from & 0xfff) == 0xffe)
6746 /* Found a candidate. Note we haven't checked the
6747 destination is within 4K here: if we do so (and
6748 don't create an entry in a8_relocs) we can't tell
6749 that a branch should have been relocated when
6751 if (num_a8_relocs == a8_reloc_table_size)
6753 a8_reloc_table_size *= 2;
6754 a8_relocs = (struct a8_erratum_reloc *)
6755 bfd_realloc (a8_relocs,
6756 sizeof (struct a8_erratum_reloc)
6757 * a8_reloc_table_size);
6760 a8_relocs[num_a8_relocs].from = from;
6761 a8_relocs[num_a8_relocs].destination = destination;
6762 a8_relocs[num_a8_relocs].r_type = r_type;
6763 a8_relocs[num_a8_relocs].branch_type = branch_type;
6764 a8_relocs[num_a8_relocs].sym_name = sym_name;
6765 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6766 a8_relocs[num_a8_relocs].hash = hash;
6773 /* We're done with the internal relocs, free them. */
6774 if (elf_section_data (section)->relocs == NULL)
6775 free (internal_relocs);
6778 if (htab->fix_cortex_a8)
6780 /* Sort relocs which might apply to Cortex-A8 erratum. */
6781 qsort (a8_relocs, num_a8_relocs,
6782 sizeof (struct a8_erratum_reloc),
6785 /* Scan for branches which might trigger Cortex-A8 erratum. */
6786 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6787 &num_a8_fixes, &a8_fix_table_size,
6788 a8_relocs, num_a8_relocs,
6789 prev_num_a8_fixes, &stub_changed)
6791 goto error_ret_free_local;
6794 if (local_syms != NULL
6795 && symtab_hdr->contents != (unsigned char *) local_syms)
6797 if (!info->keep_memory)
6800 symtab_hdr->contents = (unsigned char *) local_syms;
6804 if (first_veneer_scan
6805 && !set_cmse_veneer_addr_from_implib (info, htab,
6806 &cmse_stub_created))
6809 if (prev_num_a8_fixes != num_a8_fixes)
6810 stub_changed = TRUE;
6815 /* OK, we've added some stubs. Find out the new size of the
6817 for (stub_sec = htab->stub_bfd->sections;
6819 stub_sec = stub_sec->next)
6821 /* Ignore non-stub sections. */
6822 if (!strstr (stub_sec->name, STUB_SUFFIX))
6828 /* Add new SG veneers after those already in the input import
6830 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6833 bfd_vma *start_offset_p;
6834 asection **stub_sec_p;
6836 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6837 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6838 if (start_offset_p == NULL)
6841 BFD_ASSERT (stub_sec_p != NULL);
6842 if (*stub_sec_p != NULL)
6843 (*stub_sec_p)->size = *start_offset_p;
6846 /* Compute stub section size, considering padding. */
6847 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6848 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6852 asection **stub_sec_p;
6854 padding = arm_dedicated_stub_section_padding (stub_type);
6855 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6856 /* Skip if no stub input section or no stub section padding
6858 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6860 /* Stub section padding required but no dedicated section. */
6861 BFD_ASSERT (stub_sec_p);
6863 size = (*stub_sec_p)->size;
6864 size = (size + padding - 1) & ~(padding - 1);
6865 (*stub_sec_p)->size = size;
6868 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6869 if (htab->fix_cortex_a8)
6870 for (i = 0; i < num_a8_fixes; i++)
6872 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6873 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6875 if (stub_sec == NULL)
6879 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6884 /* Ask the linker to do its stuff. */
6885 (*htab->layout_sections_again) ();
6886 first_veneer_scan = FALSE;
6889 /* Add stubs for Cortex-A8 erratum fixes now. */
6890 if (htab->fix_cortex_a8)
6892 for (i = 0; i < num_a8_fixes; i++)
6894 struct elf32_arm_stub_hash_entry *stub_entry;
6895 char *stub_name = a8_fixes[i].stub_name;
6896 asection *section = a8_fixes[i].section;
6897 unsigned int section_id = a8_fixes[i].section->id;
6898 asection *link_sec = htab->stub_group[section_id].link_sec;
6899 asection *stub_sec = htab->stub_group[section_id].stub_sec;
6900 const insn_sequence *template_sequence;
6901 int template_size, size = 0;
6903 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
6905 if (stub_entry == NULL)
6907 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6908 section->owner, stub_name);
6912 stub_entry->stub_sec = stub_sec;
6913 stub_entry->stub_offset = (bfd_vma) -1;
6914 stub_entry->id_sec = link_sec;
6915 stub_entry->stub_type = a8_fixes[i].stub_type;
6916 stub_entry->source_value = a8_fixes[i].offset;
6917 stub_entry->target_section = a8_fixes[i].section;
6918 stub_entry->target_value = a8_fixes[i].target_offset;
6919 stub_entry->orig_insn = a8_fixes[i].orig_insn;
6920 stub_entry->branch_type = a8_fixes[i].branch_type;
6922 size = find_stub_size_and_template (a8_fixes[i].stub_type,
6926 stub_entry->stub_size = size;
6927 stub_entry->stub_template = template_sequence;
6928 stub_entry->stub_template_size = template_size;
6931 /* Stash the Cortex-A8 erratum fix array for use later in
6932 elf32_arm_write_section(). */
6933 htab->a8_erratum_fixes = a8_fixes;
6934 htab->num_a8_erratum_fixes = num_a8_fixes;
6938 htab->a8_erratum_fixes = NULL;
6939 htab->num_a8_erratum_fixes = 0;
6944 /* Build all the stubs associated with the current output file. The
6945 stubs are kept in a hash table attached to the main linker hash
6946 table. We also set up the .plt entries for statically linked PIC
6947 functions here. This function is called via arm_elf_finish in the
6951 elf32_arm_build_stubs (struct bfd_link_info *info)
6954 struct bfd_hash_table *table;
6955 enum elf32_arm_stub_type stub_type;
6956 struct elf32_arm_link_hash_table *htab;
6958 htab = elf32_arm_hash_table (info);
6962 for (stub_sec = htab->stub_bfd->sections;
6964 stub_sec = stub_sec->next)
6968 /* Ignore non-stub sections. */
6969 if (!strstr (stub_sec->name, STUB_SUFFIX))
6972 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
6973 must at least be done for stub section requiring padding and for SG
6974 veneers to ensure that a non secure code branching to a removed SG
6975 veneer causes an error. */
6976 size = stub_sec->size;
6977 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
6978 if (stub_sec->contents == NULL && size != 0)
6984 /* Add new SG veneers after those already in the input import library. */
6985 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
6987 bfd_vma *start_offset_p;
6988 asection **stub_sec_p;
6990 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6991 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6992 if (start_offset_p == NULL)
6995 BFD_ASSERT (stub_sec_p != NULL);
6996 if (*stub_sec_p != NULL)
6997 (*stub_sec_p)->size = *start_offset_p;
7000 /* Build the stubs as directed by the stub hash table. */
7001 table = &htab->stub_hash_table;
7002 bfd_hash_traverse (table, arm_build_one_stub, info);
7003 if (htab->fix_cortex_a8)
7005 /* Place the cortex a8 stubs last. */
7006 htab->fix_cortex_a8 = -1;
7007 bfd_hash_traverse (table, arm_build_one_stub, info);
7013 /* Locate the Thumb encoded calling stub for NAME. */
7015 static struct elf_link_hash_entry *
7016 find_thumb_glue (struct bfd_link_info *link_info,
7018 char **error_message)
7021 struct elf_link_hash_entry *hash;
7022 struct elf32_arm_link_hash_table *hash_table;
7024 /* We need a pointer to the armelf specific hash table. */
7025 hash_table = elf32_arm_hash_table (link_info);
7026 if (hash_table == NULL)
7029 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7030 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7032 BFD_ASSERT (tmp_name);
7034 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7036 hash = elf_link_hash_lookup
7037 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7040 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7041 "Thumb", tmp_name, name) == -1)
7042 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7049 /* Locate the ARM encoded calling stub for NAME. */
7051 static struct elf_link_hash_entry *
7052 find_arm_glue (struct bfd_link_info *link_info,
7054 char **error_message)
7057 struct elf_link_hash_entry *myh;
7058 struct elf32_arm_link_hash_table *hash_table;
7060 /* We need a pointer to the elfarm specific hash table. */
7061 hash_table = elf32_arm_hash_table (link_info);
7062 if (hash_table == NULL)
7065 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7066 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7068 BFD_ASSERT (tmp_name);
7070 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7072 myh = elf_link_hash_lookup
7073 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7076 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7077 "ARM", tmp_name, name) == -1)
7078 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7085 /* ARM->Thumb glue (static images):
7089 ldr r12, __func_addr
7092 .word func @ behave as if you saw a ARM_32 reloc.
7099 .word func @ behave as if you saw a ARM_32 reloc.
7101 (relocatable images)
7104 ldr r12, __func_offset
7110 #define ARM2THUMB_STATIC_GLUE_SIZE 12
7111 static const insn32 a2t1_ldr_insn = 0xe59fc000;
7112 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
7113 static const insn32 a2t3_func_addr_insn = 0x00000001;
7115 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
7116 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
7117 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
7119 #define ARM2THUMB_PIC_GLUE_SIZE 16
7120 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
7121 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
7122 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
7124 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
7128 __func_from_thumb: __func_from_thumb:
7130 nop ldr r6, __func_addr
7140 #define THUMB2ARM_GLUE_SIZE 8
7141 static const insn16 t2a1_bx_pc_insn = 0x4778;
7142 static const insn16 t2a2_noop_insn = 0x46c0;
7143 static const insn32 t2a3_b_insn = 0xea000000;
7145 #define VFP11_ERRATUM_VENEER_SIZE 8
7146 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
7147 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
7149 #define ARM_BX_VENEER_SIZE 12
7150 static const insn32 armbx1_tst_insn = 0xe3100001;
7151 static const insn32 armbx2_moveq_insn = 0x01a0f000;
7152 static const insn32 armbx3_bx_insn = 0xe12fff10;
7154 #ifndef ELFARM_NABI_C_INCLUDED
7156 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7159 bfd_byte * contents;
7163 /* Do not include empty glue sections in the output. */
7166 s = bfd_get_linker_section (abfd, name);
7168 s->flags |= SEC_EXCLUDE;
7173 BFD_ASSERT (abfd != NULL);
7175 s = bfd_get_linker_section (abfd, name);
7176 BFD_ASSERT (s != NULL);
7178 contents = (bfd_byte *) bfd_alloc (abfd, size);
7180 BFD_ASSERT (s->size == size);
7181 s->contents = contents;
7185 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7187 struct elf32_arm_link_hash_table * globals;
7189 globals = elf32_arm_hash_table (info);
7190 BFD_ASSERT (globals != NULL);
7192 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7193 globals->arm_glue_size,
7194 ARM2THUMB_GLUE_SECTION_NAME);
7196 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7197 globals->thumb_glue_size,
7198 THUMB2ARM_GLUE_SECTION_NAME);
7200 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7201 globals->vfp11_erratum_glue_size,
7202 VFP11_ERRATUM_VENEER_SECTION_NAME);
7204 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7205 globals->stm32l4xx_erratum_glue_size,
7206 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7208 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7209 globals->bx_glue_size,
7210 ARM_BX_GLUE_SECTION_NAME);
7215 /* Allocate space and symbols for calling a Thumb function from Arm mode.
7216 returns the symbol identifying the stub. */
7218 static struct elf_link_hash_entry *
7219 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
7220 struct elf_link_hash_entry * h)
7222 const char * name = h->root.root.string;
7225 struct elf_link_hash_entry * myh;
7226 struct bfd_link_hash_entry * bh;
7227 struct elf32_arm_link_hash_table * globals;
7231 globals = elf32_arm_hash_table (link_info);
7232 BFD_ASSERT (globals != NULL);
7233 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7235 s = bfd_get_linker_section
7236 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
7238 BFD_ASSERT (s != NULL);
7240 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7241 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7243 BFD_ASSERT (tmp_name);
7245 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7247 myh = elf_link_hash_lookup
7248 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7252 /* We've already seen this guy. */
7257 /* The only trick here is using hash_table->arm_glue_size as the value.
7258 Even though the section isn't allocated yet, this is where we will be
7259 putting it. The +1 on the value marks that the stub has not been
7260 output yet - not that it is a Thumb function. */
7262 val = globals->arm_glue_size + 1;
7263 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7264 tmp_name, BSF_GLOBAL, s, val,
7265 NULL, TRUE, FALSE, &bh);
7267 myh = (struct elf_link_hash_entry *) bh;
7268 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7269 myh->forced_local = 1;
7273 if (bfd_link_pic (link_info)
7274 || globals->root.is_relocatable_executable
7275 || globals->pic_veneer)
7276 size = ARM2THUMB_PIC_GLUE_SIZE;
7277 else if (globals->use_blx)
7278 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
7280 size = ARM2THUMB_STATIC_GLUE_SIZE;
7283 globals->arm_glue_size += size;
7288 /* Allocate space for ARMv4 BX veneers. */
7291 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
7294 struct elf32_arm_link_hash_table *globals;
7296 struct elf_link_hash_entry *myh;
7297 struct bfd_link_hash_entry *bh;
7300 /* BX PC does not need a veneer. */
7304 globals = elf32_arm_hash_table (link_info);
7305 BFD_ASSERT (globals != NULL);
7306 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7308 /* Check if this veneer has already been allocated. */
7309 if (globals->bx_glue_offset[reg])
7312 s = bfd_get_linker_section
7313 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
7315 BFD_ASSERT (s != NULL);
7317 /* Add symbol for veneer. */
7319 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
7321 BFD_ASSERT (tmp_name);
7323 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
7325 myh = elf_link_hash_lookup
7326 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
7328 BFD_ASSERT (myh == NULL);
7331 val = globals->bx_glue_size;
7332 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7333 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7334 NULL, TRUE, FALSE, &bh);
7336 myh = (struct elf_link_hash_entry *) bh;
7337 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7338 myh->forced_local = 1;
7340 s->size += ARM_BX_VENEER_SIZE;
7341 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
7342 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
7346 /* Add an entry to the code/data map for section SEC. */
7349 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7351 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7352 unsigned int newidx;
7354 if (sec_data->map == NULL)
7356 sec_data->map = (elf32_arm_section_map *)
7357 bfd_malloc (sizeof (elf32_arm_section_map));
7358 sec_data->mapcount = 0;
7359 sec_data->mapsize = 1;
7362 newidx = sec_data->mapcount++;
7364 if (sec_data->mapcount > sec_data->mapsize)
7366 sec_data->mapsize *= 2;
7367 sec_data->map = (elf32_arm_section_map *)
7368 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7369 * sizeof (elf32_arm_section_map));
7374 sec_data->map[newidx].vma = vma;
7375 sec_data->map[newidx].type = type;
7380 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7381 veneers are handled for now. */
7384 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7385 elf32_vfp11_erratum_list *branch,
7387 asection *branch_sec,
7388 unsigned int offset)
7391 struct elf32_arm_link_hash_table *hash_table;
7393 struct elf_link_hash_entry *myh;
7394 struct bfd_link_hash_entry *bh;
7396 struct _arm_elf_section_data *sec_data;
7397 elf32_vfp11_erratum_list *newerr;
7399 hash_table = elf32_arm_hash_table (link_info);
7400 BFD_ASSERT (hash_table != NULL);
7401 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7403 s = bfd_get_linker_section
7404 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7406 sec_data = elf32_arm_section_data (s);
7408 BFD_ASSERT (s != NULL);
7410 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7411 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7413 BFD_ASSERT (tmp_name);
7415 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7416 hash_table->num_vfp11_fixes);
7418 myh = elf_link_hash_lookup
7419 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7421 BFD_ASSERT (myh == NULL);
7424 val = hash_table->vfp11_erratum_glue_size;
7425 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7426 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7427 NULL, TRUE, FALSE, &bh);
7429 myh = (struct elf_link_hash_entry *) bh;
7430 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7431 myh->forced_local = 1;
7433 /* Link veneer back to calling location. */
7434 sec_data->erratumcount += 1;
7435 newerr = (elf32_vfp11_erratum_list *)
7436 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7438 newerr->type = VFP11_ERRATUM_ARM_VENEER;
7440 newerr->u.v.branch = branch;
7441 newerr->u.v.id = hash_table->num_vfp11_fixes;
7442 branch->u.b.veneer = newerr;
7444 newerr->next = sec_data->erratumlist;
7445 sec_data->erratumlist = newerr;
7447 /* A symbol for the return from the veneer. */
7448 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7449 hash_table->num_vfp11_fixes);
7451 myh = elf_link_hash_lookup
7452 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7459 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7460 branch_sec, val, NULL, TRUE, FALSE, &bh);
7462 myh = (struct elf_link_hash_entry *) bh;
7463 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7464 myh->forced_local = 1;
7468 /* Generate a mapping symbol for the veneer section, and explicitly add an
7469 entry for that symbol to the code/data map for the section. */
7470 if (hash_table->vfp11_erratum_glue_size == 0)
7473 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7474 ever requires this erratum fix. */
7475 _bfd_generic_link_add_one_symbol (link_info,
7476 hash_table->bfd_of_glue_owner, "$a",
7477 BSF_LOCAL, s, 0, NULL,
7480 myh = (struct elf_link_hash_entry *) bh;
7481 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7482 myh->forced_local = 1;
7484 /* The elf32_arm_init_maps function only cares about symbols from input
7485 BFDs. We must make a note of this generated mapping symbol
7486 ourselves so that code byteswapping works properly in
7487 elf32_arm_write_section. */
7488 elf32_arm_section_map_add (s, 'a', 0);
7491 s->size += VFP11_ERRATUM_VENEER_SIZE;
7492 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7493 hash_table->num_vfp11_fixes++;
7495 /* The offset of the veneer. */
7499 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
7500 veneers need to be handled because used only in Cortex-M. */
7503 record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
7504 elf32_stm32l4xx_erratum_list *branch,
7506 asection *branch_sec,
7507 unsigned int offset,
7508 bfd_size_type veneer_size)
7511 struct elf32_arm_link_hash_table *hash_table;
7513 struct elf_link_hash_entry *myh;
7514 struct bfd_link_hash_entry *bh;
7516 struct _arm_elf_section_data *sec_data;
7517 elf32_stm32l4xx_erratum_list *newerr;
7519 hash_table = elf32_arm_hash_table (link_info);
7520 BFD_ASSERT (hash_table != NULL);
7521 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7523 s = bfd_get_linker_section
7524 (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7526 BFD_ASSERT (s != NULL);
7528 sec_data = elf32_arm_section_data (s);
7530 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7531 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7533 BFD_ASSERT (tmp_name);
7535 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7536 hash_table->num_stm32l4xx_fixes);
7538 myh = elf_link_hash_lookup
7539 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7541 BFD_ASSERT (myh == NULL);
7544 val = hash_table->stm32l4xx_erratum_glue_size;
7545 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7546 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7547 NULL, TRUE, FALSE, &bh);
7549 myh = (struct elf_link_hash_entry *) bh;
7550 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7551 myh->forced_local = 1;
7553 /* Link veneer back to calling location. */
7554 sec_data->stm32l4xx_erratumcount += 1;
7555 newerr = (elf32_stm32l4xx_erratum_list *)
7556 bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));
7558 newerr->type = STM32L4XX_ERRATUM_VENEER;
7560 newerr->u.v.branch = branch;
7561 newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
7562 branch->u.b.veneer = newerr;
7564 newerr->next = sec_data->stm32l4xx_erratumlist;
7565 sec_data->stm32l4xx_erratumlist = newerr;
7567 /* A symbol for the return from the veneer. */
7568 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7569 hash_table->num_stm32l4xx_fixes);
7571 myh = elf_link_hash_lookup
7572 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7579 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7580 branch_sec, val, NULL, TRUE, FALSE, &bh);
7582 myh = (struct elf_link_hash_entry *) bh;
7583 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7584 myh->forced_local = 1;
7588 /* Generate a mapping symbol for the veneer section, and explicitly add an
7589 entry for that symbol to the code/data map for the section. */
7590 if (hash_table->stm32l4xx_erratum_glue_size == 0)
7593 /* Creates a THUMB symbol since there is no other choice. */
7594 _bfd_generic_link_add_one_symbol (link_info,
7595 hash_table->bfd_of_glue_owner, "$t",
7596 BSF_LOCAL, s, 0, NULL,
7599 myh = (struct elf_link_hash_entry *) bh;
7600 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7601 myh->forced_local = 1;
7603 /* The elf32_arm_init_maps function only cares about symbols from input
7604 BFDs. We must make a note of this generated mapping symbol
7605 ourselves so that code byteswapping works properly in
7606 elf32_arm_write_section. */
7607 elf32_arm_section_map_add (s, 't', 0);
7610 s->size += veneer_size;
7611 hash_table->stm32l4xx_erratum_glue_size += veneer_size;
7612 hash_table->num_stm32l4xx_fixes++;
7614 /* The offset of the veneer. */
7618 #define ARM_GLUE_SECTION_FLAGS \
7619 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7620 | SEC_READONLY | SEC_LINKER_CREATED)
7622 /* Create a fake section for use by the ARM backend of the linker. */
7625 arm_make_glue_section (bfd * abfd, const char * name)
7629 sec = bfd_get_linker_section (abfd, name);
7634 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7637 || !bfd_set_section_alignment (abfd, sec, 2))
7640 /* Set the gc mark to prevent the section from being removed by garbage
7641 collection, despite the fact that no relocs refer to this section. */
7647 /* Set size of .plt entries. This function is called from the
7648 linker scripts in ld/emultempl/{armelf}.em. */
7651 bfd_elf32_arm_use_long_plt (void)
7653 elf32_arm_use_long_plt_entry = TRUE;
7656 /* Add the glue sections to ABFD. This function is called from the
7657 linker scripts in ld/emultempl/{armelf}.em. */
7660 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7661 struct bfd_link_info *info)
7663 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7664 bfd_boolean dostm32l4xx = globals
7665 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7666 bfd_boolean addglue;
7668 /* If we are only performing a partial
7669 link do not bother adding the glue. */
7670 if (bfd_link_relocatable (info))
7673 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7674 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7675 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7676 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7682 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7685 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7686 ensures they are not marked for deletion by
7687 strip_excluded_output_sections () when veneers are going to be created
7688 later. Not doing so would trigger assert on empty section size in
7689 lang_size_sections_1 (). */
7692 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7694 enum elf32_arm_stub_type stub_type;
7696 /* If we are only performing a partial
7697 link do not bother adding the glue. */
7698 if (bfd_link_relocatable (info))
7701 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7704 const char *out_sec_name;
7706 if (!arm_dedicated_stub_output_section_required (stub_type))
7709 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7710 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7711 if (out_sec != NULL)
7712 out_sec->flags |= SEC_KEEP;
7716 /* Select a BFD to be used to hold the sections used by the glue code.
7717 This function is called from the linker scripts in ld/emultempl/
7721 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7723 struct elf32_arm_link_hash_table *globals;
7725 /* If we are only performing a partial link
7726 do not bother getting a bfd to hold the glue. */
7727 if (bfd_link_relocatable (info))
7730 /* Make sure we don't attach the glue sections to a dynamic object. */
7731 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7733 globals = elf32_arm_hash_table (info);
7734 BFD_ASSERT (globals != NULL);
7736 if (globals->bfd_of_glue_owner != NULL)
7739 /* Save the bfd for later use. */
7740 globals->bfd_of_glue_owner = abfd;
7746 check_use_blx (struct elf32_arm_link_hash_table *globals)
7750 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7753 if (globals->fix_arm1176)
7755 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7756 globals->use_blx = 1;
7760 if (cpu_arch > TAG_CPU_ARCH_V4T)
7761 globals->use_blx = 1;
7766 bfd_elf32_arm_process_before_allocation (bfd *abfd,
7767 struct bfd_link_info *link_info)
7769 Elf_Internal_Shdr *symtab_hdr;
7770 Elf_Internal_Rela *internal_relocs = NULL;
7771 Elf_Internal_Rela *irel, *irelend;
7772 bfd_byte *contents = NULL;
7775 struct elf32_arm_link_hash_table *globals;
7777 /* If we are only performing a partial link do not bother
7778 to construct any glue. */
7779 if (bfd_link_relocatable (link_info))
7782 /* Here we have a bfd that is to be included on the link. We have a
7783 hook to do reloc rummaging, before section sizes are nailed down. */
7784 globals = elf32_arm_hash_table (link_info);
7785 BFD_ASSERT (globals != NULL);
7787 check_use_blx (globals);
7789 if (globals->byteswap_code && !bfd_big_endian (abfd))
7791 _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7796 /* PR 5398: If we have not decided to include any loadable sections in
7797 the output then we will not have a glue owner bfd. This is OK, it
7798 just means that there is nothing else for us to do here. */
7799 if (globals->bfd_of_glue_owner == NULL)
7802 /* Rummage around all the relocs and map the glue vectors. */
7803 sec = abfd->sections;
7808 for (; sec != NULL; sec = sec->next)
7810 if (sec->reloc_count == 0)
7813 if ((sec->flags & SEC_EXCLUDE) != 0)
7816 symtab_hdr = & elf_symtab_hdr (abfd);
7818 /* Load the relocs. */
7820 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
7822 if (internal_relocs == NULL)
7825 irelend = internal_relocs + sec->reloc_count;
7826 for (irel = internal_relocs; irel < irelend; irel++)
7829 unsigned long r_index;
7831 struct elf_link_hash_entry *h;
7833 r_type = ELF32_R_TYPE (irel->r_info);
7834 r_index = ELF32_R_SYM (irel->r_info);
7836 /* These are the only relocation types we care about. */
7837 if ( r_type != R_ARM_PC24
7838 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
7841 /* Get the section contents if we haven't done so already. */
7842 if (contents == NULL)
7844 /* Get cached copy if it exists. */
7845 if (elf_section_data (sec)->this_hdr.contents != NULL)
7846 contents = elf_section_data (sec)->this_hdr.contents;
7849 /* Go get them off disk. */
7850 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7855 if (r_type == R_ARM_V4BX)
7859 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
7860 record_arm_bx_glue (link_info, reg);
7864 /* If the relocation is not against a symbol it cannot concern us. */
7867 /* We don't care about local symbols. */
7868 if (r_index < symtab_hdr->sh_info)
7871 /* This is an external symbol. */
7872 r_index -= symtab_hdr->sh_info;
7873 h = (struct elf_link_hash_entry *)
7874 elf_sym_hashes (abfd)[r_index];
7876 /* If the relocation is against a static symbol it must be within
7877 the current section and so cannot be a cross ARM/Thumb relocation. */
7881 /* If the call will go through a PLT entry then we do not need
7883 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
7889 /* This one is a call from arm code. We need to look up
7890 the target of the call. If it is a thumb target, we
7892 if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
7893 == ST_BRANCH_TO_THUMB)
7894 record_arm_to_thumb_glue (link_info, h);
7902 if (contents != NULL
7903 && elf_section_data (sec)->this_hdr.contents != contents)
7907 if (internal_relocs != NULL
7908 && elf_section_data (sec)->relocs != internal_relocs)
7909 free (internal_relocs);
7910 internal_relocs = NULL;
7916 if (contents != NULL
7917 && elf_section_data (sec)->this_hdr.contents != contents)
7919 if (internal_relocs != NULL
7920 && elf_section_data (sec)->relocs != internal_relocs)
7921 free (internal_relocs);
7928 /* Initialise maps of ARM/Thumb/data for input BFDs. */
7931 bfd_elf32_arm_init_maps (bfd *abfd)
7933 Elf_Internal_Sym *isymbuf;
7934 Elf_Internal_Shdr *hdr;
7935 unsigned int i, localsyms;
7937 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
7938 if (! is_arm_elf (abfd))
7941 if ((abfd->flags & DYNAMIC) != 0)
7944 hdr = & elf_symtab_hdr (abfd);
7945 localsyms = hdr->sh_info;
7947 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
7948 should contain the number of local symbols, which should come before any
7949 global symbols. Mapping symbols are always local. */
7950 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
7953 /* No internal symbols read? Skip this BFD. */
7954 if (isymbuf == NULL)
7957 for (i = 0; i < localsyms; i++)
7959 Elf_Internal_Sym *isym = &isymbuf[i];
7960 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
7964 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
7966 name = bfd_elf_string_from_elf_section (abfd,
7967 hdr->sh_link, isym->st_name);
7969 if (bfd_is_arm_special_symbol_name (name,
7970 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
7971 elf32_arm_section_map_add (sec, name[1], isym->st_value);
7977 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
7978 say what they wanted. */
7981 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
7983 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7984 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7986 if (globals == NULL)
7989 if (globals->fix_cortex_a8 == -1)
7991 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
7992 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
7993 && (out_attr[Tag_CPU_arch_profile].i == 'A'
7994 || out_attr[Tag_CPU_arch_profile].i == 0))
7995 globals->fix_cortex_a8 = 1;
7997 globals->fix_cortex_a8 = 0;
8003 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8005 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8006 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8008 if (globals == NULL)
8010 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8011 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8013 switch (globals->vfp11_fix)
8015 case BFD_ARM_VFP11_FIX_DEFAULT:
8016 case BFD_ARM_VFP11_FIX_NONE:
8017 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8021 /* Give a warning, but do as the user requests anyway. */
8022 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8023 "workaround is not necessary for target architecture"), obfd);
8026 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8027 /* For earlier architectures, we might need the workaround, but do not
8028 enable it by default. If users is running with broken hardware, they
8029 must enable the erratum fix explicitly. */
8030 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8034 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8036 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8037 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8039 if (globals == NULL)
8042 /* We assume only Cortex-M4 may require the fix. */
8043 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8044 || out_attr[Tag_CPU_arch_profile].i != 'M')
8046 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8047 /* Give a warning, but do as the user requests anyway. */
8049 (_("%pB: warning: selected STM32L4XX erratum "
8050 "workaround is not necessary for target architecture"), obfd);
8054 enum bfd_arm_vfp11_pipe
8062 /* Return a VFP register number. This is encoded as RX:X for single-precision
8063 registers, or X:RX for double-precision registers, where RX is the group of
8064 four bits in the instruction encoding and X is the single extension bit.
8065 RX and X fields are specified using their lowest (starting) bit. The return
8068 0...31: single-precision registers s0...s31
8069 32...63: double-precision registers d0...d31.
8071 Although X should be zero for VFP11 (encoding d0...d15 only), we might
8072 encounter VFP3 instructions, so we allow the full range for DP registers. */
8075 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
8079 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8081 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
8084 /* Set bits in *WMASK according to a register number REG as encoded by
8085 bfd_arm_vfp11_regno(). Ignore d16-d31. */
8088 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
8093 *wmask |= 3 << ((reg - 32) * 2);
8096 /* Return TRUE if WMASK overwrites anything in REGS. */
8099 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8103 for (i = 0; i < numregs; i++)
8105 unsigned int reg = regs[i];
8107 if (reg < 32 && (wmask & (1 << reg)) != 0)
8115 if ((wmask & (3 << (reg * 2))) != 0)
8122 /* In this function, we're interested in two things: finding input registers
8123 for VFP data-processing instructions, and finding the set of registers which
8124 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
8125 hold the written set, so FLDM etc. are easy to deal with (we're only
8126 interested in 32 SP registers or 16 dp registers, due to the VFP version
8127 implemented by the chip in question). DP registers are marked by setting
8128 both SP registers in the write mask). */
8130 static enum bfd_arm_vfp11_pipe
8131 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
8134 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
8135 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
8137 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
8140 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8141 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8143 pqrs = ((insn & 0x00800000) >> 20)
8144 | ((insn & 0x00300000) >> 19)
8145 | ((insn & 0x00000040) >> 6);
8149 case 0: /* fmac[sd]. */
8150 case 1: /* fnmac[sd]. */
8151 case 2: /* fmsc[sd]. */
8152 case 3: /* fnmsc[sd]. */
8154 bfd_arm_vfp11_write_mask (destmask, fd);
8156 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
8161 case 4: /* fmul[sd]. */
8162 case 5: /* fnmul[sd]. */
8163 case 6: /* fadd[sd]. */
8164 case 7: /* fsub[sd]. */
8168 case 8: /* fdiv[sd]. */
8171 bfd_arm_vfp11_write_mask (destmask, fd);
8172 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
8177 case 15: /* extended opcode. */
8179 unsigned int extn = ((insn >> 15) & 0x1e)
8180 | ((insn >> 7) & 1);
8184 case 0: /* fcpy[sd]. */
8185 case 1: /* fabs[sd]. */
8186 case 2: /* fneg[sd]. */
8187 case 8: /* fcmp[sd]. */
8188 case 9: /* fcmpe[sd]. */
8189 case 10: /* fcmpz[sd]. */
8190 case 11: /* fcmpez[sd]. */
8191 case 16: /* fuito[sd]. */
8192 case 17: /* fsito[sd]. */
8193 case 24: /* ftoui[sd]. */
8194 case 25: /* ftouiz[sd]. */
8195 case 26: /* ftosi[sd]. */
8196 case 27: /* ftosiz[sd]. */
8197 /* These instructions will not bounce due to underflow. */
8202 case 3: /* fsqrt[sd]. */
8203 /* fsqrt cannot underflow, but it can (perhaps) overwrite
8204 registers to cause the erratum in previous instructions. */
8205 bfd_arm_vfp11_write_mask (destmask, fd);
8209 case 15: /* fcvt{ds,sd}. */
8213 bfd_arm_vfp11_write_mask (destmask, fd);
8215 /* Only FCVTSD can underflow. */
8216 if ((insn & 0x100) != 0)
8235 /* Two-register transfer. */
8236 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
8238 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8240 if ((insn & 0x100000) == 0)
8243 bfd_arm_vfp11_write_mask (destmask, fm);
8246 bfd_arm_vfp11_write_mask (destmask, fm);
8247 bfd_arm_vfp11_write_mask (destmask, fm + 1);
8253 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
8255 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8256 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
8260 case 0: /* Two-reg transfer. We should catch these above. */
8263 case 2: /* fldm[sdx]. */
8267 unsigned int i, offset = insn & 0xff;
8272 for (i = fd; i < fd + offset; i++)
8273 bfd_arm_vfp11_write_mask (destmask, i);
8277 case 4: /* fld[sd]. */
8279 bfd_arm_vfp11_write_mask (destmask, fd);
8288 /* Single-register transfer. Note L==0. */
8289 else if ((insn & 0x0f100e10) == 0x0e000a10)
8291 unsigned int opcode = (insn >> 21) & 7;
8292 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
8296 case 0: /* fmsr/fmdlr. */
8297 case 1: /* fmdhr. */
8298 /* Mark fmdhr and fmdlr as writing to the whole of the DP
8299 destination register. I don't know if this is exactly right,
8300 but it is the conservative choice. */
8301 bfd_arm_vfp11_write_mask (destmask, fn);
8315 static int elf32_arm_compare_mapping (const void * a, const void * b);
8318 /* Look for potentially-troublesome code sequences which might trigger the
8319 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8320 (available from ARM) for details of the erratum. A short version is
8321 described in ld.texinfo. */
8324 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8327 bfd_byte *contents = NULL;
8329 int regs[3], numregs = 0;
8330 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8331 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8333 if (globals == NULL)
8336 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8337 The states transition as follows:
8339 0 -> 1 (vector) or 0 -> 2 (scalar)
8340 A VFP FMAC-pipeline instruction has been seen. Fill
8341 regs[0]..regs[numregs-1] with its input operands. Remember this
8342 instruction in 'first_fmac'.
8345 Any instruction, except for a VFP instruction which overwrites
8350 A VFP instruction has been seen which overwrites any of regs[*].
8351 We must make a veneer! Reset state to 0 before examining next
8355 If we fail to match anything in state 2, reset to state 0 and reset
8356 the instruction pointer to the instruction after 'first_fmac'.
8358 If the VFP11 vector mode is in use, there must be at least two unrelated
8359 instructions between anti-dependent VFP11 instructions to properly avoid
8360 triggering the erratum, hence the use of the extra state 1. */
8362 /* If we are only performing a partial link do not bother
8363 to construct any glue. */
8364 if (bfd_link_relocatable (link_info))
8367 /* Skip if this bfd does not correspond to an ELF image. */
8368 if (! is_arm_elf (abfd))
8371 /* We should have chosen a fix type by the time we get here. */
8372 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8374 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8377 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8378 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8381 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8383 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8384 struct _arm_elf_section_data *sec_data;
8386 /* If we don't have executable progbits, we're not interested in this
8387 section. Also skip if section is to be excluded. */
8388 if (elf_section_type (sec) != SHT_PROGBITS
8389 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8390 || (sec->flags & SEC_EXCLUDE) != 0
8391 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8392 || sec->output_section == bfd_abs_section_ptr
8393 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8396 sec_data = elf32_arm_section_data (sec);
8398 if (sec_data->mapcount == 0)
8401 if (elf_section_data (sec)->this_hdr.contents != NULL)
8402 contents = elf_section_data (sec)->this_hdr.contents;
8403 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8406 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8407 elf32_arm_compare_mapping);
8409 for (span = 0; span < sec_data->mapcount; span++)
8411 unsigned int span_start = sec_data->map[span].vma;
8412 unsigned int span_end = (span == sec_data->mapcount - 1)
8413 ? sec->size : sec_data->map[span + 1].vma;
8414 char span_type = sec_data->map[span].type;
8416 /* FIXME: Only ARM mode is supported at present. We may need to
8417 support Thumb-2 mode also at some point. */
8418 if (span_type != 'a')
8421 for (i = span_start; i < span_end;)
8423 unsigned int next_i = i + 4;
8424 unsigned int insn = bfd_big_endian (abfd)
8425 ? (contents[i] << 24)
8426 | (contents[i + 1] << 16)
8427 | (contents[i + 2] << 8)
8429 : (contents[i + 3] << 24)
8430 | (contents[i + 2] << 16)
8431 | (contents[i + 1] << 8)
8433 unsigned int writemask = 0;
8434 enum bfd_arm_vfp11_pipe vpipe;
8439 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8441 /* I'm assuming the VFP11 erratum can trigger with denorm
8442 operands on either the FMAC or the DS pipeline. This might
8443 lead to slightly overenthusiastic veneer insertion. */
8444 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8446 state = use_vector ? 1 : 2;
8448 veneer_of_insn = insn;
8454 int other_regs[3], other_numregs;
8455 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8458 if (vpipe != VFP11_BAD
8459 && bfd_arm_vfp11_antidependency (writemask, regs,
8469 int other_regs[3], other_numregs;
8470 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8473 if (vpipe != VFP11_BAD
8474 && bfd_arm_vfp11_antidependency (writemask, regs,
8480 next_i = first_fmac + 4;
8486 abort (); /* Should be unreachable. */
8491 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8492 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8494 elf32_arm_section_data (sec)->erratumcount += 1;
8496 newerr->u.b.vfp_insn = veneer_of_insn;
8501 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8508 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8513 newerr->next = sec_data->erratumlist;
8514 sec_data->erratumlist = newerr;
8523 if (contents != NULL
8524 && elf_section_data (sec)->this_hdr.contents != contents)
8532 if (contents != NULL
8533 && elf_section_data (sec)->this_hdr.contents != contents)
8539 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8540 after sections have been laid out, using specially-named symbols. */
8543 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8544 struct bfd_link_info *link_info)
8547 struct elf32_arm_link_hash_table *globals;
8550 if (bfd_link_relocatable (link_info))
8553 /* Skip if this bfd does not correspond to an ELF image. */
8554 if (! is_arm_elf (abfd))
8557 globals = elf32_arm_hash_table (link_info);
8558 if (globals == NULL)
8561 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8562 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8564 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8566 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8567 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8569 for (; errnode != NULL; errnode = errnode->next)
8571 struct elf_link_hash_entry *myh;
8574 switch (errnode->type)
8576 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8577 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8578 /* Find veneer symbol. */
8579 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8580 errnode->u.b.veneer->u.v.id);
8582 myh = elf_link_hash_lookup
8583 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8586 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8587 abfd, "VFP11", tmp_name);
8589 vma = myh->root.u.def.section->output_section->vma
8590 + myh->root.u.def.section->output_offset
8591 + myh->root.u.def.value;
8593 errnode->u.b.veneer->vma = vma;
8596 case VFP11_ERRATUM_ARM_VENEER:
8597 case VFP11_ERRATUM_THUMB_VENEER:
8598 /* Find return location. */
8599 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8602 myh = elf_link_hash_lookup
8603 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8606 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8607 abfd, "VFP11", tmp_name);
8609 vma = myh->root.u.def.section->output_section->vma
8610 + myh->root.u.def.section->output_offset
8611 + myh->root.u.def.value;
8613 errnode->u.v.branch->vma = vma;
8625 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8626 return locations after sections have been laid out, using
8627 specially-named symbols. */
8630 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8631 struct bfd_link_info *link_info)
8634 struct elf32_arm_link_hash_table *globals;
8637 if (bfd_link_relocatable (link_info))
8640 /* Skip if this bfd does not correspond to an ELF image. */
8641 if (! is_arm_elf (abfd))
8644 globals = elf32_arm_hash_table (link_info);
8645 if (globals == NULL)
8648 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8649 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8651 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8653 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8654 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8656 for (; errnode != NULL; errnode = errnode->next)
8658 struct elf_link_hash_entry *myh;
8661 switch (errnode->type)
8663 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8664 /* Find veneer symbol. */
8665 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8666 errnode->u.b.veneer->u.v.id);
8668 myh = elf_link_hash_lookup
8669 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8672 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8673 abfd, "STM32L4XX", tmp_name);
8675 vma = myh->root.u.def.section->output_section->vma
8676 + myh->root.u.def.section->output_offset
8677 + myh->root.u.def.value;
8679 errnode->u.b.veneer->vma = vma;
8682 case STM32L4XX_ERRATUM_VENEER:
8683 /* Find return location. */
8684 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8687 myh = elf_link_hash_lookup
8688 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8691 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8692 abfd, "STM32L4XX", tmp_name);
8694 vma = myh->root.u.def.section->output_section->vma
8695 + myh->root.u.def.section->output_offset
8696 + myh->root.u.def.value;
8698 errnode->u.v.branch->vma = vma;
8710 static inline bfd_boolean
8711 is_thumb2_ldmia (const insn32 insn)
8713 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8714 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8715 return (insn & 0xffd02000) == 0xe8900000;
8718 static inline bfd_boolean
8719 is_thumb2_ldmdb (const insn32 insn)
8721 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8722 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8723 return (insn & 0xffd02000) == 0xe9100000;
8726 static inline bfd_boolean
8727 is_thumb2_vldm (const insn32 insn)
8729 /* A6.5 Extension register load or store instruction
8731 We look for SP 32-bit and DP 64-bit registers.
8732 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8733 <list> is consecutive 64-bit registers
8734 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8735 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8736 <list> is consecutive 32-bit registers
8737 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8738 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8739 if PUW=010 || PUW=011 || PUW=101 VLDM. */
8741 (((insn & 0xfe100f00) == 0xec100b00) ||
8742 ((insn & 0xfe100f00) == 0xec100a00))
8743 && /* (IA without !). */
8744 (((((insn << 7) >> 28) & 0xd) == 0x4)
8745 /* (IA with !), includes VPOP (when reg number is SP). */
8746 || ((((insn << 7) >> 28) & 0xd) == 0x5)
8748 || ((((insn << 7) >> 28) & 0xd) == 0x9));
8751 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
8753 - computes the number and the mode of memory accesses
8754 - decides if the replacement should be done:
8755 . replaces only if > 8-word accesses
8756 . or (testing purposes only) replaces all accesses. */
8759 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8760 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8764 /* The field encoding the register list is the same for both LDMIA
8765 and LDMDB encodings. */
8766 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8767 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8768 else if (is_thumb2_vldm (insn))
8769 nb_words = (insn & 0xff);
8771 /* DEFAULT mode accounts for the real bug condition situation,
8772 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8774 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
8775 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
8778 /* Look for potentially-troublesome code sequences which might trigger
8779 the STM STM32L4XX erratum. */
8782 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8783 struct bfd_link_info *link_info)
8786 bfd_byte *contents = NULL;
8787 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8789 if (globals == NULL)
8792 /* If we are only performing a partial link do not bother
8793 to construct any glue. */
8794 if (bfd_link_relocatable (link_info))
8797 /* Skip if this bfd does not correspond to an ELF image. */
8798 if (! is_arm_elf (abfd))
8801 if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8804 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8805 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8808 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8810 unsigned int i, span;
8811 struct _arm_elf_section_data *sec_data;
8813 /* If we don't have executable progbits, we're not interested in this
8814 section. Also skip if section is to be excluded. */
8815 if (elf_section_type (sec) != SHT_PROGBITS
8816 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8817 || (sec->flags & SEC_EXCLUDE) != 0
8818 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8819 || sec->output_section == bfd_abs_section_ptr
8820 || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8823 sec_data = elf32_arm_section_data (sec);
8825 if (sec_data->mapcount == 0)
8828 if (elf_section_data (sec)->this_hdr.contents != NULL)
8829 contents = elf_section_data (sec)->this_hdr.contents;
8830 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8833 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8834 elf32_arm_compare_mapping);
8836 for (span = 0; span < sec_data->mapcount; span++)
8838 unsigned int span_start = sec_data->map[span].vma;
8839 unsigned int span_end = (span == sec_data->mapcount - 1)
8840 ? sec->size : sec_data->map[span + 1].vma;
8841 char span_type = sec_data->map[span].type;
8842 int itblock_current_pos = 0;
8844 /* Only Thumb2 mode need be supported with this CM4 specific
8845 code, we should not encounter any arm mode eg span_type
8847 if (span_type != 't')
8850 for (i = span_start; i < span_end;)
8852 unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8853 bfd_boolean insn_32bit = FALSE;
8854 bfd_boolean is_ldm = FALSE;
8855 bfd_boolean is_vldm = FALSE;
8856 bfd_boolean is_not_last_in_it_block = FALSE;
8858 /* The first 16-bits of all 32-bit thumb2 instructions start
8859 with opcode[15..13]=0b111 and the encoded op1 can be anything
8860 except opcode[12..11]!=0b00.
8861 See 32-bit Thumb instruction encoding. */
8862 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8865 /* Compute the predicate that tells if the instruction
8866 is concerned by the IT block
8867 - Creates an error if there is a ldm that is not
8868 last in the IT block thus cannot be replaced
8869 - Otherwise we can create a branch at the end of the
8870 IT block, it will be controlled naturally by IT
8871 with the proper pseudo-predicate
8872 - So the only interesting predicate is the one that
8873 tells that we are not on the last item of an IT
8875 if (itblock_current_pos != 0)
8876 is_not_last_in_it_block = !!--itblock_current_pos;
8880 /* Load the rest of the insn (in manual-friendly order). */
8881 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8882 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8883 is_vldm = is_thumb2_vldm (insn);
8885 /* Veneers are created for (v)ldm depending on
8886 option flags and memory accesses conditions; but
8887 if the instruction is not the last instruction of
8888 an IT block, we cannot create a jump there, so we
8890 if ((is_ldm || is_vldm)
8891 && stm32l4xx_need_create_replacing_stub
8892 (insn, globals->stm32l4xx_fix))
8894 if (is_not_last_in_it_block)
8897 /* xgettext:c-format */
8898 (_("%pB(%pA+%#x): error: multiple load detected"
8899 " in non-last IT block instruction:"
8900 " STM32L4XX veneer cannot be generated; "
8901 "use gcc option -mrestrict-it to generate"
8902 " only one instruction per IT block"),
8907 elf32_stm32l4xx_erratum_list *newerr =
8908 (elf32_stm32l4xx_erratum_list *)
8910 (sizeof (elf32_stm32l4xx_erratum_list));
8912 elf32_arm_section_data (sec)
8913 ->stm32l4xx_erratumcount += 1;
8914 newerr->u.b.insn = insn;
8915 /* We create only thumb branches. */
8917 STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
8918 record_stm32l4xx_erratum_veneer
8919 (link_info, newerr, abfd, sec,
8922 STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
8923 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
8925 newerr->next = sec_data->stm32l4xx_erratumlist;
8926 sec_data->stm32l4xx_erratumlist = newerr;
8933 IT blocks are only encoded in T1
8934 Encoding T1: IT{x{y{z}}} <firstcond>
8935 1 0 1 1 - 1 1 1 1 - firstcond - mask
8936 if mask = '0000' then see 'related encodings'
8937 We don't deal with UNPREDICTABLE, just ignore these.
8938 There can be no nested IT blocks so an IT block
8939 is naturally a new one for which it is worth
8940 computing its size. */
8941 bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
8942 && ((insn & 0x000f) != 0x0000);
8943 /* If we have a new IT block we compute its size. */
8946 /* Compute the number of instructions controlled
8947 by the IT block, it will be used to decide
8948 whether we are inside an IT block or not. */
8949 unsigned int mask = insn & 0x000f;
8950 itblock_current_pos = 4 - ctz (mask);
8954 i += insn_32bit ? 4 : 2;
8958 if (contents != NULL
8959 && elf_section_data (sec)->this_hdr.contents != contents)
8967 if (contents != NULL
8968 && elf_section_data (sec)->this_hdr.contents != contents)
8974 /* Set target relocation values needed during linking. */
8977 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
8978 struct bfd_link_info *link_info,
8979 struct elf32_arm_params *params)
8981 struct elf32_arm_link_hash_table *globals;
8983 globals = elf32_arm_hash_table (link_info);
8984 if (globals == NULL)
8987 globals->target1_is_rel = params->target1_is_rel;
8988 if (globals->fdpic_p)
8989 globals->target2_reloc = R_ARM_GOT32;
8990 else if (strcmp (params->target2_type, "rel") == 0)
8991 globals->target2_reloc = R_ARM_REL32;
8992 else if (strcmp (params->target2_type, "abs") == 0)
8993 globals->target2_reloc = R_ARM_ABS32;
8994 else if (strcmp (params->target2_type, "got-rel") == 0)
8995 globals->target2_reloc = R_ARM_GOT_PREL;
8998 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
8999 params->target2_type);
9001 globals->fix_v4bx = params->fix_v4bx;
9002 globals->use_blx |= params->use_blx;
9003 globals->vfp11_fix = params->vfp11_denorm_fix;
9004 globals->stm32l4xx_fix = params->stm32l4xx_fix;
9005 if (globals->fdpic_p)
9006 globals->pic_veneer = 1;
9008 globals->pic_veneer = params->pic_veneer;
9009 globals->fix_cortex_a8 = params->fix_cortex_a8;
9010 globals->fix_arm1176 = params->fix_arm1176;
9011 globals->cmse_implib = params->cmse_implib;
9012 globals->in_implib_bfd = params->in_implib_bfd;
9014 BFD_ASSERT (is_arm_elf (output_bfd));
9015 elf_arm_tdata (output_bfd)->no_enum_size_warning
9016 = params->no_enum_size_warning;
9017 elf_arm_tdata (output_bfd)->no_wchar_size_warning
9018 = params->no_wchar_size_warning;
9021 /* Replace the target offset of a Thumb bl or b.w instruction. */
9024 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
9030 BFD_ASSERT ((offset & 1) == 0);
9032 upper = bfd_get_16 (abfd, insn);
9033 lower = bfd_get_16 (abfd, insn + 2);
9034 reloc_sign = (offset < 0) ? 1 : 0;
9035 upper = (upper & ~(bfd_vma) 0x7ff)
9036 | ((offset >> 12) & 0x3ff)
9037 | (reloc_sign << 10);
9038 lower = (lower & ~(bfd_vma) 0x2fff)
9039 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
9040 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
9041 | ((offset >> 1) & 0x7ff);
9042 bfd_put_16 (abfd, upper, insn);
9043 bfd_put_16 (abfd, lower, insn + 2);
9046 /* Thumb code calling an ARM function. */
9049 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
9053 asection * input_section,
9054 bfd_byte * hit_data,
9057 bfd_signed_vma addend,
9059 char **error_message)
9063 long int ret_offset;
9064 struct elf_link_hash_entry * myh;
9065 struct elf32_arm_link_hash_table * globals;
9067 myh = find_thumb_glue (info, name, error_message);
9071 globals = elf32_arm_hash_table (info);
9072 BFD_ASSERT (globals != NULL);
9073 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9075 my_offset = myh->root.u.def.value;
9077 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9078 THUMB2ARM_GLUE_SECTION_NAME);
9080 BFD_ASSERT (s != NULL);
9081 BFD_ASSERT (s->contents != NULL);
9082 BFD_ASSERT (s->output_section != NULL);
9084 if ((my_offset & 0x01) == 0x01)
9087 && sym_sec->owner != NULL
9088 && !INTERWORK_FLAG (sym_sec->owner))
9091 (_("%pB(%s): warning: interworking not enabled;"
9092 " first occurrence: %pB: %s call to %s"),
9093 sym_sec->owner, name, input_bfd, "Thumb", "ARM");
9099 myh->root.u.def.value = my_offset;
9101 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
9102 s->contents + my_offset);
9104 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
9105 s->contents + my_offset + 2);
9108 /* Address of destination of the stub. */
9109 ((bfd_signed_vma) val)
9111 /* Offset from the start of the current section
9112 to the start of the stubs. */
9114 /* Offset of the start of this stub from the start of the stubs. */
9116 /* Address of the start of the current section. */
9117 + s->output_section->vma)
9118 /* The branch instruction is 4 bytes into the stub. */
9120 /* ARM branches work from the pc of the instruction + 8. */
9123 put_arm_insn (globals, output_bfd,
9124 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
9125 s->contents + my_offset + 4);
9128 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
9130 /* Now go back and fix up the original BL insn to point to here. */
9132 /* Address of where the stub is located. */
9133 (s->output_section->vma + s->output_offset + my_offset)
9134 /* Address of where the BL is located. */
9135 - (input_section->output_section->vma + input_section->output_offset
9137 /* Addend in the relocation. */
9139 /* Biassing for PC-relative addressing. */
9142 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
9147 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
9149 static struct elf_link_hash_entry *
9150 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
9157 char ** error_message)
9160 long int ret_offset;
9161 struct elf_link_hash_entry * myh;
9162 struct elf32_arm_link_hash_table * globals;
9164 myh = find_arm_glue (info, name, error_message);
9168 globals = elf32_arm_hash_table (info);
9169 BFD_ASSERT (globals != NULL);
9170 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9172 my_offset = myh->root.u.def.value;
9174 if ((my_offset & 0x01) == 0x01)
9177 && sym_sec->owner != NULL
9178 && !INTERWORK_FLAG (sym_sec->owner))
9181 (_("%pB(%s): warning: interworking not enabled;"
9182 " first occurrence: %pB: %s call to %s"),
9183 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
9187 myh->root.u.def.value = my_offset;
9189 if (bfd_link_pic (info)
9190 || globals->root.is_relocatable_executable
9191 || globals->pic_veneer)
9193 /* For relocatable objects we can't use absolute addresses,
9194 so construct the address from a relative offset. */
9195 /* TODO: If the offset is small it's probably worth
9196 constructing the address with adds. */
9197 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
9198 s->contents + my_offset);
9199 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
9200 s->contents + my_offset + 4);
9201 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
9202 s->contents + my_offset + 8);
9203 /* Adjust the offset by 4 for the position of the add,
9204 and 8 for the pipeline offset. */
9205 ret_offset = (val - (s->output_offset
9206 + s->output_section->vma
9209 bfd_put_32 (output_bfd, ret_offset,
9210 s->contents + my_offset + 12);
9212 else if (globals->use_blx)
9214 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
9215 s->contents + my_offset);
9217 /* It's a thumb address. Add the low order bit. */
9218 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
9219 s->contents + my_offset + 4);
9223 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
9224 s->contents + my_offset);
9226 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
9227 s->contents + my_offset + 4);
9229 /* It's a thumb address. Add the low order bit. */
9230 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
9231 s->contents + my_offset + 8);
9237 BFD_ASSERT (my_offset <= globals->arm_glue_size);
9242 /* Arm code calling a Thumb function. */
9245 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
9249 asection * input_section,
9250 bfd_byte * hit_data,
9253 bfd_signed_vma addend,
9255 char **error_message)
9257 unsigned long int tmp;
9260 long int ret_offset;
9261 struct elf_link_hash_entry * myh;
9262 struct elf32_arm_link_hash_table * globals;
9264 globals = elf32_arm_hash_table (info);
9265 BFD_ASSERT (globals != NULL);
9266 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9268 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9269 ARM2THUMB_GLUE_SECTION_NAME);
9270 BFD_ASSERT (s != NULL);
9271 BFD_ASSERT (s->contents != NULL);
9272 BFD_ASSERT (s->output_section != NULL);
9274 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
9275 sym_sec, val, s, error_message);
9279 my_offset = myh->root.u.def.value;
9280 tmp = bfd_get_32 (input_bfd, hit_data);
9281 tmp = tmp & 0xFF000000;
9283 /* Somehow these are both 4 too far, so subtract 8. */
9284 ret_offset = (s->output_offset
9286 + s->output_section->vma
9287 - (input_section->output_offset
9288 + input_section->output_section->vma
9292 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
9294 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
9299 /* Populate Arm stub for an exported Thumb function. */
9302 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
9304 struct bfd_link_info * info = (struct bfd_link_info *) inf;
9306 struct elf_link_hash_entry * myh;
9307 struct elf32_arm_link_hash_entry *eh;
9308 struct elf32_arm_link_hash_table * globals;
9311 char *error_message;
9313 eh = elf32_arm_hash_entry (h);
9314 /* Allocate stubs for exported Thumb functions on v4t. */
9315 if (eh->export_glue == NULL)
9318 globals = elf32_arm_hash_table (info);
9319 BFD_ASSERT (globals != NULL);
9320 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9322 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9323 ARM2THUMB_GLUE_SECTION_NAME);
9324 BFD_ASSERT (s != NULL);
9325 BFD_ASSERT (s->contents != NULL);
9326 BFD_ASSERT (s->output_section != NULL);
9328 sec = eh->export_glue->root.u.def.section;
9330 BFD_ASSERT (sec->output_section != NULL);
9332 val = eh->export_glue->root.u.def.value + sec->output_offset
9333 + sec->output_section->vma;
9335 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
9336 h->root.u.def.section->owner,
9337 globals->obfd, sec, val, s,
9343 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
9346 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9351 struct elf32_arm_link_hash_table *globals;
9353 globals = elf32_arm_hash_table (info);
9354 BFD_ASSERT (globals != NULL);
9355 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9357 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9358 ARM_BX_GLUE_SECTION_NAME);
9359 BFD_ASSERT (s != NULL);
9360 BFD_ASSERT (s->contents != NULL);
9361 BFD_ASSERT (s->output_section != NULL);
9363 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9365 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9367 if ((globals->bx_glue_offset[reg] & 1) == 0)
9369 p = s->contents + glue_addr;
9370 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9371 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9372 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9373 globals->bx_glue_offset[reg] |= 1;
9376 return glue_addr + s->output_section->vma + s->output_offset;
9379 /* Generate Arm stubs for exported Thumb symbols. */
9381 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9382 struct bfd_link_info *link_info)
9384 struct elf32_arm_link_hash_table * globals;
9386 if (link_info == NULL)
9387 /* Ignore this if we are not called by the ELF backend linker. */
9390 globals = elf32_arm_hash_table (link_info);
9391 if (globals == NULL)
9394 /* If blx is available then exported Thumb symbols are OK and there is
9396 if (globals->use_blx)
9399 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9403 /* Reserve space for COUNT dynamic relocations in relocation selection
9407 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9408 bfd_size_type count)
9410 struct elf32_arm_link_hash_table *htab;
9412 htab = elf32_arm_hash_table (info);
9413 BFD_ASSERT (htab->root.dynamic_sections_created);
9416 sreloc->size += RELOC_SIZE (htab) * count;
9419 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9420 dynamic, the relocations should go in SRELOC, otherwise they should
9421 go in the special .rel.iplt section. */
9424 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9425 bfd_size_type count)
9427 struct elf32_arm_link_hash_table *htab;
9429 htab = elf32_arm_hash_table (info);
9430 if (!htab->root.dynamic_sections_created)
9431 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9434 BFD_ASSERT (sreloc != NULL);
9435 sreloc->size += RELOC_SIZE (htab) * count;
9439 /* Add relocation REL to the end of relocation section SRELOC. */
9442 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9443 asection *sreloc, Elf_Internal_Rela *rel)
9446 struct elf32_arm_link_hash_table *htab;
9448 htab = elf32_arm_hash_table (info);
9449 if (!htab->root.dynamic_sections_created
9450 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9451 sreloc = htab->root.irelplt;
9454 loc = sreloc->contents;
9455 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9456 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9458 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9461 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9462 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9466 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
9467 bfd_boolean is_iplt_entry,
9468 union gotplt_union *root_plt,
9469 struct arm_plt_info *arm_plt)
9471 struct elf32_arm_link_hash_table *htab;
9475 htab = elf32_arm_hash_table (info);
9479 splt = htab->root.iplt;
9480 sgotplt = htab->root.igotplt;
9482 /* NaCl uses a special first entry in .iplt too. */
9483 if (htab->nacl_p && splt->size == 0)
9484 splt->size += htab->plt_header_size;
9486 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
9487 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
9491 splt = htab->root.splt;
9492 sgotplt = htab->root.sgotplt;
9496 /* Allocate room for R_ARM_FUNCDESC_VALUE. */
9497 /* For lazy binding, relocations will be put into .rel.plt, in
9498 .rel.got otherwise. */
9499 /* FIXME: today we don't support lazy binding so put it in .rel.got */
9500 if (info->flags & DF_BIND_NOW)
9501 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
9503 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9507 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
9508 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9511 /* If this is the first .plt entry, make room for the special
9513 if (splt->size == 0)
9514 splt->size += htab->plt_header_size;
9516 htab->next_tls_desc_index++;
9519 /* Allocate the PLT entry itself, including any leading Thumb stub. */
9520 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9521 splt->size += PLT_THUMB_STUB_SIZE;
9522 root_plt->offset = splt->size;
9523 splt->size += htab->plt_entry_size;
9525 if (!htab->symbian_p)
9527 /* We also need to make an entry in the .got.plt section, which
9528 will be placed in the .got section by the linker script. */
9530 arm_plt->got_offset = sgotplt->size;
9532 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
9534 /* Function descriptor takes 64 bits in GOT. */
9542 arm_movw_immediate (bfd_vma value)
9544 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9548 arm_movt_immediate (bfd_vma value)
9550 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9553 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
9554 the entry lives in .iplt and resolves to (*SYM_VALUE)().
9555 Otherwise, DYNINDX is the index of the symbol in the dynamic
9556 symbol table and SYM_VALUE is undefined.
9558 ROOT_PLT points to the offset of the PLT entry from the start of its
9559 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
9560 bookkeeping information.
9562 Returns FALSE if there was a problem. */
9565 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
9566 union gotplt_union *root_plt,
9567 struct arm_plt_info *arm_plt,
9568 int dynindx, bfd_vma sym_value)
9570 struct elf32_arm_link_hash_table *htab;
9576 Elf_Internal_Rela rel;
9577 bfd_vma plt_header_size;
9578 bfd_vma got_header_size;
9580 htab = elf32_arm_hash_table (info);
9582 /* Pick the appropriate sections and sizes. */
9585 splt = htab->root.iplt;
9586 sgot = htab->root.igotplt;
9587 srel = htab->root.irelplt;
9589 /* There are no reserved entries in .igot.plt, and no special
9590 first entry in .iplt. */
9591 got_header_size = 0;
9592 plt_header_size = 0;
9596 splt = htab->root.splt;
9597 sgot = htab->root.sgotplt;
9598 srel = htab->root.srelplt;
9600 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
9601 plt_header_size = htab->plt_header_size;
9603 BFD_ASSERT (splt != NULL && srel != NULL);
9605 /* Fill in the entry in the procedure linkage table. */
9606 if (htab->symbian_p)
9608 BFD_ASSERT (dynindx >= 0);
9609 put_arm_insn (htab, output_bfd,
9610 elf32_arm_symbian_plt_entry[0],
9611 splt->contents + root_plt->offset);
9612 bfd_put_32 (output_bfd,
9613 elf32_arm_symbian_plt_entry[1],
9614 splt->contents + root_plt->offset + 4);
9616 /* Fill in the entry in the .rel.plt section. */
9617 rel.r_offset = (splt->output_section->vma
9618 + splt->output_offset
9619 + root_plt->offset + 4);
9620 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
9622 /* Get the index in the procedure linkage table which
9623 corresponds to this symbol. This is the index of this symbol
9624 in all the symbols for which we are making plt entries. The
9625 first entry in the procedure linkage table is reserved. */
9626 plt_index = ((root_plt->offset - plt_header_size)
9627 / htab->plt_entry_size);
9631 bfd_vma got_offset, got_address, plt_address;
9632 bfd_vma got_displacement, initial_got_entry;
9635 BFD_ASSERT (sgot != NULL);
9637 /* Get the offset into the .(i)got.plt table of the entry that
9638 corresponds to this function. */
9639 got_offset = (arm_plt->got_offset & -2);
9641 /* Get the index in the procedure linkage table which
9642 corresponds to this symbol. This is the index of this symbol
9643 in all the symbols for which we are making plt entries.
9644 After the reserved .got.plt entries, all symbols appear in
9645 the same order as in .plt. */
9647 /* Function descriptor takes 8 bytes. */
9648 plt_index = (got_offset - got_header_size) / 8;
9650 plt_index = (got_offset - got_header_size) / 4;
9652 /* Calculate the address of the GOT entry. */
9653 got_address = (sgot->output_section->vma
9654 + sgot->output_offset
9657 /* ...and the address of the PLT entry. */
9658 plt_address = (splt->output_section->vma
9659 + splt->output_offset
9660 + root_plt->offset);
9662 ptr = splt->contents + root_plt->offset;
9663 if (htab->vxworks_p && bfd_link_pic (info))
9668 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9670 val = elf32_arm_vxworks_shared_plt_entry[i];
9672 val |= got_address - sgot->output_section->vma;
9674 val |= plt_index * RELOC_SIZE (htab);
9675 if (i == 2 || i == 5)
9676 bfd_put_32 (output_bfd, val, ptr);
9678 put_arm_insn (htab, output_bfd, val, ptr);
9681 else if (htab->vxworks_p)
9686 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9688 val = elf32_arm_vxworks_exec_plt_entry[i];
9692 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
9694 val |= plt_index * RELOC_SIZE (htab);
9695 if (i == 2 || i == 5)
9696 bfd_put_32 (output_bfd, val, ptr);
9698 put_arm_insn (htab, output_bfd, val, ptr);
9701 loc = (htab->srelplt2->contents
9702 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
9704 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9705 referencing the GOT for this PLT entry. */
9706 rel.r_offset = plt_address + 8;
9707 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
9708 rel.r_addend = got_offset;
9709 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9710 loc += RELOC_SIZE (htab);
9712 /* Create the R_ARM_ABS32 relocation referencing the
9713 beginning of the PLT for this GOT entry. */
9714 rel.r_offset = got_address;
9715 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
9717 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9719 else if (htab->nacl_p)
9721 /* Calculate the displacement between the PLT slot and the
9722 common tail that's part of the special initial PLT slot. */
9723 int32_t tail_displacement
9724 = ((splt->output_section->vma + splt->output_offset
9725 + ARM_NACL_PLT_TAIL_OFFSET)
9726 - (plt_address + htab->plt_entry_size + 4));
9727 BFD_ASSERT ((tail_displacement & 3) == 0);
9728 tail_displacement >>= 2;
9730 BFD_ASSERT ((tail_displacement & 0xff000000) == 0
9731 || (-tail_displacement & 0xff000000) == 0);
9733 /* Calculate the displacement between the PLT slot and the entry
9734 in the GOT. The offset accounts for the value produced by
9735 adding to pc in the penultimate instruction of the PLT stub. */
9736 got_displacement = (got_address
9737 - (plt_address + htab->plt_entry_size));
9739 /* NaCl does not support interworking at all. */
9740 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
9742 put_arm_insn (htab, output_bfd,
9743 elf32_arm_nacl_plt_entry[0]
9744 | arm_movw_immediate (got_displacement),
9746 put_arm_insn (htab, output_bfd,
9747 elf32_arm_nacl_plt_entry[1]
9748 | arm_movt_immediate (got_displacement),
9750 put_arm_insn (htab, output_bfd,
9751 elf32_arm_nacl_plt_entry[2],
9753 put_arm_insn (htab, output_bfd,
9754 elf32_arm_nacl_plt_entry[3]
9755 | (tail_displacement & 0x00ffffff),
9758 else if (htab->fdpic_p)
9760 const bfd_vma *plt_entry = using_thumb_only(htab)
9761 ? elf32_arm_fdpic_thumb_plt_entry
9762 : elf32_arm_fdpic_plt_entry;
9764 /* Fill-up Thumb stub if needed. */
9765 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9767 put_thumb_insn (htab, output_bfd,
9768 elf32_arm_plt_thumb_stub[0], ptr - 4);
9769 put_thumb_insn (htab, output_bfd,
9770 elf32_arm_plt_thumb_stub[1], ptr - 2);
9772 /* As we are using 32 bit instructions even for the Thumb
9773 version, we have to use 'put_arm_insn' instead of
9774 'put_thumb_insn'. */
9775 put_arm_insn(htab, output_bfd, plt_entry[0], ptr + 0);
9776 put_arm_insn(htab, output_bfd, plt_entry[1], ptr + 4);
9777 put_arm_insn(htab, output_bfd, plt_entry[2], ptr + 8);
9778 put_arm_insn(htab, output_bfd, plt_entry[3], ptr + 12);
9779 bfd_put_32 (output_bfd, got_offset, ptr + 16);
9781 if (!(info->flags & DF_BIND_NOW))
9783 /* funcdesc_value_reloc_offset. */
9784 bfd_put_32 (output_bfd,
9785 htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
9787 put_arm_insn(htab, output_bfd, plt_entry[6], ptr + 24);
9788 put_arm_insn(htab, output_bfd, plt_entry[7], ptr + 28);
9789 put_arm_insn(htab, output_bfd, plt_entry[8], ptr + 32);
9790 put_arm_insn(htab, output_bfd, plt_entry[9], ptr + 36);
9793 else if (using_thumb_only (htab))
9795 /* PR ld/16017: Generate thumb only PLT entries. */
9796 if (!using_thumb2 (htab))
9798 /* FIXME: We ought to be able to generate thumb-1 PLT
9800 _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9805 /* Calculate the displacement between the PLT slot and the entry in
9806 the GOT. The 12-byte offset accounts for the value produced by
9807 adding to pc in the 3rd instruction of the PLT stub. */
9808 got_displacement = got_address - (plt_address + 12);
9810 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9811 instead of 'put_thumb_insn'. */
9812 put_arm_insn (htab, output_bfd,
9813 elf32_thumb2_plt_entry[0]
9814 | ((got_displacement & 0x000000ff) << 16)
9815 | ((got_displacement & 0x00000700) << 20)
9816 | ((got_displacement & 0x00000800) >> 1)
9817 | ((got_displacement & 0x0000f000) >> 12),
9819 put_arm_insn (htab, output_bfd,
9820 elf32_thumb2_plt_entry[1]
9821 | ((got_displacement & 0x00ff0000) )
9822 | ((got_displacement & 0x07000000) << 4)
9823 | ((got_displacement & 0x08000000) >> 17)
9824 | ((got_displacement & 0xf0000000) >> 28),
9826 put_arm_insn (htab, output_bfd,
9827 elf32_thumb2_plt_entry[2],
9829 put_arm_insn (htab, output_bfd,
9830 elf32_thumb2_plt_entry[3],
9835 /* Calculate the displacement between the PLT slot and the
9836 entry in the GOT. The eight-byte offset accounts for the
9837 value produced by adding to pc in the first instruction
9839 got_displacement = got_address - (plt_address + 8);
9841 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9843 put_thumb_insn (htab, output_bfd,
9844 elf32_arm_plt_thumb_stub[0], ptr - 4);
9845 put_thumb_insn (htab, output_bfd,
9846 elf32_arm_plt_thumb_stub[1], ptr - 2);
9849 if (!elf32_arm_use_long_plt_entry)
9851 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
9853 put_arm_insn (htab, output_bfd,
9854 elf32_arm_plt_entry_short[0]
9855 | ((got_displacement & 0x0ff00000) >> 20),
9857 put_arm_insn (htab, output_bfd,
9858 elf32_arm_plt_entry_short[1]
9859 | ((got_displacement & 0x000ff000) >> 12),
9861 put_arm_insn (htab, output_bfd,
9862 elf32_arm_plt_entry_short[2]
9863 | (got_displacement & 0x00000fff),
9865 #ifdef FOUR_WORD_PLT
9866 bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
9871 put_arm_insn (htab, output_bfd,
9872 elf32_arm_plt_entry_long[0]
9873 | ((got_displacement & 0xf0000000) >> 28),
9875 put_arm_insn (htab, output_bfd,
9876 elf32_arm_plt_entry_long[1]
9877 | ((got_displacement & 0x0ff00000) >> 20),
9879 put_arm_insn (htab, output_bfd,
9880 elf32_arm_plt_entry_long[2]
9881 | ((got_displacement & 0x000ff000) >> 12),
9883 put_arm_insn (htab, output_bfd,
9884 elf32_arm_plt_entry_long[3]
9885 | (got_displacement & 0x00000fff),
9890 /* Fill in the entry in the .rel(a).(i)plt section. */
9891 rel.r_offset = got_address;
9895 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9896 The dynamic linker or static executable then calls SYM_VALUE
9897 to determine the correct run-time value of the .igot.plt entry. */
9898 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9899 initial_got_entry = sym_value;
9903 /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
9904 used by PLT entry. */
9907 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
9908 initial_got_entry = 0;
9912 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
9913 initial_got_entry = (splt->output_section->vma
9914 + splt->output_offset);
9918 /* Fill in the entry in the global offset table. */
9919 bfd_put_32 (output_bfd, initial_got_entry,
9920 sgot->contents + got_offset);
9922 if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
9924 /* Setup initial funcdesc value. */
9925 /* FIXME: we don't support lazy binding because there is a
9926 race condition between both words getting written and
9927 some other thread attempting to read them. The ARM
9928 architecture does not have an atomic 64 bit load/store
9929 instruction that could be used to prevent it; it is
9930 recommended that threaded FDPIC applications run with the
9931 LD_BIND_NOW environment variable set. */
9932 bfd_put_32(output_bfd, plt_address + 0x18,
9933 sgot->contents + got_offset);
9934 bfd_put_32(output_bfd, -1 /*TODO*/,
9935 sgot->contents + got_offset + 4);
9940 elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
9945 /* For FDPIC we put PLT relocationss into .rel.got when not
9946 lazy binding otherwise we put them in .rel.plt. For now,
9947 we don't support lazy binding so put it in .rel.got. */
9948 if (info->flags & DF_BIND_NOW)
9949 elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelgot, &rel);
9951 elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelplt, &rel);
9955 loc = srel->contents + plt_index * RELOC_SIZE (htab);
9956 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9963 /* Some relocations map to different relocations depending on the
9964 target. Return the real relocation. */
9967 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
9973 if (globals->target1_is_rel)
9979 return globals->target2_reloc;
9986 /* Return the base VMA address which should be subtracted from real addresses
9987 when resolving @dtpoff relocation.
9988 This is PT_TLS segment p_vaddr. */
9991 dtpoff_base (struct bfd_link_info *info)
9993 /* If tls_sec is NULL, we should have signalled an error already. */
9994 if (elf_hash_table (info)->tls_sec == NULL)
9996 return elf_hash_table (info)->tls_sec->vma;
9999 /* Return the relocation value for @tpoff relocation
10000 if STT_TLS virtual address is ADDRESS. */
10003 tpoff (struct bfd_link_info *info, bfd_vma address)
10005 struct elf_link_hash_table *htab = elf_hash_table (info);
10008 /* If tls_sec is NULL, we should have signalled an error already. */
10009 if (htab->tls_sec == NULL)
10011 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10012 return address - htab->tls_sec->vma + base;
10015 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10016 VALUE is the relocation value. */
10018 static bfd_reloc_status_type
10019 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10022 return bfd_reloc_overflow;
10024 value |= bfd_get_32 (abfd, data) & 0xfffff000;
10025 bfd_put_32 (abfd, value, data);
10026 return bfd_reloc_ok;
10029 /* Handle TLS relaxations. Relaxing is possible for symbols that use
10030 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10031 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10033 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10034 is to then call final_link_relocate. Return other values in the
10037 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
10038 the pre-relaxed code. It would be nice if the relocs were updated
10039 to match the optimization. */
10041 static bfd_reloc_status_type
10042 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
10043 bfd *input_bfd, asection *input_sec, bfd_byte *contents,
10044 Elf_Internal_Rela *rel, unsigned long is_local)
10046 unsigned long insn;
10048 switch (ELF32_R_TYPE (rel->r_info))
10051 return bfd_reloc_notsupported;
10053 case R_ARM_TLS_GOTDESC:
10058 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10060 insn -= 5; /* THUMB */
10062 insn -= 8; /* ARM */
10064 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10065 return bfd_reloc_continue;
10067 case R_ARM_THM_TLS_DESCSEQ:
10069 insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
10070 if ((insn & 0xff78) == 0x4478) /* add rx, pc */
10074 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10076 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
10080 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10083 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
10085 else if ((insn & 0xff87) == 0x4780) /* blx rx */
10089 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10092 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
10093 contents + rel->r_offset);
10097 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10098 /* It's a 32 bit instruction, fetch the rest of it for
10099 error generation. */
10100 insn = (insn << 16)
10101 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
10103 /* xgettext:c-format */
10104 (_("%pB(%pA+%#" PRIx64 "): "
10105 "unexpected %s instruction '%#lx' in TLS trampoline"),
10106 input_bfd, input_sec, (uint64_t) rel->r_offset,
10108 return bfd_reloc_notsupported;
10112 case R_ARM_TLS_DESCSEQ:
10114 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10115 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10119 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
10120 contents + rel->r_offset);
10122 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10126 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10129 bfd_put_32 (input_bfd, insn & 0xfffff000,
10130 contents + rel->r_offset);
10132 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
10136 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10139 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
10140 contents + rel->r_offset);
10145 /* xgettext:c-format */
10146 (_("%pB(%pA+%#" PRIx64 "): "
10147 "unexpected %s instruction '%#lx' in TLS trampoline"),
10148 input_bfd, input_sec, (uint64_t) rel->r_offset,
10150 return bfd_reloc_notsupported;
10154 case R_ARM_TLS_CALL:
10155 /* GD->IE relaxation, turn the instruction into 'nop' or
10156 'ldr r0, [pc,r0]' */
10157 insn = is_local ? 0xe1a00000 : 0xe79f0000;
10158 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10161 case R_ARM_THM_TLS_CALL:
10162 /* GD->IE relaxation. */
10164 /* add r0,pc; ldr r0, [r0] */
10166 else if (using_thumb2 (globals))
10173 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
10174 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
10177 return bfd_reloc_ok;
10180 /* For a given value of n, calculate the value of G_n as required to
10181 deal with group relocations. We return it in the form of an
10182 encoded constant-and-rotation, together with the final residual. If n is
10183 specified as less than zero, then final_residual is filled with the
10184 input value and no further action is performed. */
10187 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10191 bfd_vma encoded_g_n = 0;
10192 bfd_vma residual = value; /* Also known as Y_n. */
10194 for (current_n = 0; current_n <= n; current_n++)
10198 /* Calculate which part of the value to mask. */
10205 /* Determine the most significant bit in the residual and
10206 align the resulting value to a 2-bit boundary. */
10207 for (msb = 30; msb >= 0; msb -= 2)
10208 if (residual & (3 << msb))
10211 /* The desired shift is now (msb - 6), or zero, whichever
10218 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10219 g_n = residual & (0xff << shift);
10220 encoded_g_n = (g_n >> shift)
10221 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10223 /* Calculate the residual for the next time around. */
10227 *final_residual = residual;
10229 return encoded_g_n;
10232 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10233 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
10236 identify_add_or_sub (bfd_vma insn)
10238 int opcode = insn & 0x1e00000;
10240 if (opcode == 1 << 23) /* ADD */
10243 if (opcode == 1 << 22) /* SUB */
10249 /* Perform a relocation as part of a final link. */
10251 static bfd_reloc_status_type
10252 elf32_arm_final_link_relocate (reloc_howto_type * howto,
10255 asection * input_section,
10256 bfd_byte * contents,
10257 Elf_Internal_Rela * rel,
10259 struct bfd_link_info * info,
10260 asection * sym_sec,
10261 const char * sym_name,
10262 unsigned char st_type,
10263 enum arm_st_branch_type branch_type,
10264 struct elf_link_hash_entry * h,
10265 bfd_boolean * unresolved_reloc_p,
10266 char ** error_message)
10268 unsigned long r_type = howto->type;
10269 unsigned long r_symndx;
10270 bfd_byte * hit_data = contents + rel->r_offset;
10271 bfd_vma * local_got_offsets;
10272 bfd_vma * local_tlsdesc_gotents;
10275 asection * sreloc = NULL;
10276 asection * srelgot;
10278 bfd_signed_vma signed_addend;
10279 unsigned char dynreloc_st_type;
10280 bfd_vma dynreloc_value;
10281 struct elf32_arm_link_hash_table * globals;
10282 struct elf32_arm_link_hash_entry *eh;
10283 union gotplt_union *root_plt;
10284 struct arm_plt_info *arm_plt;
10285 bfd_vma plt_offset;
10286 bfd_vma gotplt_offset;
10287 bfd_boolean has_iplt_entry;
10288 bfd_boolean resolved_to_zero;
10290 globals = elf32_arm_hash_table (info);
10291 if (globals == NULL)
10292 return bfd_reloc_notsupported;
10294 BFD_ASSERT (is_arm_elf (input_bfd));
10295 BFD_ASSERT (howto != NULL);
10297 /* Some relocation types map to different relocations depending on the
10298 target. We pick the right one here. */
10299 r_type = arm_real_reloc_type (globals, r_type);
10301 /* It is possible to have linker relaxations on some TLS access
10302 models. Update our information here. */
10303 r_type = elf32_arm_tls_transition (info, r_type, h);
10305 if (r_type != howto->type)
10306 howto = elf32_arm_howto_from_type (r_type);
10308 eh = (struct elf32_arm_link_hash_entry *) h;
10309 sgot = globals->root.sgot;
10310 local_got_offsets = elf_local_got_offsets (input_bfd);
10311 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10313 if (globals->root.dynamic_sections_created)
10314 srelgot = globals->root.srelgot;
10318 r_symndx = ELF32_R_SYM (rel->r_info);
10320 if (globals->use_rel)
10322 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
10324 if (addend & ((howto->src_mask + 1) >> 1))
10326 signed_addend = -1;
10327 signed_addend &= ~ howto->src_mask;
10328 signed_addend |= addend;
10331 signed_addend = addend;
10334 addend = signed_addend = rel->r_addend;
10336 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10337 are resolving a function call relocation. */
10338 if (using_thumb_only (globals)
10339 && (r_type == R_ARM_THM_CALL
10340 || r_type == R_ARM_THM_JUMP24)
10341 && branch_type == ST_BRANCH_TO_ARM)
10342 branch_type = ST_BRANCH_TO_THUMB;
10344 /* Record the symbol information that should be used in dynamic
10346 dynreloc_st_type = st_type;
10347 dynreloc_value = value;
10348 if (branch_type == ST_BRANCH_TO_THUMB)
10349 dynreloc_value |= 1;
10351 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10352 VALUE appropriately for relocations that we resolve at link time. */
10353 has_iplt_entry = FALSE;
10354 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10356 && root_plt->offset != (bfd_vma) -1)
10358 plt_offset = root_plt->offset;
10359 gotplt_offset = arm_plt->got_offset;
10361 if (h == NULL || eh->is_iplt)
10363 has_iplt_entry = TRUE;
10364 splt = globals->root.iplt;
10366 /* Populate .iplt entries here, because not all of them will
10367 be seen by finish_dynamic_symbol. The lower bit is set if
10368 we have already populated the entry. */
10369 if (plt_offset & 1)
10373 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10374 -1, dynreloc_value))
10375 root_plt->offset |= 1;
10377 return bfd_reloc_notsupported;
10380 /* Static relocations always resolve to the .iplt entry. */
10381 st_type = STT_FUNC;
10382 value = (splt->output_section->vma
10383 + splt->output_offset
10385 branch_type = ST_BRANCH_TO_ARM;
10387 /* If there are non-call relocations that resolve to the .iplt
10388 entry, then all dynamic ones must too. */
10389 if (arm_plt->noncall_refcount != 0)
10391 dynreloc_st_type = st_type;
10392 dynreloc_value = value;
10396 /* We populate the .plt entry in finish_dynamic_symbol. */
10397 splt = globals->root.splt;
10402 plt_offset = (bfd_vma) -1;
10403 gotplt_offset = (bfd_vma) -1;
10406 resolved_to_zero = (h != NULL
10407 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10412 /* We don't need to find a value for this symbol. It's just a
10414 *unresolved_reloc_p = FALSE;
10415 return bfd_reloc_ok;
10418 if (!globals->vxworks_p)
10419 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10420 /* Fall through. */
10424 case R_ARM_ABS32_NOI:
10426 case R_ARM_REL32_NOI:
10432 /* Handle relocations which should use the PLT entry. ABS32/REL32
10433 will use the symbol's value, which may point to a PLT entry, but we
10434 don't need to handle that here. If we created a PLT entry, all
10435 branches in this object should go to it, except if the PLT is too
10436 far away, in which case a long branch stub should be inserted. */
10437 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10438 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10439 && r_type != R_ARM_CALL
10440 && r_type != R_ARM_JUMP24
10441 && r_type != R_ARM_PLT32)
10442 && plt_offset != (bfd_vma) -1)
10444 /* If we've created a .plt section, and assigned a PLT entry
10445 to this function, it must either be a STT_GNU_IFUNC reference
10446 or not be known to bind locally. In other cases, we should
10447 have cleared the PLT entry by now. */
10448 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10450 value = (splt->output_section->vma
10451 + splt->output_offset
10453 *unresolved_reloc_p = FALSE;
10454 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10455 contents, rel->r_offset, value,
10459 /* When generating a shared object or relocatable executable, these
10460 relocations are copied into the output file to be resolved at
10462 if ((bfd_link_pic (info)
10463 || globals->root.is_relocatable_executable
10464 || globals->fdpic_p)
10465 && (input_section->flags & SEC_ALLOC)
10466 && !(globals->vxworks_p
10467 && strcmp (input_section->output_section->name,
10469 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10470 || !SYMBOL_CALLS_LOCAL (info, h))
10471 && !(input_bfd == globals->stub_bfd
10472 && strstr (input_section->name, STUB_SUFFIX))
10474 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10475 && !resolved_to_zero)
10476 || h->root.type != bfd_link_hash_undefweak)
10477 && r_type != R_ARM_PC24
10478 && r_type != R_ARM_CALL
10479 && r_type != R_ARM_JUMP24
10480 && r_type != R_ARM_PREL31
10481 && r_type != R_ARM_PLT32)
10483 Elf_Internal_Rela outrel;
10484 bfd_boolean skip, relocate;
10487 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10488 && !h->def_regular)
10490 char *v = _("shared object");
10492 if (bfd_link_executable (info))
10493 v = _("PIE executable");
10496 (_("%pB: relocation %s against external or undefined symbol `%s'"
10497 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10498 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10499 return bfd_reloc_notsupported;
10502 *unresolved_reloc_p = FALSE;
10504 if (sreloc == NULL && globals->root.dynamic_sections_created)
10506 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10507 ! globals->use_rel);
10509 if (sreloc == NULL)
10510 return bfd_reloc_notsupported;
10516 outrel.r_addend = addend;
10518 _bfd_elf_section_offset (output_bfd, info, input_section,
10520 if (outrel.r_offset == (bfd_vma) -1)
10522 else if (outrel.r_offset == (bfd_vma) -2)
10523 skip = TRUE, relocate = TRUE;
10524 outrel.r_offset += (input_section->output_section->vma
10525 + input_section->output_offset);
10528 memset (&outrel, 0, sizeof outrel);
10530 && h->dynindx != -1
10531 && (!bfd_link_pic (info)
10532 || !(bfd_link_pie (info)
10533 || SYMBOLIC_BIND (info, h))
10534 || !h->def_regular))
10535 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10540 /* This symbol is local, or marked to become local. */
10541 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10542 || (globals->fdpic_p && !bfd_link_pic(info)));
10543 if (globals->symbian_p)
10547 /* On Symbian OS, the data segment and text segement
10548 can be relocated independently. Therefore, we
10549 must indicate the segment to which this
10550 relocation is relative. The BPABI allows us to
10551 use any symbol in the right segment; we just use
10552 the section symbol as it is convenient. (We
10553 cannot use the symbol given by "h" directly as it
10554 will not appear in the dynamic symbol table.)
10556 Note that the dynamic linker ignores the section
10557 symbol value, so we don't subtract osec->vma
10558 from the emitted reloc addend. */
10560 osec = sym_sec->output_section;
10562 osec = input_section->output_section;
10563 symbol = elf_section_data (osec)->dynindx;
10566 struct elf_link_hash_table *htab = elf_hash_table (info);
10568 if ((osec->flags & SEC_READONLY) == 0
10569 && htab->data_index_section != NULL)
10570 osec = htab->data_index_section;
10572 osec = htab->text_index_section;
10573 symbol = elf_section_data (osec)->dynindx;
10575 BFD_ASSERT (symbol != 0);
10578 /* On SVR4-ish systems, the dynamic loader cannot
10579 relocate the text and data segments independently,
10580 so the symbol does not matter. */
10582 if (dynreloc_st_type == STT_GNU_IFUNC)
10583 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10584 to the .iplt entry. Instead, every non-call reference
10585 must use an R_ARM_IRELATIVE relocation to obtain the
10586 correct run-time address. */
10587 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10588 else if (globals->fdpic_p && !bfd_link_pic(info))
10591 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10592 if (globals->use_rel)
10595 outrel.r_addend += dynreloc_value;
10599 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
10601 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10603 /* If this reloc is against an external symbol, we do not want to
10604 fiddle with the addend. Otherwise, we need to include the symbol
10605 value so that it becomes an addend for the dynamic reloc. */
10607 return bfd_reloc_ok;
10609 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10610 contents, rel->r_offset,
10611 dynreloc_value, (bfd_vma) 0);
10613 else switch (r_type)
10616 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10618 case R_ARM_XPC25: /* Arm BLX instruction. */
10621 case R_ARM_PC24: /* Arm B/BL instruction. */
10624 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10626 if (r_type == R_ARM_XPC25)
10628 /* Check for Arm calling Arm function. */
10629 /* FIXME: Should we translate the instruction into a BL
10630 instruction instead ? */
10631 if (branch_type != ST_BRANCH_TO_THUMB)
10633 (_("\%pB: warning: %s BLX instruction targets"
10634 " %s function '%s'"),
10636 "ARM", h ? h->root.root.string : "(local)");
10638 else if (r_type == R_ARM_PC24)
10640 /* Check for Arm calling Thumb function. */
10641 if (branch_type == ST_BRANCH_TO_THUMB)
10643 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10644 output_bfd, input_section,
10645 hit_data, sym_sec, rel->r_offset,
10646 signed_addend, value,
10648 return bfd_reloc_ok;
10650 return bfd_reloc_dangerous;
10654 /* Check if a stub has to be inserted because the
10655 destination is too far or we are changing mode. */
10656 if ( r_type == R_ARM_CALL
10657 || r_type == R_ARM_JUMP24
10658 || r_type == R_ARM_PLT32)
10660 enum elf32_arm_stub_type stub_type = arm_stub_none;
10661 struct elf32_arm_link_hash_entry *hash;
10663 hash = (struct elf32_arm_link_hash_entry *) h;
10664 stub_type = arm_type_of_stub (info, input_section, rel,
10665 st_type, &branch_type,
10666 hash, value, sym_sec,
10667 input_bfd, sym_name);
10669 if (stub_type != arm_stub_none)
10671 /* The target is out of reach, so redirect the
10672 branch to the local stub for this function. */
10673 stub_entry = elf32_arm_get_stub_entry (input_section,
10678 if (stub_entry != NULL)
10679 value = (stub_entry->stub_offset
10680 + stub_entry->stub_sec->output_offset
10681 + stub_entry->stub_sec->output_section->vma);
10683 if (plt_offset != (bfd_vma) -1)
10684 *unresolved_reloc_p = FALSE;
10689 /* If the call goes through a PLT entry, make sure to
10690 check distance to the right destination address. */
10691 if (plt_offset != (bfd_vma) -1)
10693 value = (splt->output_section->vma
10694 + splt->output_offset
10696 *unresolved_reloc_p = FALSE;
10697 /* The PLT entry is in ARM mode, regardless of the
10698 target function. */
10699 branch_type = ST_BRANCH_TO_ARM;
10704 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10706 S is the address of the symbol in the relocation.
10707 P is address of the instruction being relocated.
10708 A is the addend (extracted from the instruction) in bytes.
10710 S is held in 'value'.
10711 P is the base address of the section containing the
10712 instruction plus the offset of the reloc into that
10714 (input_section->output_section->vma +
10715 input_section->output_offset +
10717 A is the addend, converted into bytes, ie:
10718 (signed_addend * 4)
10720 Note: None of these operations have knowledge of the pipeline
10721 size of the processor, thus it is up to the assembler to
10722 encode this information into the addend. */
10723 value -= (input_section->output_section->vma
10724 + input_section->output_offset);
10725 value -= rel->r_offset;
10726 if (globals->use_rel)
10727 value += (signed_addend << howto->size);
10729 /* RELA addends do not have to be adjusted by howto->size. */
10730 value += signed_addend;
10732 signed_addend = value;
10733 signed_addend >>= howto->rightshift;
10735 /* A branch to an undefined weak symbol is turned into a jump to
10736 the next instruction unless a PLT entry will be created.
10737 Do the same for local undefined symbols (but not for STN_UNDEF).
10738 The jump to the next instruction is optimized as a NOP depending
10739 on the architecture. */
10740 if (h ? (h->root.type == bfd_link_hash_undefweak
10741 && plt_offset == (bfd_vma) -1)
10742 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10744 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10746 if (arch_has_arm_nop (globals))
10747 value |= 0x0320f000;
10749 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10753 /* Perform a signed range check. */
10754 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10755 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10756 return bfd_reloc_overflow;
10758 addend = (value & 2);
10760 value = (signed_addend & howto->dst_mask)
10761 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10763 if (r_type == R_ARM_CALL)
10765 /* Set the H bit in the BLX instruction. */
10766 if (branch_type == ST_BRANCH_TO_THUMB)
10769 value |= (1 << 24);
10771 value &= ~(bfd_vma)(1 << 24);
10774 /* Select the correct instruction (BL or BLX). */
10775 /* Only if we are not handling a BL to a stub. In this
10776 case, mode switching is performed by the stub. */
10777 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10778 value |= (1 << 28);
10779 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10781 value &= ~(bfd_vma)(1 << 28);
10782 value |= (1 << 24);
10791 if (branch_type == ST_BRANCH_TO_THUMB)
10795 case R_ARM_ABS32_NOI:
10801 if (branch_type == ST_BRANCH_TO_THUMB)
10803 value -= (input_section->output_section->vma
10804 + input_section->output_offset + rel->r_offset);
10807 case R_ARM_REL32_NOI:
10809 value -= (input_section->output_section->vma
10810 + input_section->output_offset + rel->r_offset);
10814 value -= (input_section->output_section->vma
10815 + input_section->output_offset + rel->r_offset);
10816 value += signed_addend;
10817 if (! h || h->root.type != bfd_link_hash_undefweak)
10819 /* Check for overflow. */
10820 if ((value ^ (value >> 1)) & (1 << 30))
10821 return bfd_reloc_overflow;
10823 value &= 0x7fffffff;
10824 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10825 if (branch_type == ST_BRANCH_TO_THUMB)
10830 bfd_put_32 (input_bfd, value, hit_data);
10831 return bfd_reloc_ok;
10834 /* PR 16202: Refectch the addend using the correct size. */
10835 if (globals->use_rel)
10836 addend = bfd_get_8 (input_bfd, hit_data);
10839 /* There is no way to tell whether the user intended to use a signed or
10840 unsigned addend. When checking for overflow we accept either,
10841 as specified by the AAELF. */
10842 if ((long) value > 0xff || (long) value < -0x80)
10843 return bfd_reloc_overflow;
10845 bfd_put_8 (input_bfd, value, hit_data);
10846 return bfd_reloc_ok;
10849 /* PR 16202: Refectch the addend using the correct size. */
10850 if (globals->use_rel)
10851 addend = bfd_get_16 (input_bfd, hit_data);
10854 /* See comment for R_ARM_ABS8. */
10855 if ((long) value > 0xffff || (long) value < -0x8000)
10856 return bfd_reloc_overflow;
10858 bfd_put_16 (input_bfd, value, hit_data);
10859 return bfd_reloc_ok;
10861 case R_ARM_THM_ABS5:
10862 /* Support ldr and str instructions for the thumb. */
10863 if (globals->use_rel)
10865 /* Need to refetch addend. */
10866 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10867 /* ??? Need to determine shift amount from operand size. */
10868 addend >>= howto->rightshift;
10872 /* ??? Isn't value unsigned? */
10873 if ((long) value > 0x1f || (long) value < -0x10)
10874 return bfd_reloc_overflow;
10876 /* ??? Value needs to be properly shifted into place first. */
10877 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10878 bfd_put_16 (input_bfd, value, hit_data);
10879 return bfd_reloc_ok;
10881 case R_ARM_THM_ALU_PREL_11_0:
10882 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
10885 bfd_signed_vma relocation;
10887 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10888 | bfd_get_16 (input_bfd, hit_data + 2);
10890 if (globals->use_rel)
10892 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10893 | ((insn & (1 << 26)) >> 15);
10894 if (insn & 0xf00000)
10895 signed_addend = -signed_addend;
10898 relocation = value + signed_addend;
10899 relocation -= Pa (input_section->output_section->vma
10900 + input_section->output_offset
10903 /* PR 21523: Use an absolute value. The user of this reloc will
10904 have already selected an ADD or SUB insn appropriately. */
10905 value = labs (relocation);
10907 if (value >= 0x1000)
10908 return bfd_reloc_overflow;
10910 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10911 if (branch_type == ST_BRANCH_TO_THUMB)
10914 insn = (insn & 0xfb0f8f00) | (value & 0xff)
10915 | ((value & 0x700) << 4)
10916 | ((value & 0x800) << 15);
10917 if (relocation < 0)
10920 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10921 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10923 return bfd_reloc_ok;
10926 case R_ARM_THM_PC8:
10927 /* PR 10073: This reloc is not generated by the GNU toolchain,
10928 but it is supported for compatibility with third party libraries
10929 generated by other compilers, specifically the ARM/IAR. */
10932 bfd_signed_vma relocation;
10934 insn = bfd_get_16 (input_bfd, hit_data);
10936 if (globals->use_rel)
10937 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
10939 relocation = value + addend;
10940 relocation -= Pa (input_section->output_section->vma
10941 + input_section->output_offset
10944 value = relocation;
10946 /* We do not check for overflow of this reloc. Although strictly
10947 speaking this is incorrect, it appears to be necessary in order
10948 to work with IAR generated relocs. Since GCC and GAS do not
10949 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10950 a problem for them. */
10953 insn = (insn & 0xff00) | (value >> 2);
10955 bfd_put_16 (input_bfd, insn, hit_data);
10957 return bfd_reloc_ok;
10960 case R_ARM_THM_PC12:
10961 /* Corresponds to: ldr.w reg, [pc, #offset]. */
10964 bfd_signed_vma relocation;
10966 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10967 | bfd_get_16 (input_bfd, hit_data + 2);
10969 if (globals->use_rel)
10971 signed_addend = insn & 0xfff;
10972 if (!(insn & (1 << 23)))
10973 signed_addend = -signed_addend;
10976 relocation = value + signed_addend;
10977 relocation -= Pa (input_section->output_section->vma
10978 + input_section->output_offset
10981 value = relocation;
10983 if (value >= 0x1000)
10984 return bfd_reloc_overflow;
10986 insn = (insn & 0xff7ff000) | value;
10987 if (relocation >= 0)
10990 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10991 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10993 return bfd_reloc_ok;
10996 case R_ARM_THM_XPC22:
10997 case R_ARM_THM_CALL:
10998 case R_ARM_THM_JUMP24:
10999 /* Thumb BL (branch long instruction). */
11001 bfd_vma relocation;
11002 bfd_vma reloc_sign;
11003 bfd_boolean overflow = FALSE;
11004 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11005 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11006 bfd_signed_vma reloc_signed_max;
11007 bfd_signed_vma reloc_signed_min;
11009 bfd_signed_vma signed_check;
11011 const int thumb2 = using_thumb2 (globals);
11012 const int thumb2_bl = using_thumb2_bl (globals);
11014 /* A branch to an undefined weak symbol is turned into a jump to
11015 the next instruction unless a PLT entry will be created.
11016 The jump to the next instruction is optimized as a NOP.W for
11017 Thumb-2 enabled architectures. */
11018 if (h && h->root.type == bfd_link_hash_undefweak
11019 && plt_offset == (bfd_vma) -1)
11023 bfd_put_16 (input_bfd, 0xf3af, hit_data);
11024 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
11028 bfd_put_16 (input_bfd, 0xe000, hit_data);
11029 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
11031 return bfd_reloc_ok;
11034 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
11035 with Thumb-1) involving the J1 and J2 bits. */
11036 if (globals->use_rel)
11038 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
11039 bfd_vma upper = upper_insn & 0x3ff;
11040 bfd_vma lower = lower_insn & 0x7ff;
11041 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
11042 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
11043 bfd_vma i1 = j1 ^ s ? 0 : 1;
11044 bfd_vma i2 = j2 ^ s ? 0 : 1;
11046 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
11048 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
11050 signed_addend = addend;
11053 if (r_type == R_ARM_THM_XPC22)
11055 /* Check for Thumb to Thumb call. */
11056 /* FIXME: Should we translate the instruction into a BL
11057 instruction instead ? */
11058 if (branch_type == ST_BRANCH_TO_THUMB)
11060 (_("%pB: warning: %s BLX instruction targets"
11061 " %s function '%s'"),
11062 input_bfd, "Thumb",
11063 "Thumb", h ? h->root.root.string : "(local)");
11067 /* If it is not a call to Thumb, assume call to Arm.
11068 If it is a call relative to a section name, then it is not a
11069 function call at all, but rather a long jump. Calls through
11070 the PLT do not require stubs. */
11071 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
11073 if (globals->use_blx && r_type == R_ARM_THM_CALL)
11075 /* Convert BL to BLX. */
11076 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11078 else if (( r_type != R_ARM_THM_CALL)
11079 && (r_type != R_ARM_THM_JUMP24))
11081 if (elf32_thumb_to_arm_stub
11082 (info, sym_name, input_bfd, output_bfd, input_section,
11083 hit_data, sym_sec, rel->r_offset, signed_addend, value,
11085 return bfd_reloc_ok;
11087 return bfd_reloc_dangerous;
11090 else if (branch_type == ST_BRANCH_TO_THUMB
11091 && globals->use_blx
11092 && r_type == R_ARM_THM_CALL)
11094 /* Make sure this is a BL. */
11095 lower_insn |= 0x1800;
11099 enum elf32_arm_stub_type stub_type = arm_stub_none;
11100 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
11102 /* Check if a stub has to be inserted because the destination
11104 struct elf32_arm_stub_hash_entry *stub_entry;
11105 struct elf32_arm_link_hash_entry *hash;
11107 hash = (struct elf32_arm_link_hash_entry *) h;
11109 stub_type = arm_type_of_stub (info, input_section, rel,
11110 st_type, &branch_type,
11111 hash, value, sym_sec,
11112 input_bfd, sym_name);
11114 if (stub_type != arm_stub_none)
11116 /* The target is out of reach or we are changing modes, so
11117 redirect the branch to the local stub for this
11119 stub_entry = elf32_arm_get_stub_entry (input_section,
11123 if (stub_entry != NULL)
11125 value = (stub_entry->stub_offset
11126 + stub_entry->stub_sec->output_offset
11127 + stub_entry->stub_sec->output_section->vma);
11129 if (plt_offset != (bfd_vma) -1)
11130 *unresolved_reloc_p = FALSE;
11133 /* If this call becomes a call to Arm, force BLX. */
11134 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
11137 && !arm_stub_is_thumb (stub_entry->stub_type))
11138 || branch_type != ST_BRANCH_TO_THUMB)
11139 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11144 /* Handle calls via the PLT. */
11145 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
11147 value = (splt->output_section->vma
11148 + splt->output_offset
11151 if (globals->use_blx
11152 && r_type == R_ARM_THM_CALL
11153 && ! using_thumb_only (globals))
11155 /* If the Thumb BLX instruction is available, convert
11156 the BL to a BLX instruction to call the ARM-mode
11158 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11159 branch_type = ST_BRANCH_TO_ARM;
11163 if (! using_thumb_only (globals))
11164 /* Target the Thumb stub before the ARM PLT entry. */
11165 value -= PLT_THUMB_STUB_SIZE;
11166 branch_type = ST_BRANCH_TO_THUMB;
11168 *unresolved_reloc_p = FALSE;
11171 relocation = value + signed_addend;
11173 relocation -= (input_section->output_section->vma
11174 + input_section->output_offset
11177 check = relocation >> howto->rightshift;
11179 /* If this is a signed value, the rightshift just dropped
11180 leading 1 bits (assuming twos complement). */
11181 if ((bfd_signed_vma) relocation >= 0)
11182 signed_check = check;
11184 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
11186 /* Calculate the permissable maximum and minimum values for
11187 this relocation according to whether we're relocating for
11189 bitsize = howto->bitsize;
11192 reloc_signed_max = (1 << (bitsize - 1)) - 1;
11193 reloc_signed_min = ~reloc_signed_max;
11195 /* Assumes two's complement. */
11196 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11199 if ((lower_insn & 0x5000) == 0x4000)
11200 /* For a BLX instruction, make sure that the relocation is rounded up
11201 to a word boundary. This follows the semantics of the instruction
11202 which specifies that bit 1 of the target address will come from bit
11203 1 of the base address. */
11204 relocation = (relocation + 2) & ~ 3;
11206 /* Put RELOCATION back into the insn. Assumes two's complement.
11207 We use the Thumb-2 encoding, which is safe even if dealing with
11208 a Thumb-1 instruction by virtue of our overflow check above. */
11209 reloc_sign = (signed_check < 0) ? 1 : 0;
11210 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
11211 | ((relocation >> 12) & 0x3ff)
11212 | (reloc_sign << 10);
11213 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
11214 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
11215 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
11216 | ((relocation >> 1) & 0x7ff);
11218 /* Put the relocated value back in the object file: */
11219 bfd_put_16 (input_bfd, upper_insn, hit_data);
11220 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11222 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11226 case R_ARM_THM_JUMP19:
11227 /* Thumb32 conditional branch instruction. */
11229 bfd_vma relocation;
11230 bfd_boolean overflow = FALSE;
11231 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11232 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11233 bfd_signed_vma reloc_signed_max = 0xffffe;
11234 bfd_signed_vma reloc_signed_min = -0x100000;
11235 bfd_signed_vma signed_check;
11236 enum elf32_arm_stub_type stub_type = arm_stub_none;
11237 struct elf32_arm_stub_hash_entry *stub_entry;
11238 struct elf32_arm_link_hash_entry *hash;
11240 /* Need to refetch the addend, reconstruct the top three bits,
11241 and squish the two 11 bit pieces together. */
11242 if (globals->use_rel)
11244 bfd_vma S = (upper_insn & 0x0400) >> 10;
11245 bfd_vma upper = (upper_insn & 0x003f);
11246 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
11247 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
11248 bfd_vma lower = (lower_insn & 0x07ff);
11252 upper |= (!S) << 8;
11253 upper -= 0x0100; /* Sign extend. */
11255 addend = (upper << 12) | (lower << 1);
11256 signed_addend = addend;
11259 /* Handle calls via the PLT. */
11260 if (plt_offset != (bfd_vma) -1)
11262 value = (splt->output_section->vma
11263 + splt->output_offset
11265 /* Target the Thumb stub before the ARM PLT entry. */
11266 value -= PLT_THUMB_STUB_SIZE;
11267 *unresolved_reloc_p = FALSE;
11270 hash = (struct elf32_arm_link_hash_entry *)h;
11272 stub_type = arm_type_of_stub (info, input_section, rel,
11273 st_type, &branch_type,
11274 hash, value, sym_sec,
11275 input_bfd, sym_name);
11276 if (stub_type != arm_stub_none)
11278 stub_entry = elf32_arm_get_stub_entry (input_section,
11282 if (stub_entry != NULL)
11284 value = (stub_entry->stub_offset
11285 + stub_entry->stub_sec->output_offset
11286 + stub_entry->stub_sec->output_section->vma);
11290 relocation = value + signed_addend;
11291 relocation -= (input_section->output_section->vma
11292 + input_section->output_offset
11294 signed_check = (bfd_signed_vma) relocation;
11296 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11299 /* Put RELOCATION back into the insn. */
11301 bfd_vma S = (relocation & 0x00100000) >> 20;
11302 bfd_vma J2 = (relocation & 0x00080000) >> 19;
11303 bfd_vma J1 = (relocation & 0x00040000) >> 18;
11304 bfd_vma hi = (relocation & 0x0003f000) >> 12;
11305 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
11307 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
11308 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
11311 /* Put the relocated value back in the object file: */
11312 bfd_put_16 (input_bfd, upper_insn, hit_data);
11313 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11315 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11318 case R_ARM_THM_JUMP11:
11319 case R_ARM_THM_JUMP8:
11320 case R_ARM_THM_JUMP6:
11321 /* Thumb B (branch) instruction). */
11323 bfd_signed_vma relocation;
11324 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
11325 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
11326 bfd_signed_vma signed_check;
11328 /* CZB cannot jump backward. */
11329 if (r_type == R_ARM_THM_JUMP6)
11330 reloc_signed_min = 0;
11332 if (globals->use_rel)
11334 /* Need to refetch addend. */
11335 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
11336 if (addend & ((howto->src_mask + 1) >> 1))
11338 signed_addend = -1;
11339 signed_addend &= ~ howto->src_mask;
11340 signed_addend |= addend;
11343 signed_addend = addend;
11344 /* The value in the insn has been right shifted. We need to
11345 undo this, so that we can perform the address calculation
11346 in terms of bytes. */
11347 signed_addend <<= howto->rightshift;
11349 relocation = value + signed_addend;
11351 relocation -= (input_section->output_section->vma
11352 + input_section->output_offset
11355 relocation >>= howto->rightshift;
11356 signed_check = relocation;
11358 if (r_type == R_ARM_THM_JUMP6)
11359 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
11361 relocation &= howto->dst_mask;
11362 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
11364 bfd_put_16 (input_bfd, relocation, hit_data);
11366 /* Assumes two's complement. */
11367 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11368 return bfd_reloc_overflow;
11370 return bfd_reloc_ok;
11373 case R_ARM_ALU_PCREL7_0:
11374 case R_ARM_ALU_PCREL15_8:
11375 case R_ARM_ALU_PCREL23_15:
11378 bfd_vma relocation;
11380 insn = bfd_get_32 (input_bfd, hit_data);
11381 if (globals->use_rel)
11383 /* Extract the addend. */
11384 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11385 signed_addend = addend;
11387 relocation = value + signed_addend;
11389 relocation -= (input_section->output_section->vma
11390 + input_section->output_offset
11392 insn = (insn & ~0xfff)
11393 | ((howto->bitpos << 7) & 0xf00)
11394 | ((relocation >> howto->bitpos) & 0xff);
11395 bfd_put_32 (input_bfd, value, hit_data);
11397 return bfd_reloc_ok;
11399 case R_ARM_GNU_VTINHERIT:
11400 case R_ARM_GNU_VTENTRY:
11401 return bfd_reloc_ok;
11403 case R_ARM_GOTOFF32:
11404 /* Relocation is relative to the start of the
11405 global offset table. */
11407 BFD_ASSERT (sgot != NULL);
11409 return bfd_reloc_notsupported;
11411 /* If we are addressing a Thumb function, we need to adjust the
11412 address by one, so that attempts to call the function pointer will
11413 correctly interpret it as Thumb code. */
11414 if (branch_type == ST_BRANCH_TO_THUMB)
11417 /* Note that sgot->output_offset is not involved in this
11418 calculation. We always want the start of .got. If we
11419 define _GLOBAL_OFFSET_TABLE in a different way, as is
11420 permitted by the ABI, we might have to change this
11422 value -= sgot->output_section->vma;
11423 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11424 contents, rel->r_offset, value,
11428 /* Use global offset table as symbol value. */
11429 BFD_ASSERT (sgot != NULL);
11432 return bfd_reloc_notsupported;
11434 *unresolved_reloc_p = FALSE;
11435 value = sgot->output_section->vma;
11436 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11437 contents, rel->r_offset, value,
11441 case R_ARM_GOT_PREL:
11442 /* Relocation is to the entry for this symbol in the
11443 global offset table. */
11445 return bfd_reloc_notsupported;
11447 if (dynreloc_st_type == STT_GNU_IFUNC
11448 && plt_offset != (bfd_vma) -1
11449 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
11451 /* We have a relocation against a locally-binding STT_GNU_IFUNC
11452 symbol, and the relocation resolves directly to the runtime
11453 target rather than to the .iplt entry. This means that any
11454 .got entry would be the same value as the .igot.plt entry,
11455 so there's no point creating both. */
11456 sgot = globals->root.igotplt;
11457 value = sgot->output_offset + gotplt_offset;
11459 else if (h != NULL)
11463 off = h->got.offset;
11464 BFD_ASSERT (off != (bfd_vma) -1);
11465 if ((off & 1) != 0)
11467 /* We have already processsed one GOT relocation against
11470 if (globals->root.dynamic_sections_created
11471 && !SYMBOL_REFERENCES_LOCAL (info, h))
11472 *unresolved_reloc_p = FALSE;
11476 Elf_Internal_Rela outrel;
11479 if (((h->dynindx != -1) || globals->fdpic_p)
11480 && !SYMBOL_REFERENCES_LOCAL (info, h))
11482 /* If the symbol doesn't resolve locally in a static
11483 object, we have an undefined reference. If the
11484 symbol doesn't resolve locally in a dynamic object,
11485 it should be resolved by the dynamic linker. */
11486 if (globals->root.dynamic_sections_created)
11488 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11489 *unresolved_reloc_p = FALSE;
11493 outrel.r_addend = 0;
11497 if (dynreloc_st_type == STT_GNU_IFUNC)
11498 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11499 else if (bfd_link_pic (info)
11500 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11501 || h->root.type != bfd_link_hash_undefweak))
11502 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11503 else if (globals->fdpic_p)
11507 outrel.r_addend = dynreloc_value;
11510 /* The GOT entry is initialized to zero by default.
11511 See if we should install a different value. */
11512 if (outrel.r_addend != 0
11513 && (outrel.r_info == 0 || globals->use_rel || isrofixup))
11515 bfd_put_32 (output_bfd, outrel.r_addend,
11516 sgot->contents + off);
11517 outrel.r_addend = 0;
11520 if (outrel.r_info != 0 && !isrofixup)
11522 outrel.r_offset = (sgot->output_section->vma
11523 + sgot->output_offset
11525 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11527 else if (isrofixup)
11529 arm_elf_add_rofixup(output_bfd,
11530 elf32_arm_hash_table(info)->srofixup,
11531 sgot->output_section->vma
11532 + sgot->output_offset + off);
11534 h->got.offset |= 1;
11536 value = sgot->output_offset + off;
11542 BFD_ASSERT (local_got_offsets != NULL
11543 && local_got_offsets[r_symndx] != (bfd_vma) -1);
11545 off = local_got_offsets[r_symndx];
11547 /* The offset must always be a multiple of 4. We use the
11548 least significant bit to record whether we have already
11549 generated the necessary reloc. */
11550 if ((off & 1) != 0)
11554 if (globals->use_rel)
11555 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
11557 if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
11559 Elf_Internal_Rela outrel;
11561 outrel.r_addend = addend + dynreloc_value;
11562 outrel.r_offset = (sgot->output_section->vma
11563 + sgot->output_offset
11565 if (dynreloc_st_type == STT_GNU_IFUNC)
11566 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11568 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11569 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11571 else if (globals->fdpic_p)
11573 /* For FDPIC executables, we use rofixup to fix
11574 address at runtime. */
11575 arm_elf_add_rofixup(output_bfd, globals->srofixup,
11576 sgot->output_section->vma + sgot->output_offset
11580 local_got_offsets[r_symndx] |= 1;
11583 value = sgot->output_offset + off;
11585 if (r_type != R_ARM_GOT32)
11586 value += sgot->output_section->vma;
11588 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11589 contents, rel->r_offset, value,
11592 case R_ARM_TLS_LDO32:
11593 value = value - dtpoff_base (info);
11595 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11596 contents, rel->r_offset, value,
11599 case R_ARM_TLS_LDM32:
11600 case R_ARM_TLS_LDM32_FDPIC:
11607 off = globals->tls_ldm_got.offset;
11609 if ((off & 1) != 0)
11613 /* If we don't know the module number, create a relocation
11615 if (bfd_link_pic (info))
11617 Elf_Internal_Rela outrel;
11619 if (srelgot == NULL)
11622 outrel.r_addend = 0;
11623 outrel.r_offset = (sgot->output_section->vma
11624 + sgot->output_offset + off);
11625 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
11627 if (globals->use_rel)
11628 bfd_put_32 (output_bfd, outrel.r_addend,
11629 sgot->contents + off);
11631 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11634 bfd_put_32 (output_bfd, 1, sgot->contents + off);
11636 globals->tls_ldm_got.offset |= 1;
11639 if (r_type == R_ARM_TLS_LDM32_FDPIC)
11641 bfd_put_32(output_bfd,
11642 globals->root.sgot->output_offset + off,
11643 contents + rel->r_offset);
11645 return bfd_reloc_ok;
11649 value = sgot->output_section->vma + sgot->output_offset + off
11650 - (input_section->output_section->vma
11651 + input_section->output_offset + rel->r_offset);
11653 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11654 contents, rel->r_offset, value,
11659 case R_ARM_TLS_CALL:
11660 case R_ARM_THM_TLS_CALL:
11661 case R_ARM_TLS_GD32:
11662 case R_ARM_TLS_GD32_FDPIC:
11663 case R_ARM_TLS_IE32:
11664 case R_ARM_TLS_IE32_FDPIC:
11665 case R_ARM_TLS_GOTDESC:
11666 case R_ARM_TLS_DESCSEQ:
11667 case R_ARM_THM_TLS_DESCSEQ:
11669 bfd_vma off, offplt;
11673 BFD_ASSERT (sgot != NULL);
11678 dyn = globals->root.dynamic_sections_created;
11679 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
11680 bfd_link_pic (info),
11682 && (!bfd_link_pic (info)
11683 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11685 *unresolved_reloc_p = FALSE;
11688 off = h->got.offset;
11689 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
11690 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
11694 BFD_ASSERT (local_got_offsets != NULL);
11695 off = local_got_offsets[r_symndx];
11696 offplt = local_tlsdesc_gotents[r_symndx];
11697 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
11700 /* Linker relaxations happens from one of the
11701 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
11702 if (ELF32_R_TYPE(rel->r_info) != r_type)
11703 tls_type = GOT_TLS_IE;
11705 BFD_ASSERT (tls_type != GOT_UNKNOWN);
11707 if ((off & 1) != 0)
11711 bfd_boolean need_relocs = FALSE;
11712 Elf_Internal_Rela outrel;
11715 /* The GOT entries have not been initialized yet. Do it
11716 now, and emit any relocations. If both an IE GOT and a
11717 GD GOT are necessary, we emit the GD first. */
11719 if ((bfd_link_pic (info) || indx != 0)
11721 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11722 && !resolved_to_zero)
11723 || h->root.type != bfd_link_hash_undefweak))
11725 need_relocs = TRUE;
11726 BFD_ASSERT (srelgot != NULL);
11729 if (tls_type & GOT_TLS_GDESC)
11733 /* We should have relaxed, unless this is an undefined
11735 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
11736 || bfd_link_pic (info));
11737 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
11738 <= globals->root.sgotplt->size);
11740 outrel.r_addend = 0;
11741 outrel.r_offset = (globals->root.sgotplt->output_section->vma
11742 + globals->root.sgotplt->output_offset
11744 + globals->sgotplt_jump_table_size);
11746 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
11747 sreloc = globals->root.srelplt;
11748 loc = sreloc->contents;
11749 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
11750 BFD_ASSERT (loc + RELOC_SIZE (globals)
11751 <= sreloc->contents + sreloc->size);
11753 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
11755 /* For globals, the first word in the relocation gets
11756 the relocation index and the top bit set, or zero,
11757 if we're binding now. For locals, it gets the
11758 symbol's offset in the tls section. */
11759 bfd_put_32 (output_bfd,
11760 !h ? value - elf_hash_table (info)->tls_sec->vma
11761 : info->flags & DF_BIND_NOW ? 0
11762 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
11763 globals->root.sgotplt->contents + offplt
11764 + globals->sgotplt_jump_table_size);
11766 /* Second word in the relocation is always zero. */
11767 bfd_put_32 (output_bfd, 0,
11768 globals->root.sgotplt->contents + offplt
11769 + globals->sgotplt_jump_table_size + 4);
11771 if (tls_type & GOT_TLS_GD)
11775 outrel.r_addend = 0;
11776 outrel.r_offset = (sgot->output_section->vma
11777 + sgot->output_offset
11779 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
11781 if (globals->use_rel)
11782 bfd_put_32 (output_bfd, outrel.r_addend,
11783 sgot->contents + cur_off);
11785 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11788 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11789 sgot->contents + cur_off + 4);
11792 outrel.r_addend = 0;
11793 outrel.r_info = ELF32_R_INFO (indx,
11794 R_ARM_TLS_DTPOFF32);
11795 outrel.r_offset += 4;
11797 if (globals->use_rel)
11798 bfd_put_32 (output_bfd, outrel.r_addend,
11799 sgot->contents + cur_off + 4);
11801 elf32_arm_add_dynreloc (output_bfd, info,
11807 /* If we are not emitting relocations for a
11808 general dynamic reference, then we must be in a
11809 static link or an executable link with the
11810 symbol binding locally. Mark it as belonging
11811 to module 1, the executable. */
11812 bfd_put_32 (output_bfd, 1,
11813 sgot->contents + cur_off);
11814 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11815 sgot->contents + cur_off + 4);
11821 if (tls_type & GOT_TLS_IE)
11826 outrel.r_addend = value - dtpoff_base (info);
11828 outrel.r_addend = 0;
11829 outrel.r_offset = (sgot->output_section->vma
11830 + sgot->output_offset
11832 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
11834 if (globals->use_rel)
11835 bfd_put_32 (output_bfd, outrel.r_addend,
11836 sgot->contents + cur_off);
11838 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11841 bfd_put_32 (output_bfd, tpoff (info, value),
11842 sgot->contents + cur_off);
11847 h->got.offset |= 1;
11849 local_got_offsets[r_symndx] |= 1;
11852 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
11854 else if (tls_type & GOT_TLS_GDESC)
11857 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
11858 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
11860 bfd_signed_vma offset;
11861 /* TLS stubs are arm mode. The original symbol is a
11862 data object, so branch_type is bogus. */
11863 branch_type = ST_BRANCH_TO_ARM;
11864 enum elf32_arm_stub_type stub_type
11865 = arm_type_of_stub (info, input_section, rel,
11866 st_type, &branch_type,
11867 (struct elf32_arm_link_hash_entry *)h,
11868 globals->tls_trampoline, globals->root.splt,
11869 input_bfd, sym_name);
11871 if (stub_type != arm_stub_none)
11873 struct elf32_arm_stub_hash_entry *stub_entry
11874 = elf32_arm_get_stub_entry
11875 (input_section, globals->root.splt, 0, rel,
11876 globals, stub_type);
11877 offset = (stub_entry->stub_offset
11878 + stub_entry->stub_sec->output_offset
11879 + stub_entry->stub_sec->output_section->vma);
11882 offset = (globals->root.splt->output_section->vma
11883 + globals->root.splt->output_offset
11884 + globals->tls_trampoline);
11886 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
11888 unsigned long inst;
11890 offset -= (input_section->output_section->vma
11891 + input_section->output_offset
11892 + rel->r_offset + 8);
11894 inst = offset >> 2;
11895 inst &= 0x00ffffff;
11896 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
11900 /* Thumb blx encodes the offset in a complicated
11902 unsigned upper_insn, lower_insn;
11905 offset -= (input_section->output_section->vma
11906 + input_section->output_offset
11907 + rel->r_offset + 4);
11909 if (stub_type != arm_stub_none
11910 && arm_stub_is_thumb (stub_type))
11912 lower_insn = 0xd000;
11916 lower_insn = 0xc000;
11917 /* Round up the offset to a word boundary. */
11918 offset = (offset + 2) & ~2;
11922 upper_insn = (0xf000
11923 | ((offset >> 12) & 0x3ff)
11925 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
11926 | (((!((offset >> 22) & 1)) ^ neg) << 11)
11927 | ((offset >> 1) & 0x7ff);
11928 bfd_put_16 (input_bfd, upper_insn, hit_data);
11929 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11930 return bfd_reloc_ok;
11933 /* These relocations needs special care, as besides the fact
11934 they point somewhere in .gotplt, the addend must be
11935 adjusted accordingly depending on the type of instruction
11937 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
11939 unsigned long data, insn;
11942 data = bfd_get_32 (input_bfd, hit_data);
11948 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
11949 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
11950 insn = (insn << 16)
11951 | bfd_get_16 (input_bfd,
11952 contents + rel->r_offset - data + 2);
11953 if ((insn & 0xf800c000) == 0xf000c000)
11956 else if ((insn & 0xffffff00) == 0x4400)
11962 /* xgettext:c-format */
11963 (_("%pB(%pA+%#" PRIx64 "): "
11964 "unexpected %s instruction '%#lx' "
11965 "referenced by TLS_GOTDESC"),
11966 input_bfd, input_section, (uint64_t) rel->r_offset,
11968 return bfd_reloc_notsupported;
11973 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
11975 switch (insn >> 24)
11977 case 0xeb: /* bl */
11978 case 0xfa: /* blx */
11982 case 0xe0: /* add */
11988 /* xgettext:c-format */
11989 (_("%pB(%pA+%#" PRIx64 "): "
11990 "unexpected %s instruction '%#lx' "
11991 "referenced by TLS_GOTDESC"),
11992 input_bfd, input_section, (uint64_t) rel->r_offset,
11994 return bfd_reloc_notsupported;
11998 value += ((globals->root.sgotplt->output_section->vma
11999 + globals->root.sgotplt->output_offset + off)
12000 - (input_section->output_section->vma
12001 + input_section->output_offset
12003 + globals->sgotplt_jump_table_size);
12006 value = ((globals->root.sgot->output_section->vma
12007 + globals->root.sgot->output_offset + off)
12008 - (input_section->output_section->vma
12009 + input_section->output_offset + rel->r_offset));
12011 if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
12012 r_type == R_ARM_TLS_IE32_FDPIC))
12014 /* For FDPIC relocations, resolve to the offset of the GOT
12015 entry from the start of GOT. */
12016 bfd_put_32(output_bfd,
12017 globals->root.sgot->output_offset + off,
12018 contents + rel->r_offset);
12020 return bfd_reloc_ok;
12024 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12025 contents, rel->r_offset, value,
12030 case R_ARM_TLS_LE32:
12031 if (bfd_link_dll (info))
12034 /* xgettext:c-format */
12035 (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
12036 "in shared object"),
12037 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
12038 return bfd_reloc_notsupported;
12041 value = tpoff (info, value);
12043 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12044 contents, rel->r_offset, value,
12048 if (globals->fix_v4bx)
12050 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12052 /* Ensure that we have a BX instruction. */
12053 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12055 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12057 /* Branch to veneer. */
12059 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12060 glue_addr -= input_section->output_section->vma
12061 + input_section->output_offset
12062 + rel->r_offset + 8;
12063 insn = (insn & 0xf0000000) | 0x0a000000
12064 | ((glue_addr >> 2) & 0x00ffffff);
12068 /* Preserve Rm (lowest four bits) and the condition code
12069 (highest four bits). Other bits encode MOV PC,Rm. */
12070 insn = (insn & 0xf000000f) | 0x01a0f000;
12073 bfd_put_32 (input_bfd, insn, hit_data);
12075 return bfd_reloc_ok;
12077 case R_ARM_MOVW_ABS_NC:
12078 case R_ARM_MOVT_ABS:
12079 case R_ARM_MOVW_PREL_NC:
12080 case R_ARM_MOVT_PREL:
12081 /* Until we properly support segment-base-relative addressing then
12082 we assume the segment base to be zero, as for the group relocations.
12083 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12084 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
12085 case R_ARM_MOVW_BREL_NC:
12086 case R_ARM_MOVW_BREL:
12087 case R_ARM_MOVT_BREL:
12089 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12091 if (globals->use_rel)
12093 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12094 signed_addend = (addend ^ 0x8000) - 0x8000;
12097 value += signed_addend;
12099 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12100 value -= (input_section->output_section->vma
12101 + input_section->output_offset + rel->r_offset);
12103 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12104 return bfd_reloc_overflow;
12106 if (branch_type == ST_BRANCH_TO_THUMB)
12109 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12110 || r_type == R_ARM_MOVT_BREL)
12113 insn &= 0xfff0f000;
12114 insn |= value & 0xfff;
12115 insn |= (value & 0xf000) << 4;
12116 bfd_put_32 (input_bfd, insn, hit_data);
12118 return bfd_reloc_ok;
12120 case R_ARM_THM_MOVW_ABS_NC:
12121 case R_ARM_THM_MOVT_ABS:
12122 case R_ARM_THM_MOVW_PREL_NC:
12123 case R_ARM_THM_MOVT_PREL:
12124 /* Until we properly support segment-base-relative addressing then
12125 we assume the segment base to be zero, as for the above relocations.
12126 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12127 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12128 as R_ARM_THM_MOVT_ABS. */
12129 case R_ARM_THM_MOVW_BREL_NC:
12130 case R_ARM_THM_MOVW_BREL:
12131 case R_ARM_THM_MOVT_BREL:
12135 insn = bfd_get_16 (input_bfd, hit_data) << 16;
12136 insn |= bfd_get_16 (input_bfd, hit_data + 2);
12138 if (globals->use_rel)
12140 addend = ((insn >> 4) & 0xf000)
12141 | ((insn >> 15) & 0x0800)
12142 | ((insn >> 4) & 0x0700)
12144 signed_addend = (addend ^ 0x8000) - 0x8000;
12147 value += signed_addend;
12149 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12150 value -= (input_section->output_section->vma
12151 + input_section->output_offset + rel->r_offset);
12153 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12154 return bfd_reloc_overflow;
12156 if (branch_type == ST_BRANCH_TO_THUMB)
12159 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12160 || r_type == R_ARM_THM_MOVT_BREL)
12163 insn &= 0xfbf08f00;
12164 insn |= (value & 0xf000) << 4;
12165 insn |= (value & 0x0800) << 15;
12166 insn |= (value & 0x0700) << 4;
12167 insn |= (value & 0x00ff);
12169 bfd_put_16 (input_bfd, insn >> 16, hit_data);
12170 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12172 return bfd_reloc_ok;
12174 case R_ARM_ALU_PC_G0_NC:
12175 case R_ARM_ALU_PC_G1_NC:
12176 case R_ARM_ALU_PC_G0:
12177 case R_ARM_ALU_PC_G1:
12178 case R_ARM_ALU_PC_G2:
12179 case R_ARM_ALU_SB_G0_NC:
12180 case R_ARM_ALU_SB_G1_NC:
12181 case R_ARM_ALU_SB_G0:
12182 case R_ARM_ALU_SB_G1:
12183 case R_ARM_ALU_SB_G2:
12185 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12186 bfd_vma pc = input_section->output_section->vma
12187 + input_section->output_offset + rel->r_offset;
12188 /* sb is the origin of the *segment* containing the symbol. */
12189 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12192 bfd_signed_vma signed_value;
12195 /* Determine which group of bits to select. */
12198 case R_ARM_ALU_PC_G0_NC:
12199 case R_ARM_ALU_PC_G0:
12200 case R_ARM_ALU_SB_G0_NC:
12201 case R_ARM_ALU_SB_G0:
12205 case R_ARM_ALU_PC_G1_NC:
12206 case R_ARM_ALU_PC_G1:
12207 case R_ARM_ALU_SB_G1_NC:
12208 case R_ARM_ALU_SB_G1:
12212 case R_ARM_ALU_PC_G2:
12213 case R_ARM_ALU_SB_G2:
12221 /* If REL, extract the addend from the insn. If RELA, it will
12222 have already been fetched for us. */
12223 if (globals->use_rel)
12226 bfd_vma constant = insn & 0xff;
12227 bfd_vma rotation = (insn & 0xf00) >> 8;
12230 signed_addend = constant;
12233 /* Compensate for the fact that in the instruction, the
12234 rotation is stored in multiples of 2 bits. */
12237 /* Rotate "constant" right by "rotation" bits. */
12238 signed_addend = (constant >> rotation) |
12239 (constant << (8 * sizeof (bfd_vma) - rotation));
12242 /* Determine if the instruction is an ADD or a SUB.
12243 (For REL, this determines the sign of the addend.) */
12244 negative = identify_add_or_sub (insn);
12248 /* xgettext:c-format */
12249 (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12250 "are allowed for ALU group relocations"),
12251 input_bfd, input_section, (uint64_t) rel->r_offset);
12252 return bfd_reloc_overflow;
12255 signed_addend *= negative;
12258 /* Compute the value (X) to go in the place. */
12259 if (r_type == R_ARM_ALU_PC_G0_NC
12260 || r_type == R_ARM_ALU_PC_G1_NC
12261 || r_type == R_ARM_ALU_PC_G0
12262 || r_type == R_ARM_ALU_PC_G1
12263 || r_type == R_ARM_ALU_PC_G2)
12265 signed_value = value - pc + signed_addend;
12267 /* Section base relative. */
12268 signed_value = value - sb + signed_addend;
12270 /* If the target symbol is a Thumb function, then set the
12271 Thumb bit in the address. */
12272 if (branch_type == ST_BRANCH_TO_THUMB)
12275 /* Calculate the value of the relevant G_n, in encoded
12276 constant-with-rotation format. */
12277 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12280 /* Check for overflow if required. */
12281 if ((r_type == R_ARM_ALU_PC_G0
12282 || r_type == R_ARM_ALU_PC_G1
12283 || r_type == R_ARM_ALU_PC_G2
12284 || r_type == R_ARM_ALU_SB_G0
12285 || r_type == R_ARM_ALU_SB_G1
12286 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12289 /* xgettext:c-format */
12290 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12291 "splitting %#" PRIx64 " for group relocation %s"),
12292 input_bfd, input_section, (uint64_t) rel->r_offset,
12293 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12295 return bfd_reloc_overflow;
12298 /* Mask out the value and the ADD/SUB part of the opcode; take care
12299 not to destroy the S bit. */
12300 insn &= 0xff1ff000;
12302 /* Set the opcode according to whether the value to go in the
12303 place is negative. */
12304 if (signed_value < 0)
12309 /* Encode the offset. */
12312 bfd_put_32 (input_bfd, insn, hit_data);
12314 return bfd_reloc_ok;
12316 case R_ARM_LDR_PC_G0:
12317 case R_ARM_LDR_PC_G1:
12318 case R_ARM_LDR_PC_G2:
12319 case R_ARM_LDR_SB_G0:
12320 case R_ARM_LDR_SB_G1:
12321 case R_ARM_LDR_SB_G2:
12323 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12324 bfd_vma pc = input_section->output_section->vma
12325 + input_section->output_offset + rel->r_offset;
12326 /* sb is the origin of the *segment* containing the symbol. */
12327 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12329 bfd_signed_vma signed_value;
12332 /* Determine which groups of bits to calculate. */
12335 case R_ARM_LDR_PC_G0:
12336 case R_ARM_LDR_SB_G0:
12340 case R_ARM_LDR_PC_G1:
12341 case R_ARM_LDR_SB_G1:
12345 case R_ARM_LDR_PC_G2:
12346 case R_ARM_LDR_SB_G2:
12354 /* If REL, extract the addend from the insn. If RELA, it will
12355 have already been fetched for us. */
12356 if (globals->use_rel)
12358 int negative = (insn & (1 << 23)) ? 1 : -1;
12359 signed_addend = negative * (insn & 0xfff);
12362 /* Compute the value (X) to go in the place. */
12363 if (r_type == R_ARM_LDR_PC_G0
12364 || r_type == R_ARM_LDR_PC_G1
12365 || r_type == R_ARM_LDR_PC_G2)
12367 signed_value = value - pc + signed_addend;
12369 /* Section base relative. */
12370 signed_value = value - sb + signed_addend;
12372 /* Calculate the value of the relevant G_{n-1} to obtain
12373 the residual at that stage. */
12374 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12375 group - 1, &residual);
12377 /* Check for overflow. */
12378 if (residual >= 0x1000)
12381 /* xgettext:c-format */
12382 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12383 "splitting %#" PRIx64 " for group relocation %s"),
12384 input_bfd, input_section, (uint64_t) rel->r_offset,
12385 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12387 return bfd_reloc_overflow;
12390 /* Mask out the value and U bit. */
12391 insn &= 0xff7ff000;
12393 /* Set the U bit if the value to go in the place is non-negative. */
12394 if (signed_value >= 0)
12397 /* Encode the offset. */
12400 bfd_put_32 (input_bfd, insn, hit_data);
12402 return bfd_reloc_ok;
12404 case R_ARM_LDRS_PC_G0:
12405 case R_ARM_LDRS_PC_G1:
12406 case R_ARM_LDRS_PC_G2:
12407 case R_ARM_LDRS_SB_G0:
12408 case R_ARM_LDRS_SB_G1:
12409 case R_ARM_LDRS_SB_G2:
12411 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12412 bfd_vma pc = input_section->output_section->vma
12413 + input_section->output_offset + rel->r_offset;
12414 /* sb is the origin of the *segment* containing the symbol. */
12415 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12417 bfd_signed_vma signed_value;
12420 /* Determine which groups of bits to calculate. */
12423 case R_ARM_LDRS_PC_G0:
12424 case R_ARM_LDRS_SB_G0:
12428 case R_ARM_LDRS_PC_G1:
12429 case R_ARM_LDRS_SB_G1:
12433 case R_ARM_LDRS_PC_G2:
12434 case R_ARM_LDRS_SB_G2:
12442 /* If REL, extract the addend from the insn. If RELA, it will
12443 have already been fetched for us. */
12444 if (globals->use_rel)
12446 int negative = (insn & (1 << 23)) ? 1 : -1;
12447 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12450 /* Compute the value (X) to go in the place. */
12451 if (r_type == R_ARM_LDRS_PC_G0
12452 || r_type == R_ARM_LDRS_PC_G1
12453 || r_type == R_ARM_LDRS_PC_G2)
12455 signed_value = value - pc + signed_addend;
12457 /* Section base relative. */
12458 signed_value = value - sb + signed_addend;
12460 /* Calculate the value of the relevant G_{n-1} to obtain
12461 the residual at that stage. */
12462 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12463 group - 1, &residual);
12465 /* Check for overflow. */
12466 if (residual >= 0x100)
12469 /* xgettext:c-format */
12470 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12471 "splitting %#" PRIx64 " for group relocation %s"),
12472 input_bfd, input_section, (uint64_t) rel->r_offset,
12473 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12475 return bfd_reloc_overflow;
12478 /* Mask out the value and U bit. */
12479 insn &= 0xff7ff0f0;
12481 /* Set the U bit if the value to go in the place is non-negative. */
12482 if (signed_value >= 0)
12485 /* Encode the offset. */
12486 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12488 bfd_put_32 (input_bfd, insn, hit_data);
12490 return bfd_reloc_ok;
12492 case R_ARM_LDC_PC_G0:
12493 case R_ARM_LDC_PC_G1:
12494 case R_ARM_LDC_PC_G2:
12495 case R_ARM_LDC_SB_G0:
12496 case R_ARM_LDC_SB_G1:
12497 case R_ARM_LDC_SB_G2:
12499 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12500 bfd_vma pc = input_section->output_section->vma
12501 + input_section->output_offset + rel->r_offset;
12502 /* sb is the origin of the *segment* containing the symbol. */
12503 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12505 bfd_signed_vma signed_value;
12508 /* Determine which groups of bits to calculate. */
12511 case R_ARM_LDC_PC_G0:
12512 case R_ARM_LDC_SB_G0:
12516 case R_ARM_LDC_PC_G1:
12517 case R_ARM_LDC_SB_G1:
12521 case R_ARM_LDC_PC_G2:
12522 case R_ARM_LDC_SB_G2:
12530 /* If REL, extract the addend from the insn. If RELA, it will
12531 have already been fetched for us. */
12532 if (globals->use_rel)
12534 int negative = (insn & (1 << 23)) ? 1 : -1;
12535 signed_addend = negative * ((insn & 0xff) << 2);
12538 /* Compute the value (X) to go in the place. */
12539 if (r_type == R_ARM_LDC_PC_G0
12540 || r_type == R_ARM_LDC_PC_G1
12541 || r_type == R_ARM_LDC_PC_G2)
12543 signed_value = value - pc + signed_addend;
12545 /* Section base relative. */
12546 signed_value = value - sb + signed_addend;
12548 /* Calculate the value of the relevant G_{n-1} to obtain
12549 the residual at that stage. */
12550 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12551 group - 1, &residual);
12553 /* Check for overflow. (The absolute value to go in the place must be
12554 divisible by four and, after having been divided by four, must
12555 fit in eight bits.) */
12556 if ((residual & 0x3) != 0 || residual >= 0x400)
12559 /* xgettext:c-format */
12560 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12561 "splitting %#" PRIx64 " for group relocation %s"),
12562 input_bfd, input_section, (uint64_t) rel->r_offset,
12563 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12565 return bfd_reloc_overflow;
12568 /* Mask out the value and U bit. */
12569 insn &= 0xff7fff00;
12571 /* Set the U bit if the value to go in the place is non-negative. */
12572 if (signed_value >= 0)
12575 /* Encode the offset. */
12576 insn |= residual >> 2;
12578 bfd_put_32 (input_bfd, insn, hit_data);
12580 return bfd_reloc_ok;
12582 case R_ARM_THM_ALU_ABS_G0_NC:
12583 case R_ARM_THM_ALU_ABS_G1_NC:
12584 case R_ARM_THM_ALU_ABS_G2_NC:
12585 case R_ARM_THM_ALU_ABS_G3_NC:
12587 const int shift_array[4] = {0, 8, 16, 24};
12588 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12589 bfd_vma addr = value;
12590 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
12592 /* Compute address. */
12593 if (globals->use_rel)
12594 signed_addend = insn & 0xff;
12595 addr += signed_addend;
12596 if (branch_type == ST_BRANCH_TO_THUMB)
12598 /* Clean imm8 insn. */
12600 /* And update with correct part of address. */
12601 insn |= (addr >> shift) & 0xff;
12603 bfd_put_16 (input_bfd, insn, hit_data);
12606 *unresolved_reloc_p = FALSE;
12607 return bfd_reloc_ok;
12609 case R_ARM_GOTOFFFUNCDESC:
12613 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12614 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12615 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12616 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12619 if (bfd_link_pic(info) && dynindx == 0)
12622 /* Resolve relocation. */
12623 bfd_put_32(output_bfd, (offset + sgot->output_offset)
12624 , contents + rel->r_offset);
12625 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12627 arm_elf_fill_funcdesc(output_bfd, info,
12628 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12629 dynindx, offset, addr, dynreloc_value, seg);
12634 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12638 /* For static binaries, sym_sec can be null. */
12641 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12642 addr = dynreloc_value - sym_sec->output_section->vma;
12650 if (bfd_link_pic(info) && dynindx == 0)
12653 /* This case cannot occur since funcdesc is allocated by
12654 the dynamic loader so we cannot resolve the relocation. */
12655 if (h->dynindx != -1)
12658 /* Resolve relocation. */
12659 bfd_put_32(output_bfd, (offset + sgot->output_offset),
12660 contents + rel->r_offset);
12661 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12662 arm_elf_fill_funcdesc(output_bfd, info,
12663 &eh->fdpic_cnts.funcdesc_offset,
12664 dynindx, offset, addr, dynreloc_value, seg);
12667 *unresolved_reloc_p = FALSE;
12668 return bfd_reloc_ok;
12670 case R_ARM_GOTFUNCDESC:
12674 Elf_Internal_Rela outrel;
12676 /* Resolve relocation. */
12677 bfd_put_32(output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12678 + sgot->output_offset),
12679 contents + rel->r_offset);
12680 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12681 if(h->dynindx == -1)
12684 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12688 /* For static binaries sym_sec can be null. */
12691 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12692 addr = dynreloc_value - sym_sec->output_section->vma;
12700 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12701 arm_elf_fill_funcdesc(output_bfd, info,
12702 &eh->fdpic_cnts.funcdesc_offset,
12703 dynindx, offset, addr, dynreloc_value, seg);
12706 /* Add a dynamic relocation on GOT entry if not already done. */
12707 if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12709 if (h->dynindx == -1)
12711 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12712 if (h->root.type == bfd_link_hash_undefweak)
12713 bfd_put_32(output_bfd, 0, sgot->contents
12714 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12716 bfd_put_32(output_bfd, sgot->output_section->vma
12717 + sgot->output_offset
12718 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12720 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12724 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12726 outrel.r_offset = sgot->output_section->vma
12727 + sgot->output_offset
12728 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12729 outrel.r_addend = 0;
12730 if (h->dynindx == -1 && !bfd_link_pic(info))
12731 if (h->root.type == bfd_link_hash_undefweak)
12732 arm_elf_add_rofixup(output_bfd, globals->srofixup, -1);
12734 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12736 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12737 eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12742 /* Such relocation on static function should not have been
12743 emitted by the compiler. */
12747 *unresolved_reloc_p = FALSE;
12748 return bfd_reloc_ok;
12750 case R_ARM_FUNCDESC:
12754 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12755 Elf_Internal_Rela outrel;
12756 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12757 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12758 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12761 if (bfd_link_pic(info) && dynindx == 0)
12764 /* Replace static FUNCDESC relocation with a
12765 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12767 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12768 outrel.r_offset = input_section->output_section->vma
12769 + input_section->output_offset + rel->r_offset;
12770 outrel.r_addend = 0;
12771 if (bfd_link_pic(info))
12772 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12774 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12776 bfd_put_32 (input_bfd, sgot->output_section->vma
12777 + sgot->output_offset + offset, hit_data);
12779 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12780 arm_elf_fill_funcdesc(output_bfd, info,
12781 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12782 dynindx, offset, addr, dynreloc_value, seg);
12786 if (h->dynindx == -1)
12789 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12792 Elf_Internal_Rela outrel;
12794 /* For static binaries sym_sec can be null. */
12797 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12798 addr = dynreloc_value - sym_sec->output_section->vma;
12806 if (bfd_link_pic(info) && dynindx == 0)
12809 /* Replace static FUNCDESC relocation with a
12810 R_ARM_RELATIVE dynamic relocation. */
12811 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12812 outrel.r_offset = input_section->output_section->vma
12813 + input_section->output_offset + rel->r_offset;
12814 outrel.r_addend = 0;
12815 if (bfd_link_pic(info))
12816 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12818 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12820 bfd_put_32 (input_bfd, sgot->output_section->vma
12821 + sgot->output_offset + offset, hit_data);
12823 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12824 arm_elf_fill_funcdesc(output_bfd, info,
12825 &eh->fdpic_cnts.funcdesc_offset,
12826 dynindx, offset, addr, dynreloc_value, seg);
12830 Elf_Internal_Rela outrel;
12832 /* Add a dynamic relocation. */
12833 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12834 outrel.r_offset = input_section->output_section->vma
12835 + input_section->output_offset + rel->r_offset;
12836 outrel.r_addend = 0;
12837 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12841 *unresolved_reloc_p = FALSE;
12842 return bfd_reloc_ok;
12845 return bfd_reloc_notsupported;
12849 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
12851 arm_add_to_rel (bfd * abfd,
12852 bfd_byte * address,
12853 reloc_howto_type * howto,
12854 bfd_signed_vma increment)
12856 bfd_signed_vma addend;
12858 if (howto->type == R_ARM_THM_CALL
12859 || howto->type == R_ARM_THM_JUMP24)
12861 int upper_insn, lower_insn;
12864 upper_insn = bfd_get_16 (abfd, address);
12865 lower_insn = bfd_get_16 (abfd, address + 2);
12866 upper = upper_insn & 0x7ff;
12867 lower = lower_insn & 0x7ff;
12869 addend = (upper << 12) | (lower << 1);
12870 addend += increment;
12873 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
12874 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
12876 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
12877 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
12883 contents = bfd_get_32 (abfd, address);
12885 /* Get the (signed) value from the instruction. */
12886 addend = contents & howto->src_mask;
12887 if (addend & ((howto->src_mask + 1) >> 1))
12889 bfd_signed_vma mask;
12892 mask &= ~ howto->src_mask;
12896 /* Add in the increment, (which is a byte value). */
12897 switch (howto->type)
12900 addend += increment;
12907 addend <<= howto->size;
12908 addend += increment;
12910 /* Should we check for overflow here ? */
12912 /* Drop any undesired bits. */
12913 addend >>= howto->rightshift;
12917 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
12919 bfd_put_32 (abfd, contents, address);
12923 #define IS_ARM_TLS_RELOC(R_TYPE) \
12924 ((R_TYPE) == R_ARM_TLS_GD32 \
12925 || (R_TYPE) == R_ARM_TLS_GD32_FDPIC \
12926 || (R_TYPE) == R_ARM_TLS_LDO32 \
12927 || (R_TYPE) == R_ARM_TLS_LDM32 \
12928 || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC \
12929 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
12930 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
12931 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
12932 || (R_TYPE) == R_ARM_TLS_LE32 \
12933 || (R_TYPE) == R_ARM_TLS_IE32 \
12934 || (R_TYPE) == R_ARM_TLS_IE32_FDPIC \
12935 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
12937 /* Specific set of relocations for the gnu tls dialect. */
12938 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
12939 ((R_TYPE) == R_ARM_TLS_GOTDESC \
12940 || (R_TYPE) == R_ARM_TLS_CALL \
12941 || (R_TYPE) == R_ARM_THM_TLS_CALL \
12942 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
12943 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
12945 /* Relocate an ARM ELF section. */
12948 elf32_arm_relocate_section (bfd * output_bfd,
12949 struct bfd_link_info * info,
12951 asection * input_section,
12952 bfd_byte * contents,
12953 Elf_Internal_Rela * relocs,
12954 Elf_Internal_Sym * local_syms,
12955 asection ** local_sections)
12957 Elf_Internal_Shdr *symtab_hdr;
12958 struct elf_link_hash_entry **sym_hashes;
12959 Elf_Internal_Rela *rel;
12960 Elf_Internal_Rela *relend;
12962 struct elf32_arm_link_hash_table * globals;
12964 globals = elf32_arm_hash_table (info);
12965 if (globals == NULL)
12968 symtab_hdr = & elf_symtab_hdr (input_bfd);
12969 sym_hashes = elf_sym_hashes (input_bfd);
12972 relend = relocs + input_section->reloc_count;
12973 for (; rel < relend; rel++)
12976 reloc_howto_type * howto;
12977 unsigned long r_symndx;
12978 Elf_Internal_Sym * sym;
12980 struct elf_link_hash_entry * h;
12981 bfd_vma relocation;
12982 bfd_reloc_status_type r;
12985 bfd_boolean unresolved_reloc = FALSE;
12986 char *error_message = NULL;
12988 r_symndx = ELF32_R_SYM (rel->r_info);
12989 r_type = ELF32_R_TYPE (rel->r_info);
12990 r_type = arm_real_reloc_type (globals, r_type);
12992 if ( r_type == R_ARM_GNU_VTENTRY
12993 || r_type == R_ARM_GNU_VTINHERIT)
12996 howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
12999 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
13005 if (r_symndx < symtab_hdr->sh_info)
13007 sym = local_syms + r_symndx;
13008 sym_type = ELF32_ST_TYPE (sym->st_info);
13009 sec = local_sections[r_symndx];
13011 /* An object file might have a reference to a local
13012 undefined symbol. This is a daft object file, but we
13013 should at least do something about it. V4BX & NONE
13014 relocations do not use the symbol and are explicitly
13015 allowed to use the undefined symbol, so allow those.
13016 Likewise for relocations against STN_UNDEF. */
13017 if (r_type != R_ARM_V4BX
13018 && r_type != R_ARM_NONE
13019 && r_symndx != STN_UNDEF
13020 && bfd_is_und_section (sec)
13021 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
13022 (*info->callbacks->undefined_symbol)
13023 (info, bfd_elf_string_from_elf_section
13024 (input_bfd, symtab_hdr->sh_link, sym->st_name),
13025 input_bfd, input_section,
13026 rel->r_offset, TRUE);
13028 if (globals->use_rel)
13030 relocation = (sec->output_section->vma
13031 + sec->output_offset
13033 if (!bfd_link_relocatable (info)
13034 && (sec->flags & SEC_MERGE)
13035 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13038 bfd_vma addend, value;
13042 case R_ARM_MOVW_ABS_NC:
13043 case R_ARM_MOVT_ABS:
13044 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13045 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
13046 addend = (addend ^ 0x8000) - 0x8000;
13049 case R_ARM_THM_MOVW_ABS_NC:
13050 case R_ARM_THM_MOVT_ABS:
13051 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
13053 value |= bfd_get_16 (input_bfd,
13054 contents + rel->r_offset + 2);
13055 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
13056 | ((value & 0x04000000) >> 15);
13057 addend = (addend ^ 0x8000) - 0x8000;
13061 if (howto->rightshift
13062 || (howto->src_mask & (howto->src_mask + 1)))
13065 /* xgettext:c-format */
13066 (_("%pB(%pA+%#" PRIx64 "): "
13067 "%s relocation against SEC_MERGE section"),
13068 input_bfd, input_section,
13069 (uint64_t) rel->r_offset, howto->name);
13073 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13075 /* Get the (signed) value from the instruction. */
13076 addend = value & howto->src_mask;
13077 if (addend & ((howto->src_mask + 1) >> 1))
13079 bfd_signed_vma mask;
13082 mask &= ~ howto->src_mask;
13090 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
13092 addend += msec->output_section->vma + msec->output_offset;
13094 /* Cases here must match those in the preceding
13095 switch statement. */
13098 case R_ARM_MOVW_ABS_NC:
13099 case R_ARM_MOVT_ABS:
13100 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
13101 | (addend & 0xfff);
13102 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13105 case R_ARM_THM_MOVW_ABS_NC:
13106 case R_ARM_THM_MOVT_ABS:
13107 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
13108 | (addend & 0xff) | ((addend & 0x0800) << 15);
13109 bfd_put_16 (input_bfd, value >> 16,
13110 contents + rel->r_offset);
13111 bfd_put_16 (input_bfd, value,
13112 contents + rel->r_offset + 2);
13116 value = (value & ~ howto->dst_mask)
13117 | (addend & howto->dst_mask);
13118 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13124 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
13128 bfd_boolean warned, ignored;
13130 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
13131 r_symndx, symtab_hdr, sym_hashes,
13132 h, sec, relocation,
13133 unresolved_reloc, warned, ignored);
13135 sym_type = h->type;
13138 if (sec != NULL && discarded_section (sec))
13139 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
13140 rel, 1, relend, howto, 0, contents);
13142 if (bfd_link_relocatable (info))
13144 /* This is a relocatable link. We don't have to change
13145 anything, unless the reloc is against a section symbol,
13146 in which case we have to adjust according to where the
13147 section symbol winds up in the output section. */
13148 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13150 if (globals->use_rel)
13151 arm_add_to_rel (input_bfd, contents + rel->r_offset,
13152 howto, (bfd_signed_vma) sec->output_offset);
13154 rel->r_addend += sec->output_offset;
13160 name = h->root.root.string;
13163 name = (bfd_elf_string_from_elf_section
13164 (input_bfd, symtab_hdr->sh_link, sym->st_name));
13165 if (name == NULL || *name == '\0')
13166 name = bfd_section_name (input_bfd, sec);
13169 if (r_symndx != STN_UNDEF
13170 && r_type != R_ARM_NONE
13172 || h->root.type == bfd_link_hash_defined
13173 || h->root.type == bfd_link_hash_defweak)
13174 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
13177 ((sym_type == STT_TLS
13178 /* xgettext:c-format */
13179 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
13180 /* xgettext:c-format */
13181 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
13184 (uint64_t) rel->r_offset,
13189 /* We call elf32_arm_final_link_relocate unless we're completely
13190 done, i.e., the relaxation produced the final output we want,
13191 and we won't let anybody mess with it. Also, we have to do
13192 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
13193 both in relaxed and non-relaxed cases. */
13194 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
13195 || (IS_ARM_TLS_GNU_RELOC (r_type)
13196 && !((h ? elf32_arm_hash_entry (h)->tls_type :
13197 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
13200 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
13201 contents, rel, h == NULL);
13202 /* This may have been marked unresolved because it came from
13203 a shared library. But we've just dealt with that. */
13204 unresolved_reloc = 0;
13207 r = bfd_reloc_continue;
13209 if (r == bfd_reloc_continue)
13211 unsigned char branch_type =
13212 h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
13213 : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
13215 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
13216 input_section, contents, rel,
13217 relocation, info, sec, name,
13218 sym_type, branch_type, h,
13223 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13224 because such sections are not SEC_ALLOC and thus ld.so will
13225 not process them. */
13226 if (unresolved_reloc
13227 && !((input_section->flags & SEC_DEBUGGING) != 0
13229 && _bfd_elf_section_offset (output_bfd, info, input_section,
13230 rel->r_offset) != (bfd_vma) -1)
13233 /* xgettext:c-format */
13234 (_("%pB(%pA+%#" PRIx64 "): "
13235 "unresolvable %s relocation against symbol `%s'"),
13238 (uint64_t) rel->r_offset,
13240 h->root.root.string);
13244 if (r != bfd_reloc_ok)
13248 case bfd_reloc_overflow:
13249 /* If the overflowing reloc was to an undefined symbol,
13250 we have already printed one error message and there
13251 is no point complaining again. */
13252 if (!h || h->root.type != bfd_link_hash_undefined)
13253 (*info->callbacks->reloc_overflow)
13254 (info, (h ? &h->root : NULL), name, howto->name,
13255 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
13258 case bfd_reloc_undefined:
13259 (*info->callbacks->undefined_symbol)
13260 (info, name, input_bfd, input_section, rel->r_offset, TRUE);
13263 case bfd_reloc_outofrange:
13264 error_message = _("out of range");
13267 case bfd_reloc_notsupported:
13268 error_message = _("unsupported relocation");
13271 case bfd_reloc_dangerous:
13272 /* error_message should already be set. */
13276 error_message = _("unknown error");
13277 /* Fall through. */
13280 BFD_ASSERT (error_message != NULL);
13281 (*info->callbacks->reloc_dangerous)
13282 (info, error_message, input_bfd, input_section, rel->r_offset);
13291 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13292 adds the edit to the start of the list. (The list must be built in order of
13293 ascending TINDEX: the function's callers are primarily responsible for
13294 maintaining that condition). */
13297 add_unwind_table_edit (arm_unwind_table_edit **head,
13298 arm_unwind_table_edit **tail,
13299 arm_unwind_edit_type type,
13300 asection *linked_section,
13301 unsigned int tindex)
13303 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13304 xmalloc (sizeof (arm_unwind_table_edit));
13306 new_edit->type = type;
13307 new_edit->linked_section = linked_section;
13308 new_edit->index = tindex;
13312 new_edit->next = NULL;
13315 (*tail)->next = new_edit;
13317 (*tail) = new_edit;
13320 (*head) = new_edit;
13324 new_edit->next = *head;
13333 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13335 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
13337 adjust_exidx_size(asection *exidx_sec, int adjust)
13341 if (!exidx_sec->rawsize)
13342 exidx_sec->rawsize = exidx_sec->size;
13344 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
13345 out_sec = exidx_sec->output_section;
13346 /* Adjust size of output section. */
13347 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
13350 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13352 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
13354 struct _arm_elf_section_data *exidx_arm_data;
13356 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13357 add_unwind_table_edit (
13358 &exidx_arm_data->u.exidx.unwind_edit_list,
13359 &exidx_arm_data->u.exidx.unwind_edit_tail,
13360 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13362 exidx_arm_data->additional_reloc_count++;
13364 adjust_exidx_size(exidx_sec, 8);
13367 /* Scan .ARM.exidx tables, and create a list describing edits which should be
13368 made to those tables, such that:
13370 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
13371 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
13372 codes which have been inlined into the index).
13374 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
13376 The edits are applied when the tables are written
13377 (in elf32_arm_write_section). */
13380 elf32_arm_fix_exidx_coverage (asection **text_section_order,
13381 unsigned int num_text_sections,
13382 struct bfd_link_info *info,
13383 bfd_boolean merge_exidx_entries)
13386 unsigned int last_second_word = 0, i;
13387 asection *last_exidx_sec = NULL;
13388 asection *last_text_sec = NULL;
13389 int last_unwind_type = -1;
13391 /* Walk over all EXIDX sections, and create backlinks from the corrsponding
13393 for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
13397 for (sec = inp->sections; sec != NULL; sec = sec->next)
13399 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
13400 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
13402 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
13405 if (elf_sec->linked_to)
13407 Elf_Internal_Shdr *linked_hdr
13408 = &elf_section_data (elf_sec->linked_to)->this_hdr;
13409 struct _arm_elf_section_data *linked_sec_arm_data
13410 = get_arm_elf_section_data (linked_hdr->bfd_section);
13412 if (linked_sec_arm_data == NULL)
13415 /* Link this .ARM.exidx section back from the text section it
13417 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
13422 /* Walk all text sections in order of increasing VMA. Eilminate duplicate
13423 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
13424 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
13426 for (i = 0; i < num_text_sections; i++)
13428 asection *sec = text_section_order[i];
13429 asection *exidx_sec;
13430 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
13431 struct _arm_elf_section_data *exidx_arm_data;
13432 bfd_byte *contents = NULL;
13433 int deleted_exidx_bytes = 0;
13435 arm_unwind_table_edit *unwind_edit_head = NULL;
13436 arm_unwind_table_edit *unwind_edit_tail = NULL;
13437 Elf_Internal_Shdr *hdr;
13440 if (arm_data == NULL)
13443 exidx_sec = arm_data->u.text.arm_exidx_sec;
13444 if (exidx_sec == NULL)
13446 /* Section has no unwind data. */
13447 if (last_unwind_type == 0 || !last_exidx_sec)
13450 /* Ignore zero sized sections. */
13451 if (sec->size == 0)
13454 insert_cantunwind_after(last_text_sec, last_exidx_sec);
13455 last_unwind_type = 0;
13459 /* Skip /DISCARD/ sections. */
13460 if (bfd_is_abs_section (exidx_sec->output_section))
13463 hdr = &elf_section_data (exidx_sec)->this_hdr;
13464 if (hdr->sh_type != SHT_ARM_EXIDX)
13467 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13468 if (exidx_arm_data == NULL)
13471 ibfd = exidx_sec->owner;
13473 if (hdr->contents != NULL)
13474 contents = hdr->contents;
13475 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
13479 if (last_unwind_type > 0)
13481 unsigned int first_word = bfd_get_32 (ibfd, contents);
13482 /* Add cantunwind if first unwind item does not match section
13484 if (first_word != sec->vma)
13486 insert_cantunwind_after (last_text_sec, last_exidx_sec);
13487 last_unwind_type = 0;
13491 for (j = 0; j < hdr->sh_size; j += 8)
13493 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
13497 /* An EXIDX_CANTUNWIND entry. */
13498 if (second_word == 1)
13500 if (last_unwind_type == 0)
13504 /* Inlined unwinding data. Merge if equal to previous. */
13505 else if ((second_word & 0x80000000) != 0)
13507 if (merge_exidx_entries
13508 && last_second_word == second_word && last_unwind_type == 1)
13511 last_second_word = second_word;
13513 /* Normal table entry. In theory we could merge these too,
13514 but duplicate entries are likely to be much less common. */
13518 if (elide && !bfd_link_relocatable (info))
13520 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
13521 DELETE_EXIDX_ENTRY, NULL, j / 8);
13523 deleted_exidx_bytes += 8;
13526 last_unwind_type = unwind_type;
13529 /* Free contents if we allocated it ourselves. */
13530 if (contents != hdr->contents)
13533 /* Record edits to be applied later (in elf32_arm_write_section). */
13534 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
13535 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
13537 if (deleted_exidx_bytes > 0)
13538 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
13540 last_exidx_sec = exidx_sec;
13541 last_text_sec = sec;
13544 /* Add terminating CANTUNWIND entry. */
13545 if (!bfd_link_relocatable (info) && last_exidx_sec
13546 && last_unwind_type != 0)
13547 insert_cantunwind_after(last_text_sec, last_exidx_sec);
13553 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13554 bfd *ibfd, const char *name)
13556 asection *sec, *osec;
13558 sec = bfd_get_linker_section (ibfd, name);
13559 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13562 osec = sec->output_section;
13563 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13566 if (! bfd_set_section_contents (obfd, osec, sec->contents,
13567 sec->output_offset, sec->size))
13574 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13576 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13577 asection *sec, *osec;
13579 if (globals == NULL)
13582 /* Invoke the regular ELF backend linker to do all the work. */
13583 if (!bfd_elf_final_link (abfd, info))
13586 /* Process stub sections (eg BE8 encoding, ...). */
13587 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13589 for (i=0; i<htab->top_id; i++)
13591 sec = htab->stub_group[i].stub_sec;
13592 /* Only process it once, in its link_sec slot. */
13593 if (sec && i == htab->stub_group[i].link_sec->id)
13595 osec = sec->output_section;
13596 elf32_arm_write_section (abfd, info, sec, sec->contents);
13597 if (! bfd_set_section_contents (abfd, osec, sec->contents,
13598 sec->output_offset, sec->size))
13603 /* Write out any glue sections now that we have created all the
13605 if (globals->bfd_of_glue_owner != NULL)
13607 if (! elf32_arm_output_glue_section (info, abfd,
13608 globals->bfd_of_glue_owner,
13609 ARM2THUMB_GLUE_SECTION_NAME))
13612 if (! elf32_arm_output_glue_section (info, abfd,
13613 globals->bfd_of_glue_owner,
13614 THUMB2ARM_GLUE_SECTION_NAME))
13617 if (! elf32_arm_output_glue_section (info, abfd,
13618 globals->bfd_of_glue_owner,
13619 VFP11_ERRATUM_VENEER_SECTION_NAME))
13622 if (! elf32_arm_output_glue_section (info, abfd,
13623 globals->bfd_of_glue_owner,
13624 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13627 if (! elf32_arm_output_glue_section (info, abfd,
13628 globals->bfd_of_glue_owner,
13629 ARM_BX_GLUE_SECTION_NAME))
13636 /* Return a best guess for the machine number based on the attributes. */
13638 static unsigned int
13639 bfd_arm_get_mach_from_attributes (bfd * abfd)
13641 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
13645 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
13646 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
13647 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
13649 case TAG_CPU_ARCH_V5TE:
13653 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
13654 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
13658 if (strcmp (name, "IWMMXT2") == 0)
13659 return bfd_mach_arm_iWMMXt2;
13661 if (strcmp (name, "IWMMXT") == 0)
13662 return bfd_mach_arm_iWMMXt;
13664 if (strcmp (name, "XSCALE") == 0)
13668 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13669 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13672 case 1: return bfd_mach_arm_iWMMXt;
13673 case 2: return bfd_mach_arm_iWMMXt2;
13674 default: return bfd_mach_arm_XScale;
13679 return bfd_mach_arm_5TE;
13683 return bfd_mach_arm_unknown;
13687 /* Set the right machine number. */
13690 elf32_arm_object_p (bfd *abfd)
13694 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13696 if (mach == bfd_mach_arm_unknown)
13698 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13699 mach = bfd_mach_arm_ep9312;
13701 mach = bfd_arm_get_mach_from_attributes (abfd);
13704 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13708 /* Function to keep ARM specific flags in the ELF header. */
13711 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13713 if (elf_flags_init (abfd)
13714 && elf_elfheader (abfd)->e_flags != flags)
13716 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13718 if (flags & EF_ARM_INTERWORK)
13720 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13724 (_("warning: clearing the interworking flag of %pB due to outside request"),
13730 elf_elfheader (abfd)->e_flags = flags;
13731 elf_flags_init (abfd) = TRUE;
13737 /* Copy backend specific data from one object module to another. */
13740 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13743 flagword out_flags;
13745 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13748 in_flags = elf_elfheader (ibfd)->e_flags;
13749 out_flags = elf_elfheader (obfd)->e_flags;
13751 if (elf_flags_init (obfd)
13752 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
13753 && in_flags != out_flags)
13755 /* Cannot mix APCS26 and APCS32 code. */
13756 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13759 /* Cannot mix float APCS and non-float APCS code. */
13760 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13763 /* If the src and dest have different interworking flags
13764 then turn off the interworking bit. */
13765 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
13767 if (out_flags & EF_ARM_INTERWORK)
13769 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13772 in_flags &= ~EF_ARM_INTERWORK;
13775 /* Likewise for PIC, though don't warn for this case. */
13776 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
13777 in_flags &= ~EF_ARM_PIC;
13780 elf_elfheader (obfd)->e_flags = in_flags;
13781 elf_flags_init (obfd) = TRUE;
13783 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
13786 /* Values for Tag_ABI_PCS_R9_use. */
13795 /* Values for Tag_ABI_PCS_RW_data. */
13798 AEABI_PCS_RW_data_absolute,
13799 AEABI_PCS_RW_data_PCrel,
13800 AEABI_PCS_RW_data_SBrel,
13801 AEABI_PCS_RW_data_unused
13804 /* Values for Tag_ABI_enum_size. */
13810 AEABI_enum_forced_wide
13813 /* Determine whether an object attribute tag takes an integer, a
13817 elf32_arm_obj_attrs_arg_type (int tag)
13819 if (tag == Tag_compatibility)
13820 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
13821 else if (tag == Tag_nodefaults)
13822 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
13823 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
13824 return ATTR_TYPE_FLAG_STR_VAL;
13826 return ATTR_TYPE_FLAG_INT_VAL;
13828 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
13831 /* The ABI defines that Tag_conformance should be emitted first, and that
13832 Tag_nodefaults should be second (if either is defined). This sets those
13833 two positions, and bumps up the position of all the remaining tags to
13836 elf32_arm_obj_attrs_order (int num)
13838 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
13839 return Tag_conformance;
13840 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
13841 return Tag_nodefaults;
13842 if ((num - 2) < Tag_nodefaults)
13844 if ((num - 1) < Tag_conformance)
13849 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
13851 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
13853 if ((tag & 127) < 64)
13856 (_("%pB: unknown mandatory EABI object attribute %d"),
13858 bfd_set_error (bfd_error_bad_value);
13864 (_("warning: %pB: unknown EABI object attribute %d"),
13870 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
13871 Returns -1 if no architecture could be read. */
13874 get_secondary_compatible_arch (bfd *abfd)
13876 obj_attribute *attr =
13877 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
13879 /* Note: the tag and its argument below are uleb128 values, though
13880 currently-defined values fit in one byte for each. */
13882 && attr->s[0] == Tag_CPU_arch
13883 && (attr->s[1] & 128) != 128
13884 && attr->s[2] == 0)
13887 /* This tag is "safely ignorable", so don't complain if it looks funny. */
13891 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
13892 The tag is removed if ARCH is -1. */
13895 set_secondary_compatible_arch (bfd *abfd, int arch)
13897 obj_attribute *attr =
13898 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
13906 /* Note: the tag and its argument below are uleb128 values, though
13907 currently-defined values fit in one byte for each. */
13909 attr->s = (char *) bfd_alloc (abfd, 3);
13910 attr->s[0] = Tag_CPU_arch;
13915 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
13919 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
13920 int newtag, int secondary_compat)
13922 #define T(X) TAG_CPU_ARCH_##X
13923 int tagl, tagh, result;
13926 T(V6T2), /* PRE_V4. */
13928 T(V6T2), /* V4T. */
13929 T(V6T2), /* V5T. */
13930 T(V6T2), /* V5TE. */
13931 T(V6T2), /* V5TEJ. */
13934 T(V6T2) /* V6T2. */
13938 T(V6K), /* PRE_V4. */
13942 T(V6K), /* V5TE. */
13943 T(V6K), /* V5TEJ. */
13945 T(V6KZ), /* V6KZ. */
13951 T(V7), /* PRE_V4. */
13956 T(V7), /* V5TEJ. */
13969 T(V6K), /* V5TE. */
13970 T(V6K), /* V5TEJ. */
13972 T(V6KZ), /* V6KZ. */
13976 T(V6_M) /* V6_M. */
13978 const int v6s_m[] =
13984 T(V6K), /* V5TE. */
13985 T(V6K), /* V5TEJ. */
13987 T(V6KZ), /* V6KZ. */
13991 T(V6S_M), /* V6_M. */
13992 T(V6S_M) /* V6S_M. */
13994 const int v7e_m[] =
13998 T(V7E_M), /* V4T. */
13999 T(V7E_M), /* V5T. */
14000 T(V7E_M), /* V5TE. */
14001 T(V7E_M), /* V5TEJ. */
14002 T(V7E_M), /* V6. */
14003 T(V7E_M), /* V6KZ. */
14004 T(V7E_M), /* V6T2. */
14005 T(V7E_M), /* V6K. */
14006 T(V7E_M), /* V7. */
14007 T(V7E_M), /* V6_M. */
14008 T(V7E_M), /* V6S_M. */
14009 T(V7E_M) /* V7E_M. */
14013 T(V8), /* PRE_V4. */
14018 T(V8), /* V5TEJ. */
14025 T(V8), /* V6S_M. */
14026 T(V8), /* V7E_M. */
14031 T(V8R), /* PRE_V4. */
14035 T(V8R), /* V5TE. */
14036 T(V8R), /* V5TEJ. */
14038 T(V8R), /* V6KZ. */
14039 T(V8R), /* V6T2. */
14042 T(V8R), /* V6_M. */
14043 T(V8R), /* V6S_M. */
14044 T(V8R), /* V7E_M. */
14048 const int v8m_baseline[] =
14061 T(V8M_BASE), /* V6_M. */
14062 T(V8M_BASE), /* V6S_M. */
14066 T(V8M_BASE) /* V8-M BASELINE. */
14068 const int v8m_mainline[] =
14080 T(V8M_MAIN), /* V7. */
14081 T(V8M_MAIN), /* V6_M. */
14082 T(V8M_MAIN), /* V6S_M. */
14083 T(V8M_MAIN), /* V7E_M. */
14086 T(V8M_MAIN), /* V8-M BASELINE. */
14087 T(V8M_MAIN) /* V8-M MAINLINE. */
14089 const int v4t_plus_v6_m[] =
14095 T(V5TE), /* V5TE. */
14096 T(V5TEJ), /* V5TEJ. */
14098 T(V6KZ), /* V6KZ. */
14099 T(V6T2), /* V6T2. */
14102 T(V6_M), /* V6_M. */
14103 T(V6S_M), /* V6S_M. */
14104 T(V7E_M), /* V7E_M. */
14107 T(V8M_BASE), /* V8-M BASELINE. */
14108 T(V8M_MAIN), /* V8-M MAINLINE. */
14109 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
14111 const int *comb[] =
14123 /* Pseudo-architecture. */
14127 /* Check we've not got a higher architecture than we know about. */
14129 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
14131 _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
14135 /* Override old tag if we have a Tag_also_compatible_with on the output. */
14137 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
14138 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
14139 oldtag = T(V4T_PLUS_V6_M);
14141 /* And override the new tag if we have a Tag_also_compatible_with on the
14144 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
14145 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
14146 newtag = T(V4T_PLUS_V6_M);
14148 tagl = (oldtag < newtag) ? oldtag : newtag;
14149 result = tagh = (oldtag > newtag) ? oldtag : newtag;
14151 /* Architectures before V6KZ add features monotonically. */
14152 if (tagh <= TAG_CPU_ARCH_V6KZ)
14155 result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
14157 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14158 as the canonical version. */
14159 if (result == T(V4T_PLUS_V6_M))
14162 *secondary_compat_out = T(V6_M);
14165 *secondary_compat_out = -1;
14169 _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
14170 ibfd, oldtag, newtag);
14178 /* Query attributes object to see if integer divide instructions may be
14179 present in an object. */
14181 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14183 int arch = attr[Tag_CPU_arch].i;
14184 int profile = attr[Tag_CPU_arch_profile].i;
14186 switch (attr[Tag_DIV_use].i)
14189 /* Integer divide allowed if instruction contained in archetecture. */
14190 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14192 else if (arch >= TAG_CPU_ARCH_V7E_M)
14198 /* Integer divide explicitly prohibited. */
14202 /* Unrecognised case - treat as allowing divide everywhere. */
14204 /* Integer divide allowed in ARM state. */
14209 /* Query attributes object to see if integer divide instructions are
14210 forbidden to be in the object. This is not the inverse of
14211 elf32_arm_attributes_accept_div. */
14213 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14215 return attr[Tag_DIV_use].i == 1;
14218 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14219 are conflicting attributes. */
14222 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14224 bfd *obfd = info->output_bfd;
14225 obj_attribute *in_attr;
14226 obj_attribute *out_attr;
14227 /* Some tags have 0 = don't care, 1 = strong requirement,
14228 2 = weak requirement. */
14229 static const int order_021[3] = {0, 2, 1};
14231 bfd_boolean result = TRUE;
14232 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14234 /* Skip the linker stubs file. This preserves previous behavior
14235 of accepting unknown attributes in the first input file - but
14237 if (ibfd->flags & BFD_LINKER_CREATED)
14240 /* Skip any input that hasn't attribute section.
14241 This enables to link object files without attribute section with
14243 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14246 if (!elf_known_obj_attributes_proc (obfd)[0].i)
14248 /* This is the first object. Copy the attributes. */
14249 _bfd_elf_copy_obj_attributes (ibfd, obfd);
14251 out_attr = elf_known_obj_attributes_proc (obfd);
14253 /* Use the Tag_null value to indicate the attributes have been
14257 /* We do not output objects with Tag_MPextension_use_legacy - we move
14258 the attribute's value to Tag_MPextension_use. */
14259 if (out_attr[Tag_MPextension_use_legacy].i != 0)
14261 if (out_attr[Tag_MPextension_use].i != 0
14262 && out_attr[Tag_MPextension_use_legacy].i
14263 != out_attr[Tag_MPextension_use].i)
14266 (_("Error: %pB has both the current and legacy "
14267 "Tag_MPextension_use attributes"), ibfd);
14271 out_attr[Tag_MPextension_use] =
14272 out_attr[Tag_MPextension_use_legacy];
14273 out_attr[Tag_MPextension_use_legacy].type = 0;
14274 out_attr[Tag_MPextension_use_legacy].i = 0;
14280 in_attr = elf_known_obj_attributes_proc (ibfd);
14281 out_attr = elf_known_obj_attributes_proc (obfd);
14282 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14283 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14285 /* Ignore mismatches if the object doesn't use floating point or is
14286 floating point ABI independent. */
14287 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14288 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14289 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14290 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14291 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14292 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14295 (_("error: %pB uses VFP register arguments, %pB does not"),
14296 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14297 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14302 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14304 /* Merge this attribute with existing attributes. */
14307 case Tag_CPU_raw_name:
14309 /* These are merged after Tag_CPU_arch. */
14312 case Tag_ABI_optimization_goals:
14313 case Tag_ABI_FP_optimization_goals:
14314 /* Use the first value seen. */
14319 int secondary_compat = -1, secondary_compat_out = -1;
14320 unsigned int saved_out_attr = out_attr[i].i;
14322 static const char *name_table[] =
14324 /* These aren't real CPU names, but we can't guess
14325 that from the architecture version alone. */
14341 "ARM v8-M.baseline",
14342 "ARM v8-M.mainline",
14345 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14346 secondary_compat = get_secondary_compatible_arch (ibfd);
14347 secondary_compat_out = get_secondary_compatible_arch (obfd);
14348 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14349 &secondary_compat_out,
14353 /* Return with error if failed to merge. */
14354 if (arch_attr == -1)
14357 out_attr[i].i = arch_attr;
14359 set_secondary_compatible_arch (obfd, secondary_compat_out);
14361 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14362 if (out_attr[i].i == saved_out_attr)
14363 ; /* Leave the names alone. */
14364 else if (out_attr[i].i == in_attr[i].i)
14366 /* The output architecture has been changed to match the
14367 input architecture. Use the input names. */
14368 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14369 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14371 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14372 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14377 out_attr[Tag_CPU_name].s = NULL;
14378 out_attr[Tag_CPU_raw_name].s = NULL;
14381 /* If we still don't have a value for Tag_CPU_name,
14382 make one up now. Tag_CPU_raw_name remains blank. */
14383 if (out_attr[Tag_CPU_name].s == NULL
14384 && out_attr[i].i < ARRAY_SIZE (name_table))
14385 out_attr[Tag_CPU_name].s =
14386 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14390 case Tag_ARM_ISA_use:
14391 case Tag_THUMB_ISA_use:
14392 case Tag_WMMX_arch:
14393 case Tag_Advanced_SIMD_arch:
14394 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14395 case Tag_ABI_FP_rounding:
14396 case Tag_ABI_FP_exceptions:
14397 case Tag_ABI_FP_user_exceptions:
14398 case Tag_ABI_FP_number_model:
14399 case Tag_FP_HP_extension:
14400 case Tag_CPU_unaligned_access:
14402 case Tag_MPextension_use:
14403 /* Use the largest value specified. */
14404 if (in_attr[i].i > out_attr[i].i)
14405 out_attr[i].i = in_attr[i].i;
14408 case Tag_ABI_align_preserved:
14409 case Tag_ABI_PCS_RO_data:
14410 /* Use the smallest value specified. */
14411 if (in_attr[i].i < out_attr[i].i)
14412 out_attr[i].i = in_attr[i].i;
14415 case Tag_ABI_align_needed:
14416 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14417 && (in_attr[Tag_ABI_align_preserved].i == 0
14418 || out_attr[Tag_ABI_align_preserved].i == 0))
14420 /* This error message should be enabled once all non-conformant
14421 binaries in the toolchain have had the attributes set
14424 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14428 /* Fall through. */
14429 case Tag_ABI_FP_denormal:
14430 case Tag_ABI_PCS_GOT_use:
14431 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14432 value if greater than 2 (for future-proofing). */
14433 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14434 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14435 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14436 out_attr[i].i = in_attr[i].i;
14439 case Tag_Virtualization_use:
14440 /* The virtualization tag effectively stores two bits of
14441 information: the intended use of TrustZone (in bit 0), and the
14442 intended use of Virtualization (in bit 1). */
14443 if (out_attr[i].i == 0)
14444 out_attr[i].i = in_attr[i].i;
14445 else if (in_attr[i].i != 0
14446 && in_attr[i].i != out_attr[i].i)
14448 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14453 (_("error: %pB: unable to merge virtualization attributes "
14461 case Tag_CPU_arch_profile:
14462 if (out_attr[i].i != in_attr[i].i)
14464 /* 0 will merge with anything.
14465 'A' and 'S' merge to 'A'.
14466 'R' and 'S' merge to 'R'.
14467 'M' and 'A|R|S' is an error. */
14468 if (out_attr[i].i == 0
14469 || (out_attr[i].i == 'S'
14470 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14471 out_attr[i].i = in_attr[i].i;
14472 else if (in_attr[i].i == 0
14473 || (in_attr[i].i == 'S'
14474 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14475 ; /* Do nothing. */
14479 (_("error: %pB: conflicting architecture profiles %c/%c"),
14481 in_attr[i].i ? in_attr[i].i : '0',
14482 out_attr[i].i ? out_attr[i].i : '0');
14488 case Tag_DSP_extension:
14489 /* No need to change output value if any of:
14490 - pre (<=) ARMv5T input architecture (do not have DSP)
14491 - M input profile not ARMv7E-M and do not have DSP. */
14492 if (in_attr[Tag_CPU_arch].i <= 3
14493 || (in_attr[Tag_CPU_arch_profile].i == 'M'
14494 && in_attr[Tag_CPU_arch].i != 13
14495 && in_attr[i].i == 0))
14496 ; /* Do nothing. */
14497 /* Output value should be 0 if DSP part of architecture, ie.
14498 - post (>=) ARMv5te architecture output
14499 - A, R or S profile output or ARMv7E-M output architecture. */
14500 else if (out_attr[Tag_CPU_arch].i >= 4
14501 && (out_attr[Tag_CPU_arch_profile].i == 'A'
14502 || out_attr[Tag_CPU_arch_profile].i == 'R'
14503 || out_attr[Tag_CPU_arch_profile].i == 'S'
14504 || out_attr[Tag_CPU_arch].i == 13))
14506 /* Otherwise, DSP instructions are added and not part of output
14514 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14515 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14516 when it's 0. It might mean absence of FP hardware if
14517 Tag_FP_arch is zero. */
14519 #define VFP_VERSION_COUNT 9
14520 static const struct
14524 } vfp_versions[VFP_VERSION_COUNT] =
14540 /* If the output has no requirement about FP hardware,
14541 follow the requirement of the input. */
14542 if (out_attr[i].i == 0)
14544 /* This assert is still reasonable, we shouldn't
14545 produce the suspicious build attribute
14546 combination (See below for in_attr). */
14547 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14548 out_attr[i].i = in_attr[i].i;
14549 out_attr[Tag_ABI_HardFP_use].i
14550 = in_attr[Tag_ABI_HardFP_use].i;
14553 /* If the input has no requirement about FP hardware, do
14555 else if (in_attr[i].i == 0)
14557 /* We used to assert that Tag_ABI_HardFP_use was
14558 zero here, but we should never assert when
14559 consuming an object file that has suspicious
14560 build attributes. The single precision variant
14561 of 'no FP architecture' is still 'no FP
14562 architecture', so we just ignore the tag in this
14567 /* Both the input and the output have nonzero Tag_FP_arch.
14568 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14570 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14572 if (in_attr[Tag_ABI_HardFP_use].i == 0
14573 && out_attr[Tag_ABI_HardFP_use].i == 0)
14575 /* If the input and the output have different Tag_ABI_HardFP_use,
14576 the combination of them is 0 (implied by Tag_FP_arch). */
14577 else if (in_attr[Tag_ABI_HardFP_use].i
14578 != out_attr[Tag_ABI_HardFP_use].i)
14579 out_attr[Tag_ABI_HardFP_use].i = 0;
14581 /* Now we can handle Tag_FP_arch. */
14583 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14584 pick the biggest. */
14585 if (in_attr[i].i >= VFP_VERSION_COUNT
14586 && in_attr[i].i > out_attr[i].i)
14588 out_attr[i] = in_attr[i];
14591 /* The output uses the superset of input features
14592 (ISA version) and registers. */
14593 ver = vfp_versions[in_attr[i].i].ver;
14594 if (ver < vfp_versions[out_attr[i].i].ver)
14595 ver = vfp_versions[out_attr[i].i].ver;
14596 regs = vfp_versions[in_attr[i].i].regs;
14597 if (regs < vfp_versions[out_attr[i].i].regs)
14598 regs = vfp_versions[out_attr[i].i].regs;
14599 /* This assumes all possible supersets are also a valid
14601 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14603 if (regs == vfp_versions[newval].regs
14604 && ver == vfp_versions[newval].ver)
14607 out_attr[i].i = newval;
14610 case Tag_PCS_config:
14611 if (out_attr[i].i == 0)
14612 out_attr[i].i = in_attr[i].i;
14613 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14615 /* It's sometimes ok to mix different configs, so this is only
14618 (_("warning: %pB: conflicting platform configuration"), ibfd);
14621 case Tag_ABI_PCS_R9_use:
14622 if (in_attr[i].i != out_attr[i].i
14623 && out_attr[i].i != AEABI_R9_unused
14624 && in_attr[i].i != AEABI_R9_unused)
14627 (_("error: %pB: conflicting use of R9"), ibfd);
14630 if (out_attr[i].i == AEABI_R9_unused)
14631 out_attr[i].i = in_attr[i].i;
14633 case Tag_ABI_PCS_RW_data:
14634 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14635 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14636 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14639 (_("error: %pB: SB relative addressing conflicts with use of R9"),
14643 /* Use the smallest value specified. */
14644 if (in_attr[i].i < out_attr[i].i)
14645 out_attr[i].i = in_attr[i].i;
14647 case Tag_ABI_PCS_wchar_t:
14648 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14649 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14652 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14653 ibfd, in_attr[i].i, out_attr[i].i);
14655 else if (in_attr[i].i && !out_attr[i].i)
14656 out_attr[i].i = in_attr[i].i;
14658 case Tag_ABI_enum_size:
14659 if (in_attr[i].i != AEABI_enum_unused)
14661 if (out_attr[i].i == AEABI_enum_unused
14662 || out_attr[i].i == AEABI_enum_forced_wide)
14664 /* The existing object is compatible with anything.
14665 Use whatever requirements the new object has. */
14666 out_attr[i].i = in_attr[i].i;
14668 else if (in_attr[i].i != AEABI_enum_forced_wide
14669 && out_attr[i].i != in_attr[i].i
14670 && !elf_arm_tdata (obfd)->no_enum_size_warning)
14672 static const char *aeabi_enum_names[] =
14673 { "", "variable-size", "32-bit", "" };
14674 const char *in_name =
14675 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14676 ? aeabi_enum_names[in_attr[i].i]
14678 const char *out_name =
14679 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14680 ? aeabi_enum_names[out_attr[i].i]
14683 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14684 ibfd, in_name, out_name);
14688 case Tag_ABI_VFP_args:
14691 case Tag_ABI_WMMX_args:
14692 if (in_attr[i].i != out_attr[i].i)
14695 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
14700 case Tag_compatibility:
14701 /* Merged in target-independent code. */
14703 case Tag_ABI_HardFP_use:
14704 /* This is handled along with Tag_FP_arch. */
14706 case Tag_ABI_FP_16bit_format:
14707 if (in_attr[i].i != 0 && out_attr[i].i != 0)
14709 if (in_attr[i].i != out_attr[i].i)
14712 (_("error: fp16 format mismatch between %pB and %pB"),
14717 if (in_attr[i].i != 0)
14718 out_attr[i].i = in_attr[i].i;
14722 /* A value of zero on input means that the divide instruction may
14723 be used if available in the base architecture as specified via
14724 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
14725 the user did not want divide instructions. A value of 2
14726 explicitly means that divide instructions were allowed in ARM
14727 and Thumb state. */
14728 if (in_attr[i].i == out_attr[i].i)
14729 /* Do nothing. */ ;
14730 else if (elf32_arm_attributes_forbid_div (in_attr)
14731 && !elf32_arm_attributes_accept_div (out_attr))
14733 else if (elf32_arm_attributes_forbid_div (out_attr)
14734 && elf32_arm_attributes_accept_div (in_attr))
14735 out_attr[i].i = in_attr[i].i;
14736 else if (in_attr[i].i == 2)
14737 out_attr[i].i = in_attr[i].i;
14740 case Tag_MPextension_use_legacy:
14741 /* We don't output objects with Tag_MPextension_use_legacy - we
14742 move the value to Tag_MPextension_use. */
14743 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
14745 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
14748 (_("%pB has both the current and legacy "
14749 "Tag_MPextension_use attributes"),
14755 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
14756 out_attr[Tag_MPextension_use] = in_attr[i];
14760 case Tag_nodefaults:
14761 /* This tag is set if it exists, but the value is unused (and is
14762 typically zero). We don't actually need to do anything here -
14763 the merge happens automatically when the type flags are merged
14766 case Tag_also_compatible_with:
14767 /* Already done in Tag_CPU_arch. */
14769 case Tag_conformance:
14770 /* Keep the attribute if it matches. Throw it away otherwise.
14771 No attribute means no claim to conform. */
14772 if (!in_attr[i].s || !out_attr[i].s
14773 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
14774 out_attr[i].s = NULL;
14779 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
14782 /* If out_attr was copied from in_attr then it won't have a type yet. */
14783 if (in_attr[i].type && !out_attr[i].type)
14784 out_attr[i].type = in_attr[i].type;
14787 /* Merge Tag_compatibility attributes and any common GNU ones. */
14788 if (!_bfd_elf_merge_object_attributes (ibfd, info))
14791 /* Check for any attributes not known on ARM. */
14792 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
14798 /* Return TRUE if the two EABI versions are incompatible. */
14801 elf32_arm_versions_compatible (unsigned iver, unsigned over)
14803 /* v4 and v5 are the same spec before and after it was released,
14804 so allow mixing them. */
14805 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
14806 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
14809 return (iver == over);
14812 /* Merge backend specific data from an object file to the output
14813 object file when linking. */
14816 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
14818 /* Display the flags field. */
14821 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
14823 FILE * file = (FILE *) ptr;
14824 unsigned long flags;
14826 BFD_ASSERT (abfd != NULL && ptr != NULL);
14828 /* Print normal ELF private data. */
14829 _bfd_elf_print_private_bfd_data (abfd, ptr);
14831 flags = elf_elfheader (abfd)->e_flags;
14832 /* Ignore init flag - it may not be set, despite the flags field
14833 containing valid data. */
14835 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
14837 switch (EF_ARM_EABI_VERSION (flags))
14839 case EF_ARM_EABI_UNKNOWN:
14840 /* The following flag bits are GNU extensions and not part of the
14841 official ARM ELF extended ABI. Hence they are only decoded if
14842 the EABI version is not set. */
14843 if (flags & EF_ARM_INTERWORK)
14844 fprintf (file, _(" [interworking enabled]"));
14846 if (flags & EF_ARM_APCS_26)
14847 fprintf (file, " [APCS-26]");
14849 fprintf (file, " [APCS-32]");
14851 if (flags & EF_ARM_VFP_FLOAT)
14852 fprintf (file, _(" [VFP float format]"));
14853 else if (flags & EF_ARM_MAVERICK_FLOAT)
14854 fprintf (file, _(" [Maverick float format]"));
14856 fprintf (file, _(" [FPA float format]"));
14858 if (flags & EF_ARM_APCS_FLOAT)
14859 fprintf (file, _(" [floats passed in float registers]"));
14861 if (flags & EF_ARM_PIC)
14862 fprintf (file, _(" [position independent]"));
14864 if (flags & EF_ARM_NEW_ABI)
14865 fprintf (file, _(" [new ABI]"));
14867 if (flags & EF_ARM_OLD_ABI)
14868 fprintf (file, _(" [old ABI]"));
14870 if (flags & EF_ARM_SOFT_FLOAT)
14871 fprintf (file, _(" [software FP]"));
14873 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
14874 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
14875 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
14876 | EF_ARM_MAVERICK_FLOAT);
14879 case EF_ARM_EABI_VER1:
14880 fprintf (file, _(" [Version1 EABI]"));
14882 if (flags & EF_ARM_SYMSARESORTED)
14883 fprintf (file, _(" [sorted symbol table]"));
14885 fprintf (file, _(" [unsorted symbol table]"));
14887 flags &= ~ EF_ARM_SYMSARESORTED;
14890 case EF_ARM_EABI_VER2:
14891 fprintf (file, _(" [Version2 EABI]"));
14893 if (flags & EF_ARM_SYMSARESORTED)
14894 fprintf (file, _(" [sorted symbol table]"));
14896 fprintf (file, _(" [unsorted symbol table]"));
14898 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
14899 fprintf (file, _(" [dynamic symbols use segment index]"));
14901 if (flags & EF_ARM_MAPSYMSFIRST)
14902 fprintf (file, _(" [mapping symbols precede others]"));
14904 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
14905 | EF_ARM_MAPSYMSFIRST);
14908 case EF_ARM_EABI_VER3:
14909 fprintf (file, _(" [Version3 EABI]"));
14912 case EF_ARM_EABI_VER4:
14913 fprintf (file, _(" [Version4 EABI]"));
14916 case EF_ARM_EABI_VER5:
14917 fprintf (file, _(" [Version5 EABI]"));
14919 if (flags & EF_ARM_ABI_FLOAT_SOFT)
14920 fprintf (file, _(" [soft-float ABI]"));
14922 if (flags & EF_ARM_ABI_FLOAT_HARD)
14923 fprintf (file, _(" [hard-float ABI]"));
14925 flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
14928 if (flags & EF_ARM_BE8)
14929 fprintf (file, _(" [BE8]"));
14931 if (flags & EF_ARM_LE8)
14932 fprintf (file, _(" [LE8]"));
14934 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
14938 fprintf (file, _(" <EABI version unrecognised>"));
14942 flags &= ~ EF_ARM_EABIMASK;
14944 if (flags & EF_ARM_RELEXEC)
14945 fprintf (file, _(" [relocatable executable]"));
14947 if (flags & EF_ARM_PIC)
14948 fprintf (file, _(" [position independent]"));
14950 if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
14951 fprintf (file, _(" [FDPIC ABI supplement]"));
14953 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);
14956 fprintf (file, _("<Unrecognised flag bits set>"));
14958 fputc ('\n', file);
14964 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
14966 switch (ELF_ST_TYPE (elf_sym->st_info))
14968 case STT_ARM_TFUNC:
14969 return ELF_ST_TYPE (elf_sym->st_info);
14971 case STT_ARM_16BIT:
14972 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
14973 This allows us to distinguish between data used by Thumb instructions
14974 and non-data (which is probably code) inside Thumb regions of an
14976 if (type != STT_OBJECT && type != STT_TLS)
14977 return ELF_ST_TYPE (elf_sym->st_info);
14988 elf32_arm_gc_mark_hook (asection *sec,
14989 struct bfd_link_info *info,
14990 Elf_Internal_Rela *rel,
14991 struct elf_link_hash_entry *h,
14992 Elf_Internal_Sym *sym)
14995 switch (ELF32_R_TYPE (rel->r_info))
14997 case R_ARM_GNU_VTINHERIT:
14998 case R_ARM_GNU_VTENTRY:
15002 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15005 /* Look through the relocs for a section during the first phase. */
15008 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15009 asection *sec, const Elf_Internal_Rela *relocs)
15011 Elf_Internal_Shdr *symtab_hdr;
15012 struct elf_link_hash_entry **sym_hashes;
15013 const Elf_Internal_Rela *rel;
15014 const Elf_Internal_Rela *rel_end;
15017 struct elf32_arm_link_hash_table *htab;
15018 bfd_boolean call_reloc_p;
15019 bfd_boolean may_become_dynamic_p;
15020 bfd_boolean may_need_local_target_p;
15021 unsigned long nsyms;
15023 if (bfd_link_relocatable (info))
15026 BFD_ASSERT (is_arm_elf (abfd));
15028 htab = elf32_arm_hash_table (info);
15034 /* Create dynamic sections for relocatable executables so that we can
15035 copy relocations. */
15036 if (htab->root.is_relocatable_executable
15037 && ! htab->root.dynamic_sections_created)
15039 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
15043 if (htab->root.dynobj == NULL)
15044 htab->root.dynobj = abfd;
15045 if (!create_ifunc_sections (info))
15048 dynobj = htab->root.dynobj;
15050 symtab_hdr = & elf_symtab_hdr (abfd);
15051 sym_hashes = elf_sym_hashes (abfd);
15052 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15054 rel_end = relocs + sec->reloc_count;
15055 for (rel = relocs; rel < rel_end; rel++)
15057 Elf_Internal_Sym *isym;
15058 struct elf_link_hash_entry *h;
15059 struct elf32_arm_link_hash_entry *eh;
15060 unsigned int r_symndx;
15063 r_symndx = ELF32_R_SYM (rel->r_info);
15064 r_type = ELF32_R_TYPE (rel->r_info);
15065 r_type = arm_real_reloc_type (htab, r_type);
15067 if (r_symndx >= nsyms
15068 /* PR 9934: It is possible to have relocations that do not
15069 refer to symbols, thus it is also possible to have an
15070 object file containing relocations but no symbol table. */
15071 && (r_symndx > STN_UNDEF || nsyms > 0))
15073 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15082 if (r_symndx < symtab_hdr->sh_info)
15084 /* A local symbol. */
15085 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
15092 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15093 while (h->root.type == bfd_link_hash_indirect
15094 || h->root.type == bfd_link_hash_warning)
15095 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15099 eh = (struct elf32_arm_link_hash_entry *) h;
15101 call_reloc_p = FALSE;
15102 may_become_dynamic_p = FALSE;
15103 may_need_local_target_p = FALSE;
15105 /* Could be done earlier, if h were already available. */
15106 r_type = elf32_arm_tls_transition (info, r_type, h);
15109 case R_ARM_GOTOFFFUNCDESC:
15113 if (!elf32_arm_allocate_local_sym_info (abfd))
15115 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].gotofffuncdesc_cnt += 1;
15116 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15120 eh->fdpic_cnts.gotofffuncdesc_cnt++;
15125 case R_ARM_GOTFUNCDESC:
15129 /* Such a relocation is not supposed to be generated
15130 by gcc on a static function. */
15131 /* Anyway if needed it could be handled. */
15136 eh->fdpic_cnts.gotfuncdesc_cnt++;
15141 case R_ARM_FUNCDESC:
15145 if (!elf32_arm_allocate_local_sym_info (abfd))
15147 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_cnt += 1;
15148 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15152 eh->fdpic_cnts.funcdesc_cnt++;
15158 case R_ARM_GOT_PREL:
15159 case R_ARM_TLS_GD32:
15160 case R_ARM_TLS_GD32_FDPIC:
15161 case R_ARM_TLS_IE32:
15162 case R_ARM_TLS_IE32_FDPIC:
15163 case R_ARM_TLS_GOTDESC:
15164 case R_ARM_TLS_DESCSEQ:
15165 case R_ARM_THM_TLS_DESCSEQ:
15166 case R_ARM_TLS_CALL:
15167 case R_ARM_THM_TLS_CALL:
15168 /* This symbol requires a global offset table entry. */
15170 int tls_type, old_tls_type;
15174 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15175 case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15177 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15178 case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15180 case R_ARM_TLS_GOTDESC:
15181 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15182 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15183 tls_type = GOT_TLS_GDESC; break;
15185 default: tls_type = GOT_NORMAL; break;
15188 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15189 info->flags |= DF_STATIC_TLS;
15194 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15198 /* This is a global offset table entry for a local symbol. */
15199 if (!elf32_arm_allocate_local_sym_info (abfd))
15201 elf_local_got_refcounts (abfd)[r_symndx] += 1;
15202 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15205 /* If a variable is accessed with both tls methods, two
15206 slots may be created. */
15207 if (GOT_TLS_GD_ANY_P (old_tls_type)
15208 && GOT_TLS_GD_ANY_P (tls_type))
15209 tls_type |= old_tls_type;
15211 /* We will already have issued an error message if there
15212 is a TLS/non-TLS mismatch, based on the symbol
15213 type. So just combine any TLS types needed. */
15214 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15215 && tls_type != GOT_NORMAL)
15216 tls_type |= old_tls_type;
15218 /* If the symbol is accessed in both IE and GDESC
15219 method, we're able to relax. Turn off the GDESC flag,
15220 without messing up with any other kind of tls types
15221 that may be involved. */
15222 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15223 tls_type &= ~GOT_TLS_GDESC;
15225 if (old_tls_type != tls_type)
15228 elf32_arm_hash_entry (h)->tls_type = tls_type;
15230 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15233 /* Fall through. */
15235 case R_ARM_TLS_LDM32:
15236 case R_ARM_TLS_LDM32_FDPIC:
15237 if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15238 htab->tls_ldm_got.refcount++;
15239 /* Fall through. */
15241 case R_ARM_GOTOFF32:
15243 if (htab->root.sgot == NULL
15244 && !create_got_section (htab->root.dynobj, info))
15253 case R_ARM_THM_CALL:
15254 case R_ARM_THM_JUMP24:
15255 case R_ARM_THM_JUMP19:
15256 call_reloc_p = TRUE;
15257 may_need_local_target_p = TRUE;
15261 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15262 ldr __GOTT_INDEX__ offsets. */
15263 if (!htab->vxworks_p)
15265 may_need_local_target_p = TRUE;
15268 else goto jump_over;
15270 /* Fall through. */
15272 case R_ARM_MOVW_ABS_NC:
15273 case R_ARM_MOVT_ABS:
15274 case R_ARM_THM_MOVW_ABS_NC:
15275 case R_ARM_THM_MOVT_ABS:
15276 if (bfd_link_pic (info))
15279 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15280 abfd, elf32_arm_howto_table_1[r_type].name,
15281 (h) ? h->root.root.string : "a local symbol");
15282 bfd_set_error (bfd_error_bad_value);
15286 /* Fall through. */
15288 case R_ARM_ABS32_NOI:
15290 if (h != NULL && bfd_link_executable (info))
15292 h->pointer_equality_needed = 1;
15294 /* Fall through. */
15296 case R_ARM_REL32_NOI:
15297 case R_ARM_MOVW_PREL_NC:
15298 case R_ARM_MOVT_PREL:
15299 case R_ARM_THM_MOVW_PREL_NC:
15300 case R_ARM_THM_MOVT_PREL:
15302 /* Should the interworking branches be listed here? */
15303 if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
15305 && (sec->flags & SEC_ALLOC) != 0)
15308 && elf32_arm_howto_from_type (r_type)->pc_relative)
15310 /* In shared libraries and relocatable executables,
15311 we treat local relative references as calls;
15312 see the related SYMBOL_CALLS_LOCAL code in
15313 allocate_dynrelocs. */
15314 call_reloc_p = TRUE;
15315 may_need_local_target_p = TRUE;
15318 /* We are creating a shared library or relocatable
15319 executable, and this is a reloc against a global symbol,
15320 or a non-PC-relative reloc against a local symbol.
15321 We may need to copy the reloc into the output. */
15322 may_become_dynamic_p = TRUE;
15325 may_need_local_target_p = TRUE;
15328 /* This relocation describes the C++ object vtable hierarchy.
15329 Reconstruct it for later use during GC. */
15330 case R_ARM_GNU_VTINHERIT:
15331 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15335 /* This relocation describes which C++ vtable entries are actually
15336 used. Record for later use during GC. */
15337 case R_ARM_GNU_VTENTRY:
15338 BFD_ASSERT (h != NULL);
15340 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15348 /* We may need a .plt entry if the function this reloc
15349 refers to is in a different object, regardless of the
15350 symbol's type. We can't tell for sure yet, because
15351 something later might force the symbol local. */
15353 else if (may_need_local_target_p)
15354 /* If this reloc is in a read-only section, we might
15355 need a copy reloc. We can't check reliably at this
15356 stage whether the section is read-only, as input
15357 sections have not yet been mapped to output sections.
15358 Tentatively set the flag for now, and correct in
15359 adjust_dynamic_symbol. */
15360 h->non_got_ref = 1;
15363 if (may_need_local_target_p
15364 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15366 union gotplt_union *root_plt;
15367 struct arm_plt_info *arm_plt;
15368 struct arm_local_iplt_info *local_iplt;
15372 root_plt = &h->plt;
15373 arm_plt = &eh->plt;
15377 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15378 if (local_iplt == NULL)
15380 root_plt = &local_iplt->root;
15381 arm_plt = &local_iplt->arm;
15384 /* If the symbol is a function that doesn't bind locally,
15385 this relocation will need a PLT entry. */
15386 if (root_plt->refcount != -1)
15387 root_plt->refcount += 1;
15390 arm_plt->noncall_refcount++;
15392 /* It's too early to use htab->use_blx here, so we have to
15393 record possible blx references separately from
15394 relocs that definitely need a thumb stub. */
15396 if (r_type == R_ARM_THM_CALL)
15397 arm_plt->maybe_thumb_refcount += 1;
15399 if (r_type == R_ARM_THM_JUMP24
15400 || r_type == R_ARM_THM_JUMP19)
15401 arm_plt->thumb_refcount += 1;
15404 if (may_become_dynamic_p)
15406 struct elf_dyn_relocs *p, **head;
15408 /* Create a reloc section in dynobj. */
15409 if (sreloc == NULL)
15411 sreloc = _bfd_elf_make_dynamic_reloc_section
15412 (sec, dynobj, 2, abfd, ! htab->use_rel);
15414 if (sreloc == NULL)
15417 /* BPABI objects never have dynamic relocations mapped. */
15418 if (htab->symbian_p)
15422 flags = bfd_get_section_flags (dynobj, sreloc);
15423 flags &= ~(SEC_LOAD | SEC_ALLOC);
15424 bfd_set_section_flags (dynobj, sreloc, flags);
15428 /* If this is a global symbol, count the number of
15429 relocations we need for this symbol. */
15431 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
15434 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15440 if (p == NULL || p->sec != sec)
15442 bfd_size_type amt = sizeof *p;
15444 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15454 if (elf32_arm_howto_from_type (r_type)->pc_relative)
15457 if (h == NULL && htab->fdpic_p && !bfd_link_pic(info)
15458 && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI) {
15459 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
15460 that will become rofixup. */
15461 /* This is due to the fact that we suppose all will become rofixup. */
15462 fprintf(stderr, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type);
15464 (_("FDPIC does not yet support %s relocation"
15465 " to become dynamic for executable"),
15466 elf32_arm_howto_table_1[r_type].name);
15476 elf32_arm_update_relocs (asection *o,
15477 struct bfd_elf_section_reloc_data *reldata)
15479 void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
15480 void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
15481 const struct elf_backend_data *bed;
15482 _arm_elf_section_data *eado;
15483 struct bfd_link_order *p;
15484 bfd_byte *erela_head, *erela;
15485 Elf_Internal_Rela *irela_head, *irela;
15486 Elf_Internal_Shdr *rel_hdr;
15488 unsigned int count;
15490 eado = get_arm_elf_section_data (o);
15492 if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
15496 bed = get_elf_backend_data (abfd);
15497 rel_hdr = reldata->hdr;
15499 if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
15501 swap_in = bed->s->swap_reloc_in;
15502 swap_out = bed->s->swap_reloc_out;
15504 else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
15506 swap_in = bed->s->swap_reloca_in;
15507 swap_out = bed->s->swap_reloca_out;
15512 erela_head = rel_hdr->contents;
15513 irela_head = (Elf_Internal_Rela *) bfd_zmalloc
15514 ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
15516 erela = erela_head;
15517 irela = irela_head;
15520 for (p = o->map_head.link_order; p; p = p->next)
15522 if (p->type == bfd_section_reloc_link_order
15523 || p->type == bfd_symbol_reloc_link_order)
15525 (*swap_in) (abfd, erela, irela);
15526 erela += rel_hdr->sh_entsize;
15530 else if (p->type == bfd_indirect_link_order)
15532 struct bfd_elf_section_reloc_data *input_reldata;
15533 arm_unwind_table_edit *edit_list, *edit_tail;
15534 _arm_elf_section_data *eadi;
15539 i = p->u.indirect.section;
15541 eadi = get_arm_elf_section_data (i);
15542 edit_list = eadi->u.exidx.unwind_edit_list;
15543 edit_tail = eadi->u.exidx.unwind_edit_tail;
15544 offset = o->vma + i->output_offset;
15546 if (eadi->elf.rel.hdr &&
15547 eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
15548 input_reldata = &eadi->elf.rel;
15549 else if (eadi->elf.rela.hdr &&
15550 eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
15551 input_reldata = &eadi->elf.rela;
15557 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15559 arm_unwind_table_edit *edit_node, *edit_next;
15561 bfd_vma reloc_index;
15563 (*swap_in) (abfd, erela, irela);
15564 reloc_index = (irela->r_offset - offset) / 8;
15567 edit_node = edit_list;
15568 for (edit_next = edit_list;
15569 edit_next && edit_next->index <= reloc_index;
15570 edit_next = edit_node->next)
15573 edit_node = edit_next;
15576 if (edit_node->type != DELETE_EXIDX_ENTRY
15577 || edit_node->index != reloc_index)
15579 irela->r_offset -= bias * 8;
15584 erela += rel_hdr->sh_entsize;
15587 if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
15589 /* New relocation entity. */
15590 asection *text_sec = edit_tail->linked_section;
15591 asection *text_out = text_sec->output_section;
15592 bfd_vma exidx_offset = offset + i->size - 8;
15594 irela->r_addend = 0;
15595 irela->r_offset = exidx_offset;
15596 irela->r_info = ELF32_R_INFO
15597 (text_out->target_index, R_ARM_PREL31);
15604 for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15606 (*swap_in) (abfd, erela, irela);
15607 erela += rel_hdr->sh_entsize;
15611 count += NUM_SHDR_ENTRIES (input_reldata->hdr);
15616 reldata->count = count;
15617 rel_hdr->sh_size = count * rel_hdr->sh_entsize;
15619 erela = erela_head;
15620 irela = irela_head;
15623 (*swap_out) (abfd, irela, erela);
15624 erela += rel_hdr->sh_entsize;
15631 /* Hashes are no longer valid. */
15632 free (reldata->hashes);
15633 reldata->hashes = NULL;
15636 /* Unwinding tables are not referenced directly. This pass marks them as
15637 required if the corresponding code section is marked. Similarly, ARMv8-M
15638 secure entry functions can only be referenced by SG veneers which are
15639 created after the GC process. They need to be marked in case they reside in
15640 their own section (as would be the case if code was compiled with
15641 -ffunction-sections). */
15644 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
15645 elf_gc_mark_hook_fn gc_mark_hook)
15648 Elf_Internal_Shdr **elf_shdrp;
15649 asection *cmse_sec;
15650 obj_attribute *out_attr;
15651 Elf_Internal_Shdr *symtab_hdr;
15652 unsigned i, sym_count, ext_start;
15653 const struct elf_backend_data *bed;
15654 struct elf_link_hash_entry **sym_hashes;
15655 struct elf32_arm_link_hash_entry *cmse_hash;
15656 bfd_boolean again, is_v8m, first_bfd_browse = TRUE;
15658 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
15660 out_attr = elf_known_obj_attributes_proc (info->output_bfd);
15661 is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
15662 && out_attr[Tag_CPU_arch_profile].i == 'M';
15664 /* Marking EH data may cause additional code sections to be marked,
15665 requiring multiple passes. */
15670 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
15674 if (! is_arm_elf (sub))
15677 elf_shdrp = elf_elfsections (sub);
15678 for (o = sub->sections; o != NULL; o = o->next)
15680 Elf_Internal_Shdr *hdr;
15682 hdr = &elf_section_data (o)->this_hdr;
15683 if (hdr->sh_type == SHT_ARM_EXIDX
15685 && hdr->sh_link < elf_numsections (sub)
15687 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
15690 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
15695 /* Mark section holding ARMv8-M secure entry functions. We mark all
15696 of them so no need for a second browsing. */
15697 if (is_v8m && first_bfd_browse)
15699 sym_hashes = elf_sym_hashes (sub);
15700 bed = get_elf_backend_data (sub);
15701 symtab_hdr = &elf_tdata (sub)->symtab_hdr;
15702 sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
15703 ext_start = symtab_hdr->sh_info;
15705 /* Scan symbols. */
15706 for (i = ext_start; i < sym_count; i++)
15708 cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
15710 /* Assume it is a special symbol. If not, cmse_scan will
15711 warn about it and user can do something about it. */
15712 if (ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
15714 cmse_sec = cmse_hash->root.root.u.def.section;
15715 if (!cmse_sec->gc_mark
15716 && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
15722 first_bfd_browse = FALSE;
15728 /* Treat mapping symbols as special target symbols. */
15731 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
15733 return bfd_is_arm_special_symbol_name (sym->name,
15734 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
15737 /* This is a copy of elf_find_function() from elf.c except that
15738 ARM mapping symbols are ignored when looking for function names
15739 and STT_ARM_TFUNC is considered to a function type. */
15742 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
15743 asymbol ** symbols,
15744 asection * section,
15746 const char ** filename_ptr,
15747 const char ** functionname_ptr)
15749 const char * filename = NULL;
15750 asymbol * func = NULL;
15751 bfd_vma low_func = 0;
15754 for (p = symbols; *p != NULL; p++)
15756 elf_symbol_type *q;
15758 q = (elf_symbol_type *) *p;
15760 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
15765 filename = bfd_asymbol_name (&q->symbol);
15768 case STT_ARM_TFUNC:
15770 /* Skip mapping symbols. */
15771 if ((q->symbol.flags & BSF_LOCAL)
15772 && bfd_is_arm_special_symbol_name (q->symbol.name,
15773 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
15775 /* Fall through. */
15776 if (bfd_get_section (&q->symbol) == section
15777 && q->symbol.value >= low_func
15778 && q->symbol.value <= offset)
15780 func = (asymbol *) q;
15781 low_func = q->symbol.value;
15791 *filename_ptr = filename;
15792 if (functionname_ptr)
15793 *functionname_ptr = bfd_asymbol_name (func);
15799 /* Find the nearest line to a particular section and offset, for error
15800 reporting. This code is a duplicate of the code in elf.c, except
15801 that it uses arm_elf_find_function. */
15804 elf32_arm_find_nearest_line (bfd * abfd,
15805 asymbol ** symbols,
15806 asection * section,
15808 const char ** filename_ptr,
15809 const char ** functionname_ptr,
15810 unsigned int * line_ptr,
15811 unsigned int * discriminator_ptr)
15813 bfd_boolean found = FALSE;
15815 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
15816 filename_ptr, functionname_ptr,
15817 line_ptr, discriminator_ptr,
15818 dwarf_debug_sections, 0,
15819 & elf_tdata (abfd)->dwarf2_find_line_info))
15821 if (!*functionname_ptr)
15822 arm_elf_find_function (abfd, symbols, section, offset,
15823 *filename_ptr ? NULL : filename_ptr,
15829 /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
15832 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
15833 & found, filename_ptr,
15834 functionname_ptr, line_ptr,
15835 & elf_tdata (abfd)->line_info))
15838 if (found && (*functionname_ptr || *line_ptr))
15841 if (symbols == NULL)
15844 if (! arm_elf_find_function (abfd, symbols, section, offset,
15845 filename_ptr, functionname_ptr))
15853 elf32_arm_find_inliner_info (bfd * abfd,
15854 const char ** filename_ptr,
15855 const char ** functionname_ptr,
15856 unsigned int * line_ptr)
15859 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
15860 functionname_ptr, line_ptr,
15861 & elf_tdata (abfd)->dwarf2_find_line_info);
15865 /* Find dynamic relocs for H that apply to read-only sections. */
15868 readonly_dynrelocs (struct elf_link_hash_entry *h)
15870 struct elf_dyn_relocs *p;
15872 for (p = elf32_arm_hash_entry (h)->dyn_relocs; p != NULL; p = p->next)
15874 asection *s = p->sec->output_section;
15876 if (s != NULL && (s->flags & SEC_READONLY) != 0)
15882 /* Adjust a symbol defined by a dynamic object and referenced by a
15883 regular object. The current definition is in some section of the
15884 dynamic object, but we're not including those sections. We have to
15885 change the definition to something the rest of the link can
15889 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
15890 struct elf_link_hash_entry * h)
15893 asection *s, *srel;
15894 struct elf32_arm_link_hash_entry * eh;
15895 struct elf32_arm_link_hash_table *globals;
15897 globals = elf32_arm_hash_table (info);
15898 if (globals == NULL)
15901 dynobj = elf_hash_table (info)->dynobj;
15903 /* Make sure we know what is going on here. */
15904 BFD_ASSERT (dynobj != NULL
15906 || h->type == STT_GNU_IFUNC
15910 && !h->def_regular)));
15912 eh = (struct elf32_arm_link_hash_entry *) h;
15914 /* If this is a function, put it in the procedure linkage table. We
15915 will fill in the contents of the procedure linkage table later,
15916 when we know the address of the .got section. */
15917 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
15919 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
15920 symbol binds locally. */
15921 if (h->plt.refcount <= 0
15922 || (h->type != STT_GNU_IFUNC
15923 && (SYMBOL_CALLS_LOCAL (info, h)
15924 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
15925 && h->root.type == bfd_link_hash_undefweak))))
15927 /* This case can occur if we saw a PLT32 reloc in an input
15928 file, but the symbol was never referred to by a dynamic
15929 object, or if all references were garbage collected. In
15930 such a case, we don't actually need to build a procedure
15931 linkage table, and we can just do a PC24 reloc instead. */
15932 h->plt.offset = (bfd_vma) -1;
15933 eh->plt.thumb_refcount = 0;
15934 eh->plt.maybe_thumb_refcount = 0;
15935 eh->plt.noncall_refcount = 0;
15943 /* It's possible that we incorrectly decided a .plt reloc was
15944 needed for an R_ARM_PC24 or similar reloc to a non-function sym
15945 in check_relocs. We can't decide accurately between function
15946 and non-function syms in check-relocs; Objects loaded later in
15947 the link may change h->type. So fix it now. */
15948 h->plt.offset = (bfd_vma) -1;
15949 eh->plt.thumb_refcount = 0;
15950 eh->plt.maybe_thumb_refcount = 0;
15951 eh->plt.noncall_refcount = 0;
15954 /* If this is a weak symbol, and there is a real definition, the
15955 processor independent code will have arranged for us to see the
15956 real definition first, and we can just use the same value. */
15957 if (h->is_weakalias)
15959 struct elf_link_hash_entry *def = weakdef (h);
15960 BFD_ASSERT (def->root.type == bfd_link_hash_defined);
15961 h->root.u.def.section = def->root.u.def.section;
15962 h->root.u.def.value = def->root.u.def.value;
15966 /* If there are no non-GOT references, we do not need a copy
15968 if (!h->non_got_ref)
15971 /* This is a reference to a symbol defined by a dynamic object which
15972 is not a function. */
15974 /* If we are creating a shared library, we must presume that the
15975 only references to the symbol are via the global offset table.
15976 For such cases we need not do anything here; the relocations will
15977 be handled correctly by relocate_section. Relocatable executables
15978 can reference data in shared objects directly, so we don't need to
15979 do anything here. */
15980 if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
15983 /* We must allocate the symbol in our .dynbss section, which will
15984 become part of the .bss section of the executable. There will be
15985 an entry for this symbol in the .dynsym section. The dynamic
15986 object will contain position independent code, so all references
15987 from the dynamic object to this symbol will go through the global
15988 offset table. The dynamic linker will use the .dynsym entry to
15989 determine the address it must put in the global offset table, so
15990 both the dynamic object and the regular object will refer to the
15991 same memory location for the variable. */
15992 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
15993 linker to copy the initial value out of the dynamic object and into
15994 the runtime process image. We need to remember the offset into the
15995 .rel(a).bss section we are going to use. */
15996 if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
15998 s = globals->root.sdynrelro;
15999 srel = globals->root.sreldynrelro;
16003 s = globals->root.sdynbss;
16004 srel = globals->root.srelbss;
16006 if (info->nocopyreloc == 0
16007 && (h->root.u.def.section->flags & SEC_ALLOC) != 0
16010 elf32_arm_allocate_dynrelocs (info, srel, 1);
16014 return _bfd_elf_adjust_dynamic_copy (info, h, s);
16017 /* Allocate space in .plt, .got and associated reloc sections for
16021 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
16023 struct bfd_link_info *info;
16024 struct elf32_arm_link_hash_table *htab;
16025 struct elf32_arm_link_hash_entry *eh;
16026 struct elf_dyn_relocs *p;
16028 if (h->root.type == bfd_link_hash_indirect)
16031 eh = (struct elf32_arm_link_hash_entry *) h;
16033 info = (struct bfd_link_info *) inf;
16034 htab = elf32_arm_hash_table (info);
16038 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
16039 && h->plt.refcount > 0)
16041 /* Make sure this symbol is output as a dynamic symbol.
16042 Undefined weak syms won't yet be marked as dynamic. */
16043 if (h->dynindx == -1 && !h->forced_local
16044 && h->root.type == bfd_link_hash_undefweak)
16046 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16050 /* If the call in the PLT entry binds locally, the associated
16051 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16052 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
16053 than the .plt section. */
16054 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
16057 if (eh->plt.noncall_refcount == 0
16058 && SYMBOL_REFERENCES_LOCAL (info, h))
16059 /* All non-call references can be resolved directly.
16060 This means that they can (and in some cases, must)
16061 resolve directly to the run-time target, rather than
16062 to the PLT. That in turns means that any .got entry
16063 would be equal to the .igot.plt entry, so there's
16064 no point having both. */
16065 h->got.refcount = 0;
16068 if (bfd_link_pic (info)
16070 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
16072 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
16074 /* If this symbol is not defined in a regular file, and we are
16075 not generating a shared library, then set the symbol to this
16076 location in the .plt. This is required to make function
16077 pointers compare as equal between the normal executable and
16078 the shared library. */
16079 if (! bfd_link_pic (info)
16080 && !h->def_regular)
16082 h->root.u.def.section = htab->root.splt;
16083 h->root.u.def.value = h->plt.offset;
16085 /* Make sure the function is not marked as Thumb, in case
16086 it is the target of an ABS32 relocation, which will
16087 point to the PLT entry. */
16088 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16091 /* VxWorks executables have a second set of relocations for
16092 each PLT entry. They go in a separate relocation section,
16093 which is processed by the kernel loader. */
16094 if (htab->vxworks_p && !bfd_link_pic (info))
16096 /* There is a relocation for the initial PLT entry:
16097 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
16098 if (h->plt.offset == htab->plt_header_size)
16099 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16101 /* There are two extra relocations for each subsequent
16102 PLT entry: an R_ARM_32 relocation for the GOT entry,
16103 and an R_ARM_32 relocation for the PLT entry. */
16104 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
16109 h->plt.offset = (bfd_vma) -1;
16115 h->plt.offset = (bfd_vma) -1;
16119 eh = (struct elf32_arm_link_hash_entry *) h;
16120 eh->tlsdesc_got = (bfd_vma) -1;
16122 if (h->got.refcount > 0)
16126 int tls_type = elf32_arm_hash_entry (h)->tls_type;
16129 /* Make sure this symbol is output as a dynamic symbol.
16130 Undefined weak syms won't yet be marked as dynamic. */
16131 if (htab->root.dynamic_sections_created && h->dynindx == -1 && !h->forced_local
16132 && h->root.type == bfd_link_hash_undefweak)
16134 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16138 if (!htab->symbian_p)
16140 s = htab->root.sgot;
16141 h->got.offset = s->size;
16143 if (tls_type == GOT_UNKNOWN)
16146 if (tls_type == GOT_NORMAL)
16147 /* Non-TLS symbols need one GOT slot. */
16151 if (tls_type & GOT_TLS_GDESC)
16153 /* R_ARM_TLS_DESC needs 2 GOT slots. */
16155 = (htab->root.sgotplt->size
16156 - elf32_arm_compute_jump_table_size (htab));
16157 htab->root.sgotplt->size += 8;
16158 h->got.offset = (bfd_vma) -2;
16159 /* plt.got_offset needs to know there's a TLS_DESC
16160 reloc in the middle of .got.plt. */
16161 htab->num_tls_desc++;
16164 if (tls_type & GOT_TLS_GD)
16166 /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16167 consecutive GOT slots. If the symbol is both GD
16168 and GDESC, got.offset may have been
16170 h->got.offset = s->size;
16174 if (tls_type & GOT_TLS_IE)
16175 /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16180 dyn = htab->root.dynamic_sections_created;
16183 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
16184 bfd_link_pic (info),
16186 && (!bfd_link_pic (info)
16187 || !SYMBOL_REFERENCES_LOCAL (info, h)))
16190 if (tls_type != GOT_NORMAL
16191 && (bfd_link_pic (info) || indx != 0)
16192 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16193 || h->root.type != bfd_link_hash_undefweak))
16195 if (tls_type & GOT_TLS_IE)
16196 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16198 if (tls_type & GOT_TLS_GD)
16199 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16201 if (tls_type & GOT_TLS_GDESC)
16203 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16204 /* GDESC needs a trampoline to jump to. */
16205 htab->tls_trampoline = -1;
16208 /* Only GD needs it. GDESC just emits one relocation per
16210 if ((tls_type & GOT_TLS_GD) && indx != 0)
16211 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16213 else if (((indx != -1) || htab->fdpic_p)
16214 && !SYMBOL_REFERENCES_LOCAL (info, h))
16216 if (htab->root.dynamic_sections_created)
16217 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
16218 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16220 else if (h->type == STT_GNU_IFUNC
16221 && eh->plt.noncall_refcount == 0)
16222 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16223 they all resolve dynamically instead. Reserve room for the
16224 GOT entry's R_ARM_IRELATIVE relocation. */
16225 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16226 else if (bfd_link_pic (info)
16227 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16228 || h->root.type != bfd_link_hash_undefweak))
16229 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
16230 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16231 else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16232 /* Reserve room for rofixup for FDPIC executable. */
16233 /* TLS relocs do not need space since they are completely
16235 htab->srofixup->size += 4;
16239 h->got.offset = (bfd_vma) -1;
16241 /* FDPIC support. */
16242 if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16244 /* Symbol musn't be exported. */
16245 if (h->dynindx != -1)
16248 /* We only allocate one function descriptor with its associated relocation. */
16249 if (eh->fdpic_cnts.funcdesc_offset == -1)
16251 asection *s = htab->root.sgot;
16253 eh->fdpic_cnts.funcdesc_offset = s->size;
16255 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16256 if (bfd_link_pic(info))
16257 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16259 htab->srofixup->size += 8;
16263 if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16265 asection *s = htab->root.sgot;
16267 if (htab->root.dynamic_sections_created && h->dynindx == -1
16268 && !h->forced_local)
16269 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16272 if (h->dynindx == -1)
16274 /* We only allocate one function descriptor with its associated relocation. q */
16275 if (eh->fdpic_cnts.funcdesc_offset == -1)
16278 eh->fdpic_cnts.funcdesc_offset = s->size;
16280 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16281 if (bfd_link_pic(info))
16282 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16284 htab->srofixup->size += 8;
16288 /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16289 R_ARM_RELATIVE/rofixup relocation on it. */
16290 eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16292 if (h->dynindx == -1 && !bfd_link_pic(info))
16293 htab->srofixup->size += 4;
16295 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16298 if (eh->fdpic_cnts.funcdesc_cnt > 0)
16300 if (htab->root.dynamic_sections_created && h->dynindx == -1
16301 && !h->forced_local)
16302 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16305 if (h->dynindx == -1)
16307 /* We only allocate one function descriptor with its associated relocation. */
16308 if (eh->fdpic_cnts.funcdesc_offset == -1)
16310 asection *s = htab->root.sgot;
16312 eh->fdpic_cnts.funcdesc_offset = s->size;
16314 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16315 if (bfd_link_pic(info))
16316 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16318 htab->srofixup->size += 8;
16321 if (h->dynindx == -1 && !bfd_link_pic(info))
16323 /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
16324 htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16328 /* Will need one dynamic reloc per reference. will be either
16329 R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols. */
16330 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16331 eh->fdpic_cnts.funcdesc_cnt);
16335 /* Allocate stubs for exported Thumb functions on v4t. */
16336 if (!htab->use_blx && h->dynindx != -1
16338 && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16339 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16341 struct elf_link_hash_entry * th;
16342 struct bfd_link_hash_entry * bh;
16343 struct elf_link_hash_entry * myh;
16347 /* Create a new symbol to regist the real location of the function. */
16348 s = h->root.u.def.section;
16349 sprintf (name, "__real_%s", h->root.root.string);
16350 _bfd_generic_link_add_one_symbol (info, s->owner,
16351 name, BSF_GLOBAL, s,
16352 h->root.u.def.value,
16353 NULL, TRUE, FALSE, &bh);
16355 myh = (struct elf_link_hash_entry *) bh;
16356 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16357 myh->forced_local = 1;
16358 ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16359 eh->export_glue = myh;
16360 th = record_arm_to_thumb_glue (info, h);
16361 /* Point the symbol at the stub. */
16362 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16363 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16364 h->root.u.def.section = th->root.u.def.section;
16365 h->root.u.def.value = th->root.u.def.value & ~1;
16368 if (eh->dyn_relocs == NULL)
16371 /* In the shared -Bsymbolic case, discard space allocated for
16372 dynamic pc-relative relocs against symbols which turn out to be
16373 defined in regular objects. For the normal shared case, discard
16374 space for pc-relative relocs that have become local due to symbol
16375 visibility changes. */
16377 if (bfd_link_pic (info) || htab->root.is_relocatable_executable || htab->fdpic_p)
16379 /* Relocs that use pc_count are PC-relative forms, which will appear
16380 on something like ".long foo - ." or "movw REG, foo - .". We want
16381 calls to protected symbols to resolve directly to the function
16382 rather than going via the plt. If people want function pointer
16383 comparisons to work as expected then they should avoid writing
16384 assembly like ".long foo - .". */
16385 if (SYMBOL_CALLS_LOCAL (info, h))
16387 struct elf_dyn_relocs **pp;
16389 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
16391 p->count -= p->pc_count;
16400 if (htab->vxworks_p)
16402 struct elf_dyn_relocs **pp;
16404 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
16406 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16413 /* Also discard relocs on undefined weak syms with non-default
16415 if (eh->dyn_relocs != NULL
16416 && h->root.type == bfd_link_hash_undefweak)
16418 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16419 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16420 eh->dyn_relocs = NULL;
16422 /* Make sure undefined weak symbols are output as a dynamic
16424 else if (htab->root.dynamic_sections_created && h->dynindx == -1
16425 && !h->forced_local)
16427 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16432 else if (htab->root.is_relocatable_executable && h->dynindx == -1
16433 && h->root.type == bfd_link_hash_new)
16435 /* Output absolute symbols so that we can create relocations
16436 against them. For normal symbols we output a relocation
16437 against the section that contains them. */
16438 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16445 /* For the non-shared case, discard space for relocs against
16446 symbols which turn out to need copy relocs or are not
16449 if (!h->non_got_ref
16450 && ((h->def_dynamic
16451 && !h->def_regular)
16452 || (htab->root.dynamic_sections_created
16453 && (h->root.type == bfd_link_hash_undefweak
16454 || h->root.type == bfd_link_hash_undefined))))
16456 /* Make sure this symbol is output as a dynamic symbol.
16457 Undefined weak syms won't yet be marked as dynamic. */
16458 if (h->dynindx == -1 && !h->forced_local
16459 && h->root.type == bfd_link_hash_undefweak)
16461 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16465 /* If that succeeded, we know we'll be keeping all the
16467 if (h->dynindx != -1)
16471 eh->dyn_relocs = NULL;
16476 /* Finally, allocate space. */
16477 for (p = eh->dyn_relocs; p != NULL; p = p->next)
16479 asection *sreloc = elf_section_data (p->sec)->sreloc;
16481 if (h->type == STT_GNU_IFUNC
16482 && eh->plt.noncall_refcount == 0
16483 && SYMBOL_REFERENCES_LOCAL (info, h))
16484 elf32_arm_allocate_irelocs (info, sreloc, p->count);
16485 else if (h->dynindx != -1 && (!bfd_link_pic(info) || !info->symbolic || !h->def_regular))
16486 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16487 else if (htab->fdpic_p && !bfd_link_pic(info))
16488 htab->srofixup->size += 4 * p->count;
16490 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16496 /* Set DF_TEXTREL if we find any dynamic relocs that apply to
16497 read-only sections. */
16500 maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
16504 if (h->root.type == bfd_link_hash_indirect)
16507 sec = readonly_dynrelocs (h);
16510 struct bfd_link_info *info = (struct bfd_link_info *) info_p;
16512 info->flags |= DF_TEXTREL;
16513 info->callbacks->minfo
16514 (_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
16515 sec->owner, h->root.root.string, sec);
16517 /* Not an error, just cut short the traversal. */
16525 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16528 struct elf32_arm_link_hash_table *globals;
16530 globals = elf32_arm_hash_table (info);
16531 if (globals == NULL)
16534 globals->byteswap_code = byteswap_code;
16537 /* Set the sizes of the dynamic sections. */
16540 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
16541 struct bfd_link_info * info)
16546 bfd_boolean relocs;
16548 struct elf32_arm_link_hash_table *htab;
16550 htab = elf32_arm_hash_table (info);
16554 dynobj = elf_hash_table (info)->dynobj;
16555 BFD_ASSERT (dynobj != NULL);
16556 check_use_blx (htab);
16558 if (elf_hash_table (info)->dynamic_sections_created)
16560 /* Set the contents of the .interp section to the interpreter. */
16561 if (bfd_link_executable (info) && !info->nointerp)
16563 s = bfd_get_linker_section (dynobj, ".interp");
16564 BFD_ASSERT (s != NULL);
16565 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
16566 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
16570 /* Set up .got offsets for local syms, and space for local dynamic
16572 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16574 bfd_signed_vma *local_got;
16575 bfd_signed_vma *end_local_got;
16576 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
16577 char *local_tls_type;
16578 bfd_vma *local_tlsdesc_gotent;
16579 bfd_size_type locsymcount;
16580 Elf_Internal_Shdr *symtab_hdr;
16582 bfd_boolean is_vxworks = htab->vxworks_p;
16583 unsigned int symndx;
16584 struct fdpic_local *local_fdpic_cnts;
16586 if (! is_arm_elf (ibfd))
16589 for (s = ibfd->sections; s != NULL; s = s->next)
16591 struct elf_dyn_relocs *p;
16593 for (p = (struct elf_dyn_relocs *)
16594 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
16596 if (!bfd_is_abs_section (p->sec)
16597 && bfd_is_abs_section (p->sec->output_section))
16599 /* Input section has been discarded, either because
16600 it is a copy of a linkonce section or due to
16601 linker script /DISCARD/, so we'll be discarding
16604 else if (is_vxworks
16605 && strcmp (p->sec->output_section->name,
16608 /* Relocations in vxworks .tls_vars sections are
16609 handled specially by the loader. */
16611 else if (p->count != 0)
16613 srel = elf_section_data (p->sec)->sreloc;
16614 if (htab->fdpic_p && !bfd_link_pic(info))
16615 htab->srofixup->size += 4 * p->count;
16617 elf32_arm_allocate_dynrelocs (info, srel, p->count);
16618 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
16619 info->flags |= DF_TEXTREL;
16624 local_got = elf_local_got_refcounts (ibfd);
16628 symtab_hdr = & elf_symtab_hdr (ibfd);
16629 locsymcount = symtab_hdr->sh_info;
16630 end_local_got = local_got + locsymcount;
16631 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
16632 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
16633 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
16634 local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
16636 s = htab->root.sgot;
16637 srel = htab->root.srelgot;
16638 for (; local_got < end_local_got;
16639 ++local_got, ++local_iplt_ptr, ++local_tls_type,
16640 ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
16642 *local_tlsdesc_gotent = (bfd_vma) -1;
16643 local_iplt = *local_iplt_ptr;
16645 /* FDPIC support. */
16646 if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
16648 if (local_fdpic_cnts->funcdesc_offset == -1)
16650 local_fdpic_cnts->funcdesc_offset = s->size;
16653 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16654 if (bfd_link_pic(info))
16655 elf32_arm_allocate_dynrelocs (info, srel, 1);
16657 htab->srofixup->size += 8;
16661 if (local_fdpic_cnts->funcdesc_cnt > 0)
16663 if (local_fdpic_cnts->funcdesc_offset == -1)
16665 local_fdpic_cnts->funcdesc_offset = s->size;
16668 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16669 if (bfd_link_pic(info))
16670 elf32_arm_allocate_dynrelocs (info, srel, 1);
16672 htab->srofixup->size += 8;
16675 /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
16676 if (bfd_link_pic(info))
16677 elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
16679 htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
16682 if (local_iplt != NULL)
16684 struct elf_dyn_relocs *p;
16686 if (local_iplt->root.refcount > 0)
16688 elf32_arm_allocate_plt_entry (info, TRUE,
16691 if (local_iplt->arm.noncall_refcount == 0)
16692 /* All references to the PLT are calls, so all
16693 non-call references can resolve directly to the
16694 run-time target. This means that the .got entry
16695 would be the same as the .igot.plt entry, so there's
16696 no point creating both. */
16701 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
16702 local_iplt->root.offset = (bfd_vma) -1;
16705 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
16709 psrel = elf_section_data (p->sec)->sreloc;
16710 if (local_iplt->arm.noncall_refcount == 0)
16711 elf32_arm_allocate_irelocs (info, psrel, p->count);
16713 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
16716 if (*local_got > 0)
16718 Elf_Internal_Sym *isym;
16720 *local_got = s->size;
16721 if (*local_tls_type & GOT_TLS_GD)
16722 /* TLS_GD relocs need an 8-byte structure in the GOT. */
16724 if (*local_tls_type & GOT_TLS_GDESC)
16726 *local_tlsdesc_gotent = htab->root.sgotplt->size
16727 - elf32_arm_compute_jump_table_size (htab);
16728 htab->root.sgotplt->size += 8;
16729 *local_got = (bfd_vma) -2;
16730 /* plt.got_offset needs to know there's a TLS_DESC
16731 reloc in the middle of .got.plt. */
16732 htab->num_tls_desc++;
16734 if (*local_tls_type & GOT_TLS_IE)
16737 if (*local_tls_type & GOT_NORMAL)
16739 /* If the symbol is both GD and GDESC, *local_got
16740 may have been overwritten. */
16741 *local_got = s->size;
16745 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
16749 /* If all references to an STT_GNU_IFUNC PLT are calls,
16750 then all non-call references, including this GOT entry,
16751 resolve directly to the run-time target. */
16752 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
16753 && (local_iplt == NULL
16754 || local_iplt->arm.noncall_refcount == 0))
16755 elf32_arm_allocate_irelocs (info, srel, 1);
16756 else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
16758 if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
16759 elf32_arm_allocate_dynrelocs (info, srel, 1);
16760 else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
16761 htab->srofixup->size += 4;
16763 if ((bfd_link_pic (info) || htab->fdpic_p)
16764 && *local_tls_type & GOT_TLS_GDESC)
16766 elf32_arm_allocate_dynrelocs (info,
16767 htab->root.srelplt, 1);
16768 htab->tls_trampoline = -1;
16773 *local_got = (bfd_vma) -1;
16777 if (htab->tls_ldm_got.refcount > 0)
16779 /* Allocate two GOT entries and one dynamic relocation (if necessary)
16780 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
16781 htab->tls_ldm_got.offset = htab->root.sgot->size;
16782 htab->root.sgot->size += 8;
16783 if (bfd_link_pic (info))
16784 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16787 htab->tls_ldm_got.offset = -1;
16789 /* At the very end of the .rofixup section is a pointer to the GOT,
16790 reserve space for it. */
16791 if (htab->fdpic_p && htab->srofixup != NULL)
16792 htab->srofixup->size += 4;
16794 /* Allocate global sym .plt and .got entries, and space for global
16795 sym dynamic relocs. */
16796 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
16798 /* Here we rummage through the found bfds to collect glue information. */
16799 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16801 if (! is_arm_elf (ibfd))
16804 /* Initialise mapping tables for code/data. */
16805 bfd_elf32_arm_init_maps (ibfd);
16807 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
16808 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
16809 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
16810 _bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
16813 /* Allocate space for the glue sections now that we've sized them. */
16814 bfd_elf32_arm_allocate_interworking_sections (info);
16816 /* For every jump slot reserved in the sgotplt, reloc_count is
16817 incremented. However, when we reserve space for TLS descriptors,
16818 it's not incremented, so in order to compute the space reserved
16819 for them, it suffices to multiply the reloc count by the jump
16821 if (htab->root.srelplt)
16822 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);
16824 if (htab->tls_trampoline)
16826 if (htab->root.splt->size == 0)
16827 htab->root.splt->size += htab->plt_header_size;
16829 htab->tls_trampoline = htab->root.splt->size;
16830 htab->root.splt->size += htab->plt_entry_size;
16832 /* If we're not using lazy TLS relocations, don't generate the
16833 PLT and GOT entries they require. */
16834 if (!(info->flags & DF_BIND_NOW))
16836 htab->dt_tlsdesc_got = htab->root.sgot->size;
16837 htab->root.sgot->size += 4;
16839 htab->dt_tlsdesc_plt = htab->root.splt->size;
16840 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
16844 /* The check_relocs and adjust_dynamic_symbol entry points have
16845 determined the sizes of the various dynamic sections. Allocate
16846 memory for them. */
16849 for (s = dynobj->sections; s != NULL; s = s->next)
16853 if ((s->flags & SEC_LINKER_CREATED) == 0)
16856 /* It's OK to base decisions on the section name, because none
16857 of the dynobj section names depend upon the input files. */
16858 name = bfd_get_section_name (dynobj, s);
16860 if (s == htab->root.splt)
16862 /* Remember whether there is a PLT. */
16863 plt = s->size != 0;
16865 else if (CONST_STRNEQ (name, ".rel"))
16869 /* Remember whether there are any reloc sections other
16870 than .rel(a).plt and .rela.plt.unloaded. */
16871 if (s != htab->root.srelplt && s != htab->srelplt2)
16874 /* We use the reloc_count field as a counter if we need
16875 to copy relocs into the output file. */
16876 s->reloc_count = 0;
16879 else if (s != htab->root.sgot
16880 && s != htab->root.sgotplt
16881 && s != htab->root.iplt
16882 && s != htab->root.igotplt
16883 && s != htab->root.sdynbss
16884 && s != htab->root.sdynrelro
16885 && s != htab->srofixup)
16887 /* It's not one of our sections, so don't allocate space. */
16893 /* If we don't need this section, strip it from the
16894 output file. This is mostly to handle .rel(a).bss and
16895 .rel(a).plt. We must create both sections in
16896 create_dynamic_sections, because they must be created
16897 before the linker maps input sections to output
16898 sections. The linker does that before
16899 adjust_dynamic_symbol is called, and it is that
16900 function which decides whether anything needs to go
16901 into these sections. */
16902 s->flags |= SEC_EXCLUDE;
16906 if ((s->flags & SEC_HAS_CONTENTS) == 0)
16909 /* Allocate memory for the section contents. */
16910 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
16911 if (s->contents == NULL)
16915 if (elf_hash_table (info)->dynamic_sections_created)
16917 /* Add some entries to the .dynamic section. We fill in the
16918 values later, in elf32_arm_finish_dynamic_sections, but we
16919 must add the entries now so that we get the correct size for
16920 the .dynamic section. The DT_DEBUG entry is filled in by the
16921 dynamic linker and used by the debugger. */
16922 #define add_dynamic_entry(TAG, VAL) \
16923 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
16925 if (bfd_link_executable (info))
16927 if (!add_dynamic_entry (DT_DEBUG, 0))
16933 if ( !add_dynamic_entry (DT_PLTGOT, 0)
16934 || !add_dynamic_entry (DT_PLTRELSZ, 0)
16935 || !add_dynamic_entry (DT_PLTREL,
16936 htab->use_rel ? DT_REL : DT_RELA)
16937 || !add_dynamic_entry (DT_JMPREL, 0))
16940 if (htab->dt_tlsdesc_plt
16941 && (!add_dynamic_entry (DT_TLSDESC_PLT,0)
16942 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
16950 if (!add_dynamic_entry (DT_REL, 0)
16951 || !add_dynamic_entry (DT_RELSZ, 0)
16952 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
16957 if (!add_dynamic_entry (DT_RELA, 0)
16958 || !add_dynamic_entry (DT_RELASZ, 0)
16959 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
16964 /* If any dynamic relocs apply to a read-only section,
16965 then we need a DT_TEXTREL entry. */
16966 if ((info->flags & DF_TEXTREL) == 0)
16967 elf_link_hash_traverse (&htab->root, maybe_set_textrel, info);
16969 if ((info->flags & DF_TEXTREL) != 0)
16971 if (!add_dynamic_entry (DT_TEXTREL, 0))
16974 if (htab->vxworks_p
16975 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
16978 #undef add_dynamic_entry
16983 /* Size sections even though they're not dynamic. We use it to setup
16984 _TLS_MODULE_BASE_, if needed. */
16987 elf32_arm_always_size_sections (bfd *output_bfd,
16988 struct bfd_link_info *info)
16991 struct elf32_arm_link_hash_table *htab;
16993 htab = elf32_arm_hash_table (info);
16995 if (bfd_link_relocatable (info))
16998 tls_sec = elf_hash_table (info)->tls_sec;
17002 struct elf_link_hash_entry *tlsbase;
17004 tlsbase = elf_link_hash_lookup
17005 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
17009 struct bfd_link_hash_entry *bh = NULL;
17010 const struct elf_backend_data *bed
17011 = get_elf_backend_data (output_bfd);
17013 if (!(_bfd_generic_link_add_one_symbol
17014 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
17015 tls_sec, 0, NULL, FALSE,
17016 bed->collect, &bh)))
17019 tlsbase->type = STT_TLS;
17020 tlsbase = (struct elf_link_hash_entry *)bh;
17021 tlsbase->def_regular = 1;
17022 tlsbase->other = STV_HIDDEN;
17023 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
17027 if (htab->fdpic_p && !bfd_link_relocatable (info)
17028 && !bfd_elf_stack_segment_size (output_bfd, info,
17029 "__stacksize", DEFAULT_STACK_SIZE))
17035 /* Finish up dynamic symbol handling. We set the contents of various
17036 dynamic sections here. */
17039 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
17040 struct bfd_link_info * info,
17041 struct elf_link_hash_entry * h,
17042 Elf_Internal_Sym * sym)
17044 struct elf32_arm_link_hash_table *htab;
17045 struct elf32_arm_link_hash_entry *eh;
17047 htab = elf32_arm_hash_table (info);
17051 eh = (struct elf32_arm_link_hash_entry *) h;
17053 if (h->plt.offset != (bfd_vma) -1)
17057 BFD_ASSERT (h->dynindx != -1);
17058 if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
17063 if (!h->def_regular)
17065 /* Mark the symbol as undefined, rather than as defined in
17066 the .plt section. */
17067 sym->st_shndx = SHN_UNDEF;
17068 /* If the symbol is weak we need to clear the value.
17069 Otherwise, the PLT entry would provide a definition for
17070 the symbol even if the symbol wasn't defined anywhere,
17071 and so the symbol would never be NULL. Leave the value if
17072 there were any relocations where pointer equality matters
17073 (this is a clue for the dynamic linker, to make function
17074 pointer comparisons work between an application and shared
17076 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
17079 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
17081 /* At least one non-call relocation references this .iplt entry,
17082 so the .iplt entry is the function's canonical address. */
17083 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
17084 ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
17085 sym->st_shndx = (_bfd_elf_section_from_bfd_section
17086 (output_bfd, htab->root.iplt->output_section));
17087 sym->st_value = (h->plt.offset
17088 + htab->root.iplt->output_section->vma
17089 + htab->root.iplt->output_offset);
17096 Elf_Internal_Rela rel;
17098 /* This symbol needs a copy reloc. Set it up. */
17099 BFD_ASSERT (h->dynindx != -1
17100 && (h->root.type == bfd_link_hash_defined
17101 || h->root.type == bfd_link_hash_defweak));
17104 rel.r_offset = (h->root.u.def.value
17105 + h->root.u.def.section->output_section->vma
17106 + h->root.u.def.section->output_offset);
17107 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
17108 if (h->root.u.def.section == htab->root.sdynrelro)
17109 s = htab->root.sreldynrelro;
17111 s = htab->root.srelbss;
17112 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
17115 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
17116 and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17117 it is relative to the ".got" section. */
17118 if (h == htab->root.hdynamic
17119 || (!htab->fdpic_p && !htab->vxworks_p && h == htab->root.hgot))
17120 sym->st_shndx = SHN_ABS;
17126 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17128 const unsigned long *template, unsigned count)
17132 for (ix = 0; ix != count; ix++)
17134 unsigned long insn = template[ix];
17136 /* Emit mov pc,rx if bx is not permitted. */
17137 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17138 insn = (insn & 0xf000000f) | 0x01a0f000;
17139 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17143 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17144 other variants, NaCl needs this entry in a static executable's
17145 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17146 zero. For .iplt really only the last bundle is useful, and .iplt
17147 could have a shorter first entry, with each individual PLT entry's
17148 relative branch calculated differently so it targets the last
17149 bundle instead of the instruction before it (labelled .Lplt_tail
17150 above). But it's simpler to keep the size and layout of PLT0
17151 consistent with the dynamic case, at the cost of some dead code at
17152 the start of .iplt and the one dead store to the stack at the start
17155 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17156 asection *plt, bfd_vma got_displacement)
17160 put_arm_insn (htab, output_bfd,
17161 elf32_arm_nacl_plt0_entry[0]
17162 | arm_movw_immediate (got_displacement),
17163 plt->contents + 0);
17164 put_arm_insn (htab, output_bfd,
17165 elf32_arm_nacl_plt0_entry[1]
17166 | arm_movt_immediate (got_displacement),
17167 plt->contents + 4);
17169 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17170 put_arm_insn (htab, output_bfd,
17171 elf32_arm_nacl_plt0_entry[i],
17172 plt->contents + (i * 4));
17175 /* Finish up the dynamic sections. */
17178 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
17183 struct elf32_arm_link_hash_table *htab;
17185 htab = elf32_arm_hash_table (info);
17189 dynobj = elf_hash_table (info)->dynobj;
17191 sgot = htab->root.sgotplt;
17192 /* A broken linker script might have discarded the dynamic sections.
17193 Catch this here so that we do not seg-fault later on. */
17194 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
17196 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
17198 if (elf_hash_table (info)->dynamic_sections_created)
17201 Elf32_External_Dyn *dyncon, *dynconend;
17203 splt = htab->root.splt;
17204 BFD_ASSERT (splt != NULL && sdyn != NULL);
17205 BFD_ASSERT (htab->symbian_p || sgot != NULL);
17207 dyncon = (Elf32_External_Dyn *) sdyn->contents;
17208 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
17210 for (; dyncon < dynconend; dyncon++)
17212 Elf_Internal_Dyn dyn;
17216 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
17223 if (htab->vxworks_p
17224 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
17225 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17230 goto get_vma_if_bpabi;
17233 goto get_vma_if_bpabi;
17236 goto get_vma_if_bpabi;
17238 name = ".gnu.version";
17239 goto get_vma_if_bpabi;
17241 name = ".gnu.version_d";
17242 goto get_vma_if_bpabi;
17244 name = ".gnu.version_r";
17245 goto get_vma_if_bpabi;
17248 name = htab->symbian_p ? ".got" : ".got.plt";
17251 name = RELOC_SECTION (htab, ".plt");
17253 s = bfd_get_linker_section (dynobj, name);
17257 (_("could not find section %s"), name);
17258 bfd_set_error (bfd_error_invalid_operation);
17261 if (!htab->symbian_p)
17262 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
17264 /* In the BPABI, tags in the PT_DYNAMIC section point
17265 at the file offset, not the memory address, for the
17266 convenience of the post linker. */
17267 dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
17268 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17272 if (htab->symbian_p)
17277 s = htab->root.srelplt;
17278 BFD_ASSERT (s != NULL);
17279 dyn.d_un.d_val = s->size;
17280 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17287 /* In the BPABI, the DT_REL tag must point at the file
17288 offset, not the VMA, of the first relocation
17289 section. So, we use code similar to that in
17290 elflink.c, but do not check for SHF_ALLOC on the
17291 relocation section, since relocation sections are
17292 never allocated under the BPABI. PLT relocs are also
17294 if (htab->symbian_p)
17297 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
17298 ? SHT_REL : SHT_RELA);
17299 dyn.d_un.d_val = 0;
17300 for (i = 1; i < elf_numsections (output_bfd); i++)
17302 Elf_Internal_Shdr *hdr
17303 = elf_elfsections (output_bfd)[i];
17304 if (hdr->sh_type == type)
17306 if (dyn.d_tag == DT_RELSZ
17307 || dyn.d_tag == DT_RELASZ)
17308 dyn.d_un.d_val += hdr->sh_size;
17309 else if ((ufile_ptr) hdr->sh_offset
17310 <= dyn.d_un.d_val - 1)
17311 dyn.d_un.d_val = hdr->sh_offset;
17314 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17318 case DT_TLSDESC_PLT:
17319 s = htab->root.splt;
17320 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17321 + htab->dt_tlsdesc_plt);
17322 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17325 case DT_TLSDESC_GOT:
17326 s = htab->root.sgot;
17327 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17328 + htab->dt_tlsdesc_got);
17329 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17332 /* Set the bottom bit of DT_INIT/FINI if the
17333 corresponding function is Thumb. */
17335 name = info->init_function;
17338 name = info->fini_function;
17340 /* If it wasn't set by elf_bfd_final_link
17341 then there is nothing to adjust. */
17342 if (dyn.d_un.d_val != 0)
17344 struct elf_link_hash_entry * eh;
17346 eh = elf_link_hash_lookup (elf_hash_table (info), name,
17347 FALSE, FALSE, TRUE);
17349 && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
17350 == ST_BRANCH_TO_THUMB)
17352 dyn.d_un.d_val |= 1;
17353 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17360 /* Fill in the first entry in the procedure linkage table. */
17361 if (splt->size > 0 && htab->plt_header_size)
17363 const bfd_vma *plt0_entry;
17364 bfd_vma got_address, plt_address, got_displacement;
17366 /* Calculate the addresses of the GOT and PLT. */
17367 got_address = sgot->output_section->vma + sgot->output_offset;
17368 plt_address = splt->output_section->vma + splt->output_offset;
17370 if (htab->vxworks_p)
17372 /* The VxWorks GOT is relocated by the dynamic linker.
17373 Therefore, we must emit relocations rather than simply
17374 computing the values now. */
17375 Elf_Internal_Rela rel;
17377 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
17378 put_arm_insn (htab, output_bfd, plt0_entry[0],
17379 splt->contents + 0);
17380 put_arm_insn (htab, output_bfd, plt0_entry[1],
17381 splt->contents + 4);
17382 put_arm_insn (htab, output_bfd, plt0_entry[2],
17383 splt->contents + 8);
17384 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
17386 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
17387 rel.r_offset = plt_address + 12;
17388 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17390 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
17391 htab->srelplt2->contents);
17393 else if (htab->nacl_p)
17394 arm_nacl_put_plt0 (htab, output_bfd, splt,
17395 got_address + 8 - (plt_address + 16));
17396 else if (using_thumb_only (htab))
17398 got_displacement = got_address - (plt_address + 12);
17400 plt0_entry = elf32_thumb2_plt0_entry;
17401 put_arm_insn (htab, output_bfd, plt0_entry[0],
17402 splt->contents + 0);
17403 put_arm_insn (htab, output_bfd, plt0_entry[1],
17404 splt->contents + 4);
17405 put_arm_insn (htab, output_bfd, plt0_entry[2],
17406 splt->contents + 8);
17408 bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
17412 got_displacement = got_address - (plt_address + 16);
17414 plt0_entry = elf32_arm_plt0_entry;
17415 put_arm_insn (htab, output_bfd, plt0_entry[0],
17416 splt->contents + 0);
17417 put_arm_insn (htab, output_bfd, plt0_entry[1],
17418 splt->contents + 4);
17419 put_arm_insn (htab, output_bfd, plt0_entry[2],
17420 splt->contents + 8);
17421 put_arm_insn (htab, output_bfd, plt0_entry[3],
17422 splt->contents + 12);
17424 #ifdef FOUR_WORD_PLT
17425 /* The displacement value goes in the otherwise-unused
17426 last word of the second entry. */
17427 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
17429 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
17434 /* UnixWare sets the entsize of .plt to 4, although that doesn't
17435 really seem like the right value. */
17436 if (splt->output_section->owner == output_bfd)
17437 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
17439 if (htab->dt_tlsdesc_plt)
17441 bfd_vma got_address
17442 = sgot->output_section->vma + sgot->output_offset;
17443 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
17444 + htab->root.sgot->output_offset);
17445 bfd_vma plt_address
17446 = splt->output_section->vma + splt->output_offset;
17448 arm_put_trampoline (htab, output_bfd,
17449 splt->contents + htab->dt_tlsdesc_plt,
17450 dl_tlsdesc_lazy_trampoline, 6);
17452 bfd_put_32 (output_bfd,
17453 gotplt_address + htab->dt_tlsdesc_got
17454 - (plt_address + htab->dt_tlsdesc_plt)
17455 - dl_tlsdesc_lazy_trampoline[6],
17456 splt->contents + htab->dt_tlsdesc_plt + 24);
17457 bfd_put_32 (output_bfd,
17458 got_address - (plt_address + htab->dt_tlsdesc_plt)
17459 - dl_tlsdesc_lazy_trampoline[7],
17460 splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
17463 if (htab->tls_trampoline)
17465 arm_put_trampoline (htab, output_bfd,
17466 splt->contents + htab->tls_trampoline,
17467 tls_trampoline, 3);
17468 #ifdef FOUR_WORD_PLT
17469 bfd_put_32 (output_bfd, 0x00000000,
17470 splt->contents + htab->tls_trampoline + 12);
17474 if (htab->vxworks_p
17475 && !bfd_link_pic (info)
17476 && htab->root.splt->size > 0)
17478 /* Correct the .rel(a).plt.unloaded relocations. They will have
17479 incorrect symbol indexes. */
17483 num_plts = ((htab->root.splt->size - htab->plt_header_size)
17484 / htab->plt_entry_size);
17485 p = htab->srelplt2->contents + RELOC_SIZE (htab);
17487 for (; num_plts; num_plts--)
17489 Elf_Internal_Rela rel;
17491 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17492 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17493 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17494 p += RELOC_SIZE (htab);
17496 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17497 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
17498 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17499 p += RELOC_SIZE (htab);
17504 if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
17505 /* NaCl uses a special first entry in .iplt too. */
17506 arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
17508 /* Fill in the first three entries in the global offset table. */
17511 if (sgot->size > 0)
17514 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
17516 bfd_put_32 (output_bfd,
17517 sdyn->output_section->vma + sdyn->output_offset,
17519 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
17520 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
17523 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
17526 /* At the very end of the .rofixup section is a pointer to the GOT. */
17527 if (htab->fdpic_p && htab->srofixup != NULL)
17529 struct elf_link_hash_entry *hgot = htab->root.hgot;
17531 bfd_vma got_value = hgot->root.u.def.value
17532 + hgot->root.u.def.section->output_section->vma
17533 + hgot->root.u.def.section->output_offset;
17535 arm_elf_add_rofixup(output_bfd, htab->srofixup, got_value);
17537 /* Make sure we allocated and generated the same number of fixups. */
17538 BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
17545 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
17547 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
17548 struct elf32_arm_link_hash_table *globals;
17549 struct elf_segment_map *m;
17551 i_ehdrp = elf_elfheader (abfd);
17553 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17554 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17556 _bfd_elf_post_process_headers (abfd, link_info);
17557 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17561 globals = elf32_arm_hash_table (link_info);
17562 if (globals != NULL && globals->byteswap_code)
17563 i_ehdrp->e_flags |= EF_ARM_BE8;
17565 if (globals->fdpic_p)
17566 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17569 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17570 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17572 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17573 if (abi == AEABI_VFP_args_vfp)
17574 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17576 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17579 /* Scan segment to set p_flags attribute if it contains only sections with
17580 SHF_ARM_PURECODE flag. */
17581 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17587 for (j = 0; j < m->count; j++)
17589 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17595 m->p_flags_valid = 1;
17600 static enum elf_reloc_type_class
17601 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17602 const asection *rel_sec ATTRIBUTE_UNUSED,
17603 const Elf_Internal_Rela *rela)
17605 switch ((int) ELF32_R_TYPE (rela->r_info))
17607 case R_ARM_RELATIVE:
17608 return reloc_class_relative;
17609 case R_ARM_JUMP_SLOT:
17610 return reloc_class_plt;
17612 return reloc_class_copy;
17613 case R_ARM_IRELATIVE:
17614 return reloc_class_ifunc;
17616 return reloc_class_normal;
17621 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
17623 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
17626 /* Return TRUE if this is an unwinding table entry. */
17629 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17631 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
17632 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
17636 /* Set the type and flags for an ARM section. We do this by
17637 the section name, which is a hack, but ought to work. */
17640 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17644 name = bfd_get_section_name (abfd, sec);
17646 if (is_arm_elf_unwind_section_name (abfd, name))
17648 hdr->sh_type = SHT_ARM_EXIDX;
17649 hdr->sh_flags |= SHF_LINK_ORDER;
17652 if (sec->flags & SEC_ELF_PURECODE)
17653 hdr->sh_flags |= SHF_ARM_PURECODE;
17658 /* Handle an ARM specific section when reading an object file. This is
17659 called when bfd_section_from_shdr finds a section with an unknown
17663 elf32_arm_section_from_shdr (bfd *abfd,
17664 Elf_Internal_Shdr * hdr,
17668 /* There ought to be a place to keep ELF backend specific flags, but
17669 at the moment there isn't one. We just keep track of the
17670 sections by their name, instead. Fortunately, the ABI gives
17671 names for all the ARM specific sections, so we will probably get
17673 switch (hdr->sh_type)
17675 case SHT_ARM_EXIDX:
17676 case SHT_ARM_PREEMPTMAP:
17677 case SHT_ARM_ATTRIBUTES:
17684 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17690 static _arm_elf_section_data *
17691 get_arm_elf_section_data (asection * sec)
17693 if (sec && sec->owner && is_arm_elf (sec->owner))
17694 return elf32_arm_section_data (sec);
17702 struct bfd_link_info *info;
17705 int (*func) (void *, const char *, Elf_Internal_Sym *,
17706 asection *, struct elf_link_hash_entry *);
17707 } output_arch_syminfo;
17709 enum map_symbol_type
17717 /* Output a single mapping symbol. */
17720 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17721 enum map_symbol_type type,
17724 static const char *names[3] = {"$a", "$t", "$d"};
17725 Elf_Internal_Sym sym;
17727 sym.st_value = osi->sec->output_section->vma
17728 + osi->sec->output_offset
17732 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17733 sym.st_shndx = osi->sec_shndx;
17734 sym.st_target_internal = 0;
17735 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17736 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17739 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17740 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
17743 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
17744 bfd_boolean is_iplt_entry_p,
17745 union gotplt_union *root_plt,
17746 struct arm_plt_info *arm_plt)
17748 struct elf32_arm_link_hash_table *htab;
17749 bfd_vma addr, plt_header_size;
17751 if (root_plt->offset == (bfd_vma) -1)
17754 htab = elf32_arm_hash_table (osi->info);
17758 if (is_iplt_entry_p)
17760 osi->sec = htab->root.iplt;
17761 plt_header_size = 0;
17765 osi->sec = htab->root.splt;
17766 plt_header_size = htab->plt_header_size;
17768 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
17769 (osi->info->output_bfd, osi->sec->output_section));
17771 addr = root_plt->offset & -2;
17772 if (htab->symbian_p)
17774 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17776 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
17779 else if (htab->vxworks_p)
17781 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17783 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
17785 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
17787 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
17790 else if (htab->nacl_p)
17792 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17795 else if (htab->fdpic_p)
17797 enum map_symbol_type type = using_thumb_only(htab)
17801 if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
17802 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17804 if (!elf32_arm_output_map_sym (osi, type, addr))
17806 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
17808 if (htab->plt_entry_size == 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry))
17809 if (!elf32_arm_output_map_sym (osi, type, addr + 24))
17812 else if (using_thumb_only (htab))
17814 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
17819 bfd_boolean thumb_stub_p;
17821 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
17824 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17827 #ifdef FOUR_WORD_PLT
17828 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17830 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
17833 /* A three-word PLT with no Thumb thunk contains only Arm code,
17834 so only need to output a mapping symbol for the first PLT entry and
17835 entries with thumb thunks. */
17836 if (thumb_stub_p || addr == plt_header_size)
17838 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17847 /* Output mapping symbols for PLT entries associated with H. */
17850 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
17852 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
17853 struct elf32_arm_link_hash_entry *eh;
17855 if (h->root.type == bfd_link_hash_indirect)
17858 if (h->root.type == bfd_link_hash_warning)
17859 /* When warning symbols are created, they **replace** the "real"
17860 entry in the hash table, thus we never get to see the real
17861 symbol in a hash traversal. So look at it now. */
17862 h = (struct elf_link_hash_entry *) h->root.u.i.link;
17864 eh = (struct elf32_arm_link_hash_entry *) h;
17865 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
17866 &h->plt, &eh->plt);
17869 /* Bind a veneered symbol to its veneer identified by its hash entry
17870 STUB_ENTRY. The veneered location thus loose its symbol. */
17873 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
17875 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
17878 hash->root.root.u.def.section = stub_entry->stub_sec;
17879 hash->root.root.u.def.value = stub_entry->stub_offset;
17880 hash->root.size = stub_entry->stub_size;
17883 /* Output a single local symbol for a generated stub. */
17886 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
17887 bfd_vma offset, bfd_vma size)
17889 Elf_Internal_Sym sym;
17891 sym.st_value = osi->sec->output_section->vma
17892 + osi->sec->output_offset
17894 sym.st_size = size;
17896 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
17897 sym.st_shndx = osi->sec_shndx;
17898 sym.st_target_internal = 0;
17899 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
17903 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
17906 struct elf32_arm_stub_hash_entry *stub_entry;
17907 asection *stub_sec;
17910 output_arch_syminfo *osi;
17911 const insn_sequence *template_sequence;
17912 enum stub_insn_type prev_type;
17915 enum map_symbol_type sym_type;
17917 /* Massage our args to the form they really have. */
17918 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
17919 osi = (output_arch_syminfo *) in_arg;
17921 stub_sec = stub_entry->stub_sec;
17923 /* Ensure this stub is attached to the current section being
17925 if (stub_sec != osi->sec)
17928 addr = (bfd_vma) stub_entry->stub_offset;
17929 template_sequence = stub_entry->stub_template;
17931 if (arm_stub_sym_claimed (stub_entry->stub_type))
17932 arm_stub_claim_sym (stub_entry);
17935 stub_name = stub_entry->output_name;
17936 switch (template_sequence[0].type)
17939 if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
17940 stub_entry->stub_size))
17945 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
17946 stub_entry->stub_size))
17955 prev_type = DATA_TYPE;
17957 for (i = 0; i < stub_entry->stub_template_size; i++)
17959 switch (template_sequence[i].type)
17962 sym_type = ARM_MAP_ARM;
17967 sym_type = ARM_MAP_THUMB;
17971 sym_type = ARM_MAP_DATA;
17979 if (template_sequence[i].type != prev_type)
17981 prev_type = template_sequence[i].type;
17982 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
17986 switch (template_sequence[i].type)
18010 /* Output mapping symbols for linker generated sections,
18011 and for those data-only sections that do not have a
18015 elf32_arm_output_arch_local_syms (bfd *output_bfd,
18016 struct bfd_link_info *info,
18018 int (*func) (void *, const char *,
18019 Elf_Internal_Sym *,
18021 struct elf_link_hash_entry *))
18023 output_arch_syminfo osi;
18024 struct elf32_arm_link_hash_table *htab;
18026 bfd_size_type size;
18029 htab = elf32_arm_hash_table (info);
18033 check_use_blx (htab);
18035 osi.flaginfo = flaginfo;
18039 /* Add a $d mapping symbol to data-only sections that
18040 don't have any mapping symbol. This may result in (harmless) redundant
18041 mapping symbols. */
18042 for (input_bfd = info->input_bfds;
18044 input_bfd = input_bfd->link.next)
18046 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
18047 for (osi.sec = input_bfd->sections;
18049 osi.sec = osi.sec->next)
18051 if (osi.sec->output_section != NULL
18052 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
18054 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
18055 == SEC_HAS_CONTENTS
18056 && get_arm_elf_section_data (osi.sec) != NULL
18057 && get_arm_elf_section_data (osi.sec)->mapcount == 0
18058 && osi.sec->size > 0
18059 && (osi.sec->flags & SEC_EXCLUDE) == 0)
18061 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18062 (output_bfd, osi.sec->output_section);
18063 if (osi.sec_shndx != (int)SHN_BAD)
18064 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
18069 /* ARM->Thumb glue. */
18070 if (htab->arm_glue_size > 0)
18072 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18073 ARM2THUMB_GLUE_SECTION_NAME);
18075 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18076 (output_bfd, osi.sec->output_section);
18077 if (bfd_link_pic (info) || htab->root.is_relocatable_executable
18078 || htab->pic_veneer)
18079 size = ARM2THUMB_PIC_GLUE_SIZE;
18080 else if (htab->use_blx)
18081 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
18083 size = ARM2THUMB_STATIC_GLUE_SIZE;
18085 for (offset = 0; offset < htab->arm_glue_size; offset += size)
18087 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
18088 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
18092 /* Thumb->ARM glue. */
18093 if (htab->thumb_glue_size > 0)
18095 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18096 THUMB2ARM_GLUE_SECTION_NAME);
18098 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18099 (output_bfd, osi.sec->output_section);
18100 size = THUMB2ARM_GLUE_SIZE;
18102 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
18104 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
18105 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
18109 /* ARMv4 BX veneers. */
18110 if (htab->bx_glue_size > 0)
18112 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18113 ARM_BX_GLUE_SECTION_NAME);
18115 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18116 (output_bfd, osi.sec->output_section);
18118 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
18121 /* Long calls stubs. */
18122 if (htab->stub_bfd && htab->stub_bfd->sections)
18124 asection* stub_sec;
18126 for (stub_sec = htab->stub_bfd->sections;
18128 stub_sec = stub_sec->next)
18130 /* Ignore non-stub sections. */
18131 if (!strstr (stub_sec->name, STUB_SUFFIX))
18134 osi.sec = stub_sec;
18136 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18137 (output_bfd, osi.sec->output_section);
18139 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
18143 /* Finally, output mapping symbols for the PLT. */
18144 if (htab->root.splt && htab->root.splt->size > 0)
18146 osi.sec = htab->root.splt;
18147 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18148 (output_bfd, osi.sec->output_section));
18150 /* Output mapping symbols for the plt header. SymbianOS does not have a
18152 if (htab->vxworks_p)
18154 /* VxWorks shared libraries have no PLT header. */
18155 if (!bfd_link_pic (info))
18157 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18159 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18163 else if (htab->nacl_p)
18165 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18168 else if (using_thumb_only (htab) && !htab->fdpic_p)
18170 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
18172 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18174 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
18177 else if (!htab->symbian_p && !htab->fdpic_p)
18179 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18181 #ifndef FOUR_WORD_PLT
18182 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
18187 if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
18189 /* NaCl uses a special first entry in .iplt too. */
18190 osi.sec = htab->root.iplt;
18191 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18192 (output_bfd, osi.sec->output_section));
18193 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18196 if ((htab->root.splt && htab->root.splt->size > 0)
18197 || (htab->root.iplt && htab->root.iplt->size > 0))
18199 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
18200 for (input_bfd = info->input_bfds;
18202 input_bfd = input_bfd->link.next)
18204 struct arm_local_iplt_info **local_iplt;
18205 unsigned int i, num_syms;
18207 local_iplt = elf32_arm_local_iplt (input_bfd);
18208 if (local_iplt != NULL)
18210 num_syms = elf_symtab_hdr (input_bfd).sh_info;
18211 for (i = 0; i < num_syms; i++)
18212 if (local_iplt[i] != NULL
18213 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
18214 &local_iplt[i]->root,
18215 &local_iplt[i]->arm))
18220 if (htab->dt_tlsdesc_plt != 0)
18222 /* Mapping symbols for the lazy tls trampoline. */
18223 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
18226 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18227 htab->dt_tlsdesc_plt + 24))
18230 if (htab->tls_trampoline != 0)
18232 /* Mapping symbols for the tls trampoline. */
18233 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
18235 #ifdef FOUR_WORD_PLT
18236 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18237 htab->tls_trampoline + 12))
18245 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18246 the import library. All SYMCOUNT symbols of ABFD can be examined
18247 from their pointers in SYMS. Pointers of symbols to keep should be
18248 stored continuously at the beginning of that array.
18250 Returns the number of symbols to keep. */
18252 static unsigned int
18253 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18254 struct bfd_link_info *info,
18255 asymbol **syms, long symcount)
18259 long src_count, dst_count = 0;
18260 struct elf32_arm_link_hash_table *htab;
18262 htab = elf32_arm_hash_table (info);
18263 if (!htab->stub_bfd || !htab->stub_bfd->sections)
18267 cmse_name = (char *) bfd_malloc (maxnamelen);
18268 for (src_count = 0; src_count < symcount; src_count++)
18270 struct elf32_arm_link_hash_entry *cmse_hash;
18276 sym = syms[src_count];
18277 flags = sym->flags;
18278 name = (char *) bfd_asymbol_name (sym);
18280 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18282 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18285 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18286 if (namelen > maxnamelen)
18288 cmse_name = (char *)
18289 bfd_realloc (cmse_name, namelen);
18290 maxnamelen = namelen;
18292 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18293 cmse_hash = (struct elf32_arm_link_hash_entry *)
18294 elf_link_hash_lookup (&(htab)->root, cmse_name, FALSE, FALSE, TRUE);
18297 || (cmse_hash->root.root.type != bfd_link_hash_defined
18298 && cmse_hash->root.root.type != bfd_link_hash_defweak)
18299 || cmse_hash->root.type != STT_FUNC)
18302 if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
18305 syms[dst_count++] = sym;
18309 syms[dst_count] = NULL;
18314 /* Filter symbols of ABFD to include in the import library. All
18315 SYMCOUNT symbols of ABFD can be examined from their pointers in
18316 SYMS. Pointers of symbols to keep should be stored continuously at
18317 the beginning of that array.
18319 Returns the number of symbols to keep. */
18321 static unsigned int
18322 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18323 struct bfd_link_info *info,
18324 asymbol **syms, long symcount)
18326 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18328 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18329 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18330 library to be a relocatable object file. */
18331 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18332 if (globals->cmse_implib)
18333 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18335 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18338 /* Allocate target specific section data. */
18341 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18343 if (!sec->used_by_bfd)
18345 _arm_elf_section_data *sdata;
18346 bfd_size_type amt = sizeof (*sdata);
18348 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18351 sec->used_by_bfd = sdata;
18354 return _bfd_elf_new_section_hook (abfd, sec);
18358 /* Used to order a list of mapping symbols by address. */
18361 elf32_arm_compare_mapping (const void * a, const void * b)
18363 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18364 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18366 if (amap->vma > bmap->vma)
18368 else if (amap->vma < bmap->vma)
18370 else if (amap->type > bmap->type)
18371 /* Ensure results do not depend on the host qsort for objects with
18372 multiple mapping symbols at the same address by sorting on type
18375 else if (amap->type < bmap->type)
18381 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18383 static unsigned long
18384 offset_prel31 (unsigned long addr, bfd_vma offset)
18386 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18389 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18393 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18395 unsigned long first_word = bfd_get_32 (output_bfd, from);
18396 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18398 /* High bit of first word is supposed to be zero. */
18399 if ((first_word & 0x80000000ul) == 0)
18400 first_word = offset_prel31 (first_word, offset);
18402 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
18403 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
18404 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18405 second_word = offset_prel31 (second_word, offset);
18407 bfd_put_32 (output_bfd, first_word, to);
18408 bfd_put_32 (output_bfd, second_word, to + 4);
18411 /* Data for make_branch_to_a8_stub(). */
18413 struct a8_branch_to_stub_data
18415 asection *writing_section;
18416 bfd_byte *contents;
18420 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18421 places for a particular section. */
18424 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
18427 struct elf32_arm_stub_hash_entry *stub_entry;
18428 struct a8_branch_to_stub_data *data;
18429 bfd_byte *contents;
18430 unsigned long branch_insn;
18431 bfd_vma veneered_insn_loc, veneer_entry_loc;
18432 bfd_signed_vma branch_offset;
18436 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18437 data = (struct a8_branch_to_stub_data *) in_arg;
18439 if (stub_entry->target_section != data->writing_section
18440 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
18443 contents = data->contents;
18445 /* We use target_section as Cortex-A8 erratum workaround stubs are only
18446 generated when both source and target are in the same section. */
18447 veneered_insn_loc = stub_entry->target_section->output_section->vma
18448 + stub_entry->target_section->output_offset
18449 + stub_entry->source_value;
18451 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
18452 + stub_entry->stub_sec->output_offset
18453 + stub_entry->stub_offset;
18455 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
18456 veneered_insn_loc &= ~3u;
18458 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
18460 abfd = stub_entry->target_section->owner;
18461 loc = stub_entry->source_value;
18463 /* We attempt to avoid this condition by setting stubs_always_after_branch
18464 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18465 This check is just to be on the safe side... */
18466 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
18468 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18469 "allocated in unsafe location"), abfd);
18473 switch (stub_entry->stub_type)
18475 case arm_stub_a8_veneer_b:
18476 case arm_stub_a8_veneer_b_cond:
18477 branch_insn = 0xf0009000;
18480 case arm_stub_a8_veneer_blx:
18481 branch_insn = 0xf000e800;
18484 case arm_stub_a8_veneer_bl:
18486 unsigned int i1, j1, i2, j2, s;
18488 branch_insn = 0xf000d000;
18491 if (branch_offset < -16777216 || branch_offset > 16777214)
18493 /* There's not much we can do apart from complain if this
18495 _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18496 "of range (input file too large)"), abfd);
18500 /* i1 = not(j1 eor s), so:
18502 j1 = (not i1) eor s. */
18504 branch_insn |= (branch_offset >> 1) & 0x7ff;
18505 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
18506 i2 = (branch_offset >> 22) & 1;
18507 i1 = (branch_offset >> 23) & 1;
18508 s = (branch_offset >> 24) & 1;
18511 branch_insn |= j2 << 11;
18512 branch_insn |= j1 << 13;
18513 branch_insn |= s << 26;
18522 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
18523 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
18528 /* Beginning of stm32l4xx work-around. */
18530 /* Functions encoding instructions necessary for the emission of the
18531 fix-stm32l4xx-629360.
18532 Encoding is extracted from the
18533 ARM (C) Architecture Reference Manual
18534 ARMv7-A and ARMv7-R edition
18535 ARM DDI 0406C.b (ID072512). */
18537 static inline bfd_vma
18538 create_instruction_branch_absolute (int branch_offset)
18540 /* A8.8.18 B (A8-334)
18541 B target_address (Encoding T4). */
18542 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18543 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18544 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18546 int s = ((branch_offset & 0x1000000) >> 24);
18547 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18548 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18550 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18551 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18553 bfd_vma patched_inst = 0xf0009000
18555 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
18556 | j1 << 13 /* J1. */
18557 | j2 << 11 /* J2. */
18558 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
18560 return patched_inst;
18563 static inline bfd_vma
18564 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18566 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18567 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18568 bfd_vma patched_inst = 0xe8900000
18569 | (/*W=*/wback << 21)
18571 | (reg_mask & 0x0000ffff);
18573 return patched_inst;
18576 static inline bfd_vma
18577 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18579 /* A8.8.60 LDMDB/LDMEA (A8-402)
18580 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18581 bfd_vma patched_inst = 0xe9100000
18582 | (/*W=*/wback << 21)
18584 | (reg_mask & 0x0000ffff);
18586 return patched_inst;
18589 static inline bfd_vma
18590 create_instruction_mov (int target_reg, int source_reg)
18592 /* A8.8.103 MOV (register) (A8-486)
18593 MOV Rd, Rm (Encoding T1). */
18594 bfd_vma patched_inst = 0x4600
18595 | (target_reg & 0x7)
18596 | ((target_reg & 0x8) >> 3) << 7
18597 | (source_reg << 3);
18599 return patched_inst;
18602 static inline bfd_vma
18603 create_instruction_sub (int target_reg, int source_reg, int value)
18605 /* A8.8.221 SUB (immediate) (A8-708)
18606 SUB Rd, Rn, #value (Encoding T3). */
18607 bfd_vma patched_inst = 0xf1a00000
18608 | (target_reg << 8)
18609 | (source_reg << 16)
18611 | ((value & 0x800) >> 11) << 26
18612 | ((value & 0x700) >> 8) << 12
18615 return patched_inst;
18618 static inline bfd_vma
18619 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18622 /* A8.8.332 VLDM (A8-922)
18623 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18624 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18625 | (/*W=*/wback << 21)
18627 | (num_words & 0x000000ff)
18628 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18629 | (first_reg & 0x00000001) << 22;
18631 return patched_inst;
18634 static inline bfd_vma
18635 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18638 /* A8.8.332 VLDM (A8-922)
18639 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
18640 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18642 | (num_words & 0x000000ff)
18643 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
18644 | (first_reg & 0x00000001) << 22;
18646 return patched_inst;
18649 static inline bfd_vma
18650 create_instruction_udf_w (int value)
18652 /* A8.8.247 UDF (A8-758)
18653 Undefined (Encoding T2). */
18654 bfd_vma patched_inst = 0xf7f0a000
18655 | (value & 0x00000fff)
18656 | (value & 0x000f0000) << 16;
18658 return patched_inst;
18661 static inline bfd_vma
18662 create_instruction_udf (int value)
18664 /* A8.8.247 UDF (A8-758)
18665 Undefined (Encoding T1). */
18666 bfd_vma patched_inst = 0xde00
18669 return patched_inst;
18672 /* Functions writing an instruction in memory, returning the next
18673 memory position to write to. */
18675 static inline bfd_byte *
18676 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18677 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18679 put_thumb2_insn (htab, output_bfd, insn, pt);
18683 static inline bfd_byte *
18684 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18685 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18687 put_thumb_insn (htab, output_bfd, insn, pt);
18691 /* Function filling up a region in memory with T1 and T2 UDFs taking
18692 care of alignment. */
18695 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18697 const bfd_byte * const base_stub_contents,
18698 bfd_byte * const from_stub_contents,
18699 const bfd_byte * const end_stub_contents)
18701 bfd_byte *current_stub_contents = from_stub_contents;
18703 /* Fill the remaining of the stub with deterministic contents : UDF
18705 Check if realignment is needed on modulo 4 frontier using T1, to
18707 if ((current_stub_contents < end_stub_contents)
18708 && !((current_stub_contents - base_stub_contents) % 2)
18709 && ((current_stub_contents - base_stub_contents) % 4))
18710 current_stub_contents =
18711 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18712 create_instruction_udf (0));
18714 for (; current_stub_contents < end_stub_contents;)
18715 current_stub_contents =
18716 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18717 create_instruction_udf_w (0));
18719 return current_stub_contents;
18722 /* Functions writing the stream of instructions equivalent to the
18723 derived sequence for ldmia, ldmdb, vldm respectively. */
18726 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
18728 const insn32 initial_insn,
18729 const bfd_byte *const initial_insn_addr,
18730 bfd_byte *const base_stub_contents)
18732 int wback = (initial_insn & 0x00200000) >> 21;
18733 int ri, rn = (initial_insn & 0x000F0000) >> 16;
18734 int insn_all_registers = initial_insn & 0x0000ffff;
18735 int insn_low_registers, insn_high_registers;
18736 int usable_register_mask;
18737 int nb_registers = elf32_arm_popcount (insn_all_registers);
18738 int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
18739 int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
18740 bfd_byte *current_stub_contents = base_stub_contents;
18742 BFD_ASSERT (is_thumb2_ldmia (initial_insn));
18744 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
18745 smaller than 8 registers load sequences that do not cause the
18747 if (nb_registers <= 8)
18749 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
18750 current_stub_contents =
18751 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18754 /* B initial_insn_addr+4. */
18756 current_stub_contents =
18757 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18758 create_instruction_branch_absolute
18759 (initial_insn_addr - current_stub_contents));
18761 /* Fill the remaining of the stub with deterministic contents. */
18762 current_stub_contents =
18763 stm32l4xx_fill_stub_udf (htab, output_bfd,
18764 base_stub_contents, current_stub_contents,
18765 base_stub_contents +
18766 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18771 /* - reg_list[13] == 0. */
18772 BFD_ASSERT ((insn_all_registers & (1 << 13))==0);
18774 /* - reg_list[14] & reg_list[15] != 1. */
18775 BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
18777 /* - if (wback==1) reg_list[rn] == 0. */
18778 BFD_ASSERT (!wback || !restore_rn);
18780 /* - nb_registers > 8. */
18781 BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
18783 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
18785 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
18786 - One with the 7 lowest registers (register mask 0x007F)
18787 This LDM will finally contain between 2 and 7 registers
18788 - One with the 7 highest registers (register mask 0xDF80)
18789 This ldm will finally contain between 2 and 7 registers. */
18790 insn_low_registers = insn_all_registers & 0x007F;
18791 insn_high_registers = insn_all_registers & 0xDF80;
18793 /* A spare register may be needed during this veneer to temporarily
18794 handle the base register. This register will be restored with the
18795 last LDM operation.
18796 The usable register may be any general purpose register (that
18797 excludes PC, SP, LR : register mask is 0x1FFF). */
18798 usable_register_mask = 0x1FFF;
18800 /* Generate the stub function. */
18803 /* LDMIA Rn!, {R-low-register-list} : (Encoding T2). */
18804 current_stub_contents =
18805 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18806 create_instruction_ldmia
18807 (rn, /*wback=*/1, insn_low_registers));
18809 /* LDMIA Rn!, {R-high-register-list} : (Encoding T2). */
18810 current_stub_contents =
18811 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18812 create_instruction_ldmia
18813 (rn, /*wback=*/1, insn_high_registers));
18816 /* B initial_insn_addr+4. */
18817 current_stub_contents =
18818 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18819 create_instruction_branch_absolute
18820 (initial_insn_addr - current_stub_contents));
18823 else /* if (!wback). */
18827 /* If Rn is not part of the high-register-list, move it there. */
18828 if (!(insn_high_registers & (1 << rn)))
18830 /* Choose a Ri in the high-register-list that will be restored. */
18831 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
18834 current_stub_contents =
18835 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18836 create_instruction_mov (ri, rn));
18839 /* LDMIA Ri!, {R-low-register-list} : (Encoding T2). */
18840 current_stub_contents =
18841 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18842 create_instruction_ldmia
18843 (ri, /*wback=*/1, insn_low_registers));
18845 /* LDMIA Ri, {R-high-register-list} : (Encoding T2). */
18846 current_stub_contents =
18847 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18848 create_instruction_ldmia
18849 (ri, /*wback=*/0, insn_high_registers));
18853 /* B initial_insn_addr+4. */
18854 current_stub_contents =
18855 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18856 create_instruction_branch_absolute
18857 (initial_insn_addr - current_stub_contents));
18861 /* Fill the remaining of the stub with deterministic contents. */
18862 current_stub_contents =
18863 stm32l4xx_fill_stub_udf (htab, output_bfd,
18864 base_stub_contents, current_stub_contents,
18865 base_stub_contents +
18866 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18870 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
18872 const insn32 initial_insn,
18873 const bfd_byte *const initial_insn_addr,
18874 bfd_byte *const base_stub_contents)
18876 int wback = (initial_insn & 0x00200000) >> 21;
18877 int ri, rn = (initial_insn & 0x000f0000) >> 16;
18878 int insn_all_registers = initial_insn & 0x0000ffff;
18879 int insn_low_registers, insn_high_registers;
18880 int usable_register_mask;
18881 int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
18882 int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
18883 int nb_registers = elf32_arm_popcount (insn_all_registers);
18884 bfd_byte *current_stub_contents = base_stub_contents;
18886 BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
18888 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
18889 smaller than 8 registers load sequences that do not cause the
18891 if (nb_registers <= 8)
18893 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
18894 current_stub_contents =
18895 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18898 /* B initial_insn_addr+4. */
18899 current_stub_contents =
18900 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18901 create_instruction_branch_absolute
18902 (initial_insn_addr - current_stub_contents));
18904 /* Fill the remaining of the stub with deterministic contents. */
18905 current_stub_contents =
18906 stm32l4xx_fill_stub_udf (htab, output_bfd,
18907 base_stub_contents, current_stub_contents,
18908 base_stub_contents +
18909 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18914 /* - reg_list[13] == 0. */
18915 BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
18917 /* - reg_list[14] & reg_list[15] != 1. */
18918 BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
18920 /* - if (wback==1) reg_list[rn] == 0. */
18921 BFD_ASSERT (!wback || !restore_rn);
18923 /* - nb_registers > 8. */
18924 BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
18926 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
18928 /* In the following algorithm, we split this wide LDM using 2 LDM insn:
18929 - One with the 7 lowest registers (register mask 0x007F)
18930 This LDM will finally contain between 2 and 7 registers
18931 - One with the 7 highest registers (register mask 0xDF80)
18932 This ldm will finally contain between 2 and 7 registers. */
18933 insn_low_registers = insn_all_registers & 0x007F;
18934 insn_high_registers = insn_all_registers & 0xDF80;
18936 /* A spare register may be needed during this veneer to temporarily
18937 handle the base register. This register will be restored with
18938 the last LDM operation.
18939 The usable register may be any general purpose register (that excludes
18940 PC, SP, LR : register mask is 0x1FFF). */
18941 usable_register_mask = 0x1FFF;
18943 /* Generate the stub function. */
18944 if (!wback && !restore_pc && !restore_rn)
18946 /* Choose a Ri in the low-register-list that will be restored. */
18947 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
18950 current_stub_contents =
18951 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18952 create_instruction_mov (ri, rn));
18954 /* LDMDB Ri!, {R-high-register-list}. */
18955 current_stub_contents =
18956 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18957 create_instruction_ldmdb
18958 (ri, /*wback=*/1, insn_high_registers));
18960 /* LDMDB Ri, {R-low-register-list}. */
18961 current_stub_contents =
18962 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18963 create_instruction_ldmdb
18964 (ri, /*wback=*/0, insn_low_registers));
18966 /* B initial_insn_addr+4. */
18967 current_stub_contents =
18968 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18969 create_instruction_branch_absolute
18970 (initial_insn_addr - current_stub_contents));
18972 else if (wback && !restore_pc && !restore_rn)
18974 /* LDMDB Rn!, {R-high-register-list}. */
18975 current_stub_contents =
18976 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18977 create_instruction_ldmdb
18978 (rn, /*wback=*/1, insn_high_registers));
18980 /* LDMDB Rn!, {R-low-register-list}. */
18981 current_stub_contents =
18982 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18983 create_instruction_ldmdb
18984 (rn, /*wback=*/1, insn_low_registers));
18986 /* B initial_insn_addr+4. */
18987 current_stub_contents =
18988 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18989 create_instruction_branch_absolute
18990 (initial_insn_addr - current_stub_contents));
18992 else if (!wback && restore_pc && !restore_rn)
18994 /* Choose a Ri in the high-register-list that will be restored. */
18995 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
18997 /* SUB Ri, Rn, #(4*nb_registers). */
18998 current_stub_contents =
18999 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19000 create_instruction_sub (ri, rn, (4 * nb_registers)));
19002 /* LDMIA Ri!, {R-low-register-list}. */
19003 current_stub_contents =
19004 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19005 create_instruction_ldmia
19006 (ri, /*wback=*/1, insn_low_registers));
19008 /* LDMIA Ri, {R-high-register-list}. */
19009 current_stub_contents =
19010 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19011 create_instruction_ldmia
19012 (ri, /*wback=*/0, insn_high_registers));
19014 else if (wback && restore_pc && !restore_rn)
19016 /* Choose a Ri in the high-register-list that will be restored. */
19017 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19019 /* SUB Rn, Rn, #(4*nb_registers) */
19020 current_stub_contents =
19021 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19022 create_instruction_sub (rn, rn, (4 * nb_registers)));
19025 current_stub_contents =
19026 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19027 create_instruction_mov (ri, rn));
19029 /* LDMIA Ri!, {R-low-register-list}. */
19030 current_stub_contents =
19031 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19032 create_instruction_ldmia
19033 (ri, /*wback=*/1, insn_low_registers));
19035 /* LDMIA Ri, {R-high-register-list}. */
19036 current_stub_contents =
19037 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19038 create_instruction_ldmia
19039 (ri, /*wback=*/0, insn_high_registers));
19041 else if (!wback && !restore_pc && restore_rn)
19044 if (!(insn_low_registers & (1 << rn)))
19046 /* Choose a Ri in the low-register-list that will be restored. */
19047 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19050 current_stub_contents =
19051 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19052 create_instruction_mov (ri, rn));
19055 /* LDMDB Ri!, {R-high-register-list}. */
19056 current_stub_contents =
19057 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19058 create_instruction_ldmdb
19059 (ri, /*wback=*/1, insn_high_registers));
19061 /* LDMDB Ri, {R-low-register-list}. */
19062 current_stub_contents =
19063 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19064 create_instruction_ldmdb
19065 (ri, /*wback=*/0, insn_low_registers));
19067 /* B initial_insn_addr+4. */
19068 current_stub_contents =
19069 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19070 create_instruction_branch_absolute
19071 (initial_insn_addr - current_stub_contents));
19073 else if (!wback && restore_pc && restore_rn)
19076 if (!(insn_high_registers & (1 << rn)))
19078 /* Choose a Ri in the high-register-list that will be restored. */
19079 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19082 /* SUB Ri, Rn, #(4*nb_registers). */
19083 current_stub_contents =
19084 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19085 create_instruction_sub (ri, rn, (4 * nb_registers)));
19087 /* LDMIA Ri!, {R-low-register-list}. */
19088 current_stub_contents =
19089 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19090 create_instruction_ldmia
19091 (ri, /*wback=*/1, insn_low_registers));
19093 /* LDMIA Ri, {R-high-register-list}. */
19094 current_stub_contents =
19095 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19096 create_instruction_ldmia
19097 (ri, /*wback=*/0, insn_high_registers));
19099 else if (wback && restore_rn)
19101 /* The assembler should not have accepted to encode this. */
19102 BFD_ASSERT (0 && "Cannot patch an instruction that has an "
19103 "undefined behavior.\n");
19106 /* Fill the remaining of the stub with deterministic contents. */
19107 current_stub_contents =
19108 stm32l4xx_fill_stub_udf (htab, output_bfd,
19109 base_stub_contents, current_stub_contents,
19110 base_stub_contents +
19111 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19116 stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
19118 const insn32 initial_insn,
19119 const bfd_byte *const initial_insn_addr,
19120 bfd_byte *const base_stub_contents)
19122 int num_words = ((unsigned int) initial_insn << 24) >> 24;
19123 bfd_byte *current_stub_contents = base_stub_contents;
19125 BFD_ASSERT (is_thumb2_vldm (initial_insn));
19127 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
19128 smaller than 8 words load sequences that do not cause the
19130 if (num_words <= 8)
19132 /* Untouched instruction. */
19133 current_stub_contents =
19134 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19137 /* B initial_insn_addr+4. */
19138 current_stub_contents =
19139 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19140 create_instruction_branch_absolute
19141 (initial_insn_addr - current_stub_contents));
19145 bfd_boolean is_dp = /* DP encoding. */
19146 (initial_insn & 0xfe100f00) == 0xec100b00;
19147 bfd_boolean is_ia_nobang = /* (IA without !). */
19148 (((initial_insn << 7) >> 28) & 0xd) == 0x4;
19149 bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP. */
19150 (((initial_insn << 7) >> 28) & 0xd) == 0x5;
19151 bfd_boolean is_db_bang = /* (DB with !). */
19152 (((initial_insn << 7) >> 28) & 0xd) == 0x9;
19153 int base_reg = ((unsigned int) initial_insn << 12) >> 28;
19154 /* d = UInt (Vd:D);. */
19155 int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
19156 | (((unsigned int)initial_insn << 9) >> 31);
19158 /* Compute the number of 8-words chunks needed to split. */
19159 int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
19162 /* The test coverage has been done assuming the following
19163 hypothesis that exactly one of the previous is_ predicates is
19165 BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
19166 && !(is_ia_nobang & is_ia_bang & is_db_bang));
19168 /* We treat the cutting of the words in one pass for all
19169 cases, then we emit the adjustments:
19172 -> vldm rx!, {8_words_or_less} for each needed 8_word
19173 -> sub rx, rx, #size (list)
19176 -> vldm rx!, {8_words_or_less} for each needed 8_word
19177 This also handles vpop instruction (when rx is sp)
19180 -> vldmb rx!, {8_words_or_less} for each needed 8_word. */
19181 for (chunk = 0; chunk < chunks; ++chunk)
19183 bfd_vma new_insn = 0;
19185 if (is_ia_nobang || is_ia_bang)
19187 new_insn = create_instruction_vldmia
19191 chunks - (chunk + 1) ?
19192 8 : num_words - chunk * 8,
19193 first_reg + chunk * 8);
19195 else if (is_db_bang)
19197 new_insn = create_instruction_vldmdb
19200 chunks - (chunk + 1) ?
19201 8 : num_words - chunk * 8,
19202 first_reg + chunk * 8);
19206 current_stub_contents =
19207 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19211 /* Only this case requires the base register compensation
19215 current_stub_contents =
19216 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19217 create_instruction_sub
19218 (base_reg, base_reg, 4*num_words));
19221 /* B initial_insn_addr+4. */
19222 current_stub_contents =
19223 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19224 create_instruction_branch_absolute
19225 (initial_insn_addr - current_stub_contents));
19228 /* Fill the remaining of the stub with deterministic contents. */
19229 current_stub_contents =
19230 stm32l4xx_fill_stub_udf (htab, output_bfd,
19231 base_stub_contents, current_stub_contents,
19232 base_stub_contents +
19233 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
19237 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19239 const insn32 wrong_insn,
19240 const bfd_byte *const wrong_insn_addr,
19241 bfd_byte *const stub_contents)
19243 if (is_thumb2_ldmia (wrong_insn))
19244 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19245 wrong_insn, wrong_insn_addr,
19247 else if (is_thumb2_ldmdb (wrong_insn))
19248 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19249 wrong_insn, wrong_insn_addr,
19251 else if (is_thumb2_vldm (wrong_insn))
19252 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19253 wrong_insn, wrong_insn_addr,
19257 /* End of stm32l4xx work-around. */
19260 /* Do code byteswapping. Return FALSE afterwards so that the section is
19261 written out as normal. */
19264 elf32_arm_write_section (bfd *output_bfd,
19265 struct bfd_link_info *link_info,
19267 bfd_byte *contents)
19269 unsigned int mapcount, errcount;
19270 _arm_elf_section_data *arm_data;
19271 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
19272 elf32_arm_section_map *map;
19273 elf32_vfp11_erratum_list *errnode;
19274 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
19277 bfd_vma offset = sec->output_section->vma + sec->output_offset;
19281 if (globals == NULL)
19284 /* If this section has not been allocated an _arm_elf_section_data
19285 structure then we cannot record anything. */
19286 arm_data = get_arm_elf_section_data (sec);
19287 if (arm_data == NULL)
19290 mapcount = arm_data->mapcount;
19291 map = arm_data->map;
19292 errcount = arm_data->erratumcount;
19296 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
19298 for (errnode = arm_data->erratumlist; errnode != 0;
19299 errnode = errnode->next)
19301 bfd_vma target = errnode->vma - offset;
19303 switch (errnode->type)
19305 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
19307 bfd_vma branch_to_veneer;
19308 /* Original condition code of instruction, plus bit mask for
19309 ARM B instruction. */
19310 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
19313 /* The instruction is before the label. */
19316 /* Above offset included in -4 below. */
19317 branch_to_veneer = errnode->u.b.veneer->vma
19318 - errnode->vma - 4;
19320 if ((signed) branch_to_veneer < -(1 << 25)
19321 || (signed) branch_to_veneer >= (1 << 25))
19322 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19323 "range"), output_bfd);
19325 insn |= (branch_to_veneer >> 2) & 0xffffff;
19326 contents[endianflip ^ target] = insn & 0xff;
19327 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19328 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19329 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19333 case VFP11_ERRATUM_ARM_VENEER:
19335 bfd_vma branch_from_veneer;
19338 /* Take size of veneer into account. */
19339 branch_from_veneer = errnode->u.v.branch->vma
19340 - errnode->vma - 12;
19342 if ((signed) branch_from_veneer < -(1 << 25)
19343 || (signed) branch_from_veneer >= (1 << 25))
19344 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19345 "range"), output_bfd);
19347 /* Original instruction. */
19348 insn = errnode->u.v.branch->u.b.vfp_insn;
19349 contents[endianflip ^ target] = insn & 0xff;
19350 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19351 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19352 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19354 /* Branch back to insn after original insn. */
19355 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
19356 contents[endianflip ^ (target + 4)] = insn & 0xff;
19357 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
19358 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
19359 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
19369 if (arm_data->stm32l4xx_erratumcount != 0)
19371 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
19372 stm32l4xx_errnode != 0;
19373 stm32l4xx_errnode = stm32l4xx_errnode->next)
19375 bfd_vma target = stm32l4xx_errnode->vma - offset;
19377 switch (stm32l4xx_errnode->type)
19379 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
19382 bfd_vma branch_to_veneer =
19383 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
19385 if ((signed) branch_to_veneer < -(1 << 24)
19386 || (signed) branch_to_veneer >= (1 << 24))
19388 bfd_vma out_of_range =
19389 ((signed) branch_to_veneer < -(1 << 24)) ?
19390 - branch_to_veneer - (1 << 24) :
19391 ((signed) branch_to_veneer >= (1 << 24)) ?
19392 branch_to_veneer - (1 << 24) : 0;
19395 (_("%pB(%#" PRIx64 "): error: "
19396 "cannot create STM32L4XX veneer; "
19397 "jump out of range by %" PRId64 " bytes; "
19398 "cannot encode branch instruction"),
19400 (uint64_t) (stm32l4xx_errnode->vma - 4),
19401 (int64_t) out_of_range);
19405 insn = create_instruction_branch_absolute
19406 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
19408 /* The instruction is before the label. */
19411 put_thumb2_insn (globals, output_bfd,
19412 (bfd_vma) insn, contents + target);
19416 case STM32L4XX_ERRATUM_VENEER:
19419 bfd_byte * veneer_r;
19422 veneer = contents + target;
19424 + stm32l4xx_errnode->u.b.veneer->vma
19425 - stm32l4xx_errnode->vma - 4;
19427 if ((signed) (veneer_r - veneer -
19428 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
19429 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
19430 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
19431 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
19432 || (signed) (veneer_r - veneer) >= (1 << 24))
19434 _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19435 "veneer"), output_bfd);
19439 /* Original instruction. */
19440 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
19442 stm32l4xx_create_replacing_stub
19443 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
19453 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
19455 arm_unwind_table_edit *edit_node
19456 = arm_data->u.exidx.unwind_edit_list;
19457 /* Now, sec->size is the size of the section we will write. The original
19458 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
19459 markers) was sec->rawsize. (This isn't the case if we perform no
19460 edits, then rawsize will be zero and we should use size). */
19461 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
19462 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
19463 unsigned int in_index, out_index;
19464 bfd_vma add_to_offsets = 0;
19466 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
19470 unsigned int edit_index = edit_node->index;
19472 if (in_index < edit_index && in_index * 8 < input_size)
19474 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19475 contents + in_index * 8, add_to_offsets);
19479 else if (in_index == edit_index
19480 || (in_index * 8 >= input_size
19481 && edit_index == UINT_MAX))
19483 switch (edit_node->type)
19485 case DELETE_EXIDX_ENTRY:
19487 add_to_offsets += 8;
19490 case INSERT_EXIDX_CANTUNWIND_AT_END:
19492 asection *text_sec = edit_node->linked_section;
19493 bfd_vma text_offset = text_sec->output_section->vma
19494 + text_sec->output_offset
19496 bfd_vma exidx_offset = offset + out_index * 8;
19497 unsigned long prel31_offset;
19499 /* Note: this is meant to be equivalent to an
19500 R_ARM_PREL31 relocation. These synthetic
19501 EXIDX_CANTUNWIND markers are not relocated by the
19502 usual BFD method. */
19503 prel31_offset = (text_offset - exidx_offset)
19505 if (bfd_link_relocatable (link_info))
19507 /* Here relocation for new EXIDX_CANTUNWIND is
19508 created, so there is no need to
19509 adjust offset by hand. */
19510 prel31_offset = text_sec->output_offset
19514 /* First address we can't unwind. */
19515 bfd_put_32 (output_bfd, prel31_offset,
19516 &edited_contents[out_index * 8]);
19518 /* Code for EXIDX_CANTUNWIND. */
19519 bfd_put_32 (output_bfd, 0x1,
19520 &edited_contents[out_index * 8 + 4]);
19523 add_to_offsets -= 8;
19528 edit_node = edit_node->next;
19533 /* No more edits, copy remaining entries verbatim. */
19534 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19535 contents + in_index * 8, add_to_offsets);
19541 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
19542 bfd_set_section_contents (output_bfd, sec->output_section,
19544 (file_ptr) sec->output_offset, sec->size);
19549 /* Fix code to point to Cortex-A8 erratum stubs. */
19550 if (globals->fix_cortex_a8)
19552 struct a8_branch_to_stub_data data;
19554 data.writing_section = sec;
19555 data.contents = contents;
19557 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
19564 if (globals->byteswap_code)
19566 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
19569 for (i = 0; i < mapcount; i++)
19571 if (i == mapcount - 1)
19574 end = map[i + 1].vma;
19576 switch (map[i].type)
19579 /* Byte swap code words. */
19580 while (ptr + 3 < end)
19582 tmp = contents[ptr];
19583 contents[ptr] = contents[ptr + 3];
19584 contents[ptr + 3] = tmp;
19585 tmp = contents[ptr + 1];
19586 contents[ptr + 1] = contents[ptr + 2];
19587 contents[ptr + 2] = tmp;
19593 /* Byte swap code halfwords. */
19594 while (ptr + 1 < end)
19596 tmp = contents[ptr];
19597 contents[ptr] = contents[ptr + 1];
19598 contents[ptr + 1] = tmp;
19604 /* Leave data alone. */
19612 arm_data->mapcount = -1;
19613 arm_data->mapsize = 0;
19614 arm_data->map = NULL;
19619 /* Mangle thumb function symbols as we read them in. */
19622 elf32_arm_swap_symbol_in (bfd * abfd,
19625 Elf_Internal_Sym *dst)
19627 Elf_Internal_Shdr *symtab_hdr;
19628 const char *name = NULL;
19630 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19632 dst->st_target_internal = 0;
19634 /* New EABI objects mark thumb function symbols by setting the low bit of
19636 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19637 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19639 if (dst->st_value & 1)
19641 dst->st_value &= ~(bfd_vma) 1;
19642 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19643 ST_BRANCH_TO_THUMB);
19646 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19648 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19650 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19651 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19653 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19654 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19656 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19658 /* Mark CMSE special symbols. */
19659 symtab_hdr = & elf_symtab_hdr (abfd);
19660 if (symtab_hdr->sh_size)
19661 name = bfd_elf_sym_name (abfd, symtab_hdr, dst, NULL);
19662 if (name && CONST_STRNEQ (name, CMSE_PREFIX))
19663 ARM_SET_SYM_CMSE_SPCL (dst->st_target_internal);
19669 /* Mangle thumb function symbols as we write them out. */
19672 elf32_arm_swap_symbol_out (bfd *abfd,
19673 const Elf_Internal_Sym *src,
19677 Elf_Internal_Sym newsym;
19679 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19680 of the address set, as per the new EABI. We do this unconditionally
19681 because objcopy does not set the elf header flags until after
19682 it writes out the symbol table. */
19683 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19686 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19687 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19688 if (newsym.st_shndx != SHN_UNDEF)
19690 /* Do this only for defined symbols. At link type, the static
19691 linker will simulate the work of dynamic linker of resolving
19692 symbols and will carry over the thumbness of found symbols to
19693 the output symbol table. It's not clear how it happens, but
19694 the thumbness of undefined symbols can well be different at
19695 runtime, and writing '1' for them will be confusing for users
19696 and possibly for dynamic linker itself.
19698 newsym.st_value |= 1;
19703 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19706 /* Add the PT_ARM_EXIDX program header. */
19709 elf32_arm_modify_segment_map (bfd *abfd,
19710 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19712 struct elf_segment_map *m;
19715 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19716 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19718 /* If there is already a PT_ARM_EXIDX header, then we do not
19719 want to add another one. This situation arises when running
19720 "strip"; the input binary already has the header. */
19721 m = elf_seg_map (abfd);
19722 while (m && m->p_type != PT_ARM_EXIDX)
19726 m = (struct elf_segment_map *)
19727 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19730 m->p_type = PT_ARM_EXIDX;
19732 m->sections[0] = sec;
19734 m->next = elf_seg_map (abfd);
19735 elf_seg_map (abfd) = m;
19742 /* We may add a PT_ARM_EXIDX program header. */
19745 elf32_arm_additional_program_headers (bfd *abfd,
19746 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19750 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19751 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19757 /* Hook called by the linker routine which adds symbols from an object
19761 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19762 Elf_Internal_Sym *sym, const char **namep,
19763 flagword *flagsp, asection **secp, bfd_vma *valp)
19765 if (elf32_arm_hash_table (info) == NULL)
19768 if (elf32_arm_hash_table (info)->vxworks_p
19769 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19770 flagsp, secp, valp))
19776 /* We use this to override swap_symbol_in and swap_symbol_out. */
19777 const struct elf_size_info elf32_arm_size_info =
19779 sizeof (Elf32_External_Ehdr),
19780 sizeof (Elf32_External_Phdr),
19781 sizeof (Elf32_External_Shdr),
19782 sizeof (Elf32_External_Rel),
19783 sizeof (Elf32_External_Rela),
19784 sizeof (Elf32_External_Sym),
19785 sizeof (Elf32_External_Dyn),
19786 sizeof (Elf_External_Note),
19790 ELFCLASS32, EV_CURRENT,
19791 bfd_elf32_write_out_phdrs,
19792 bfd_elf32_write_shdrs_and_ehdr,
19793 bfd_elf32_checksum_contents,
19794 bfd_elf32_write_relocs,
19795 elf32_arm_swap_symbol_in,
19796 elf32_arm_swap_symbol_out,
19797 bfd_elf32_slurp_reloc_table,
19798 bfd_elf32_slurp_symbol_table,
19799 bfd_elf32_swap_dyn_in,
19800 bfd_elf32_swap_dyn_out,
19801 bfd_elf32_swap_reloc_in,
19802 bfd_elf32_swap_reloc_out,
19803 bfd_elf32_swap_reloca_in,
19804 bfd_elf32_swap_reloca_out
19808 read_code32 (const bfd *abfd, const bfd_byte *addr)
19810 /* V7 BE8 code is always little endian. */
19811 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19812 return bfd_getl32 (addr);
19814 return bfd_get_32 (abfd, addr);
19818 read_code16 (const bfd *abfd, const bfd_byte *addr)
19820 /* V7 BE8 code is always little endian. */
19821 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19822 return bfd_getl16 (addr);
19824 return bfd_get_16 (abfd, addr);
19827 /* Return size of plt0 entry starting at ADDR
19828 or (bfd_vma) -1 if size can not be determined. */
19831 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
19833 bfd_vma first_word;
19836 first_word = read_code32 (abfd, addr);
19838 if (first_word == elf32_arm_plt0_entry[0])
19839 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
19840 else if (first_word == elf32_thumb2_plt0_entry[0])
19841 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
19843 /* We don't yet handle this PLT format. */
19844 return (bfd_vma) -1;
19849 /* Return size of plt entry starting at offset OFFSET
19850 of plt section located at address START
19851 or (bfd_vma) -1 if size can not be determined. */
19854 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
19856 bfd_vma first_insn;
19857 bfd_vma plt_size = 0;
19858 const bfd_byte *addr = start + offset;
19860 /* PLT entry size if fixed on Thumb-only platforms. */
19861 if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
19862 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
19864 /* Respect Thumb stub if necessary. */
19865 if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
19867 plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
19870 /* Strip immediate from first add. */
19871 first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
19873 #ifdef FOUR_WORD_PLT
19874 if (first_insn == elf32_arm_plt_entry[0])
19875 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
19877 if (first_insn == elf32_arm_plt_entry_long[0])
19878 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
19879 else if (first_insn == elf32_arm_plt_entry_short[0])
19880 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
19883 /* We don't yet handle this PLT format. */
19884 return (bfd_vma) -1;
19889 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
19892 elf32_arm_get_synthetic_symtab (bfd *abfd,
19893 long symcount ATTRIBUTE_UNUSED,
19894 asymbol **syms ATTRIBUTE_UNUSED,
19904 Elf_Internal_Shdr *hdr;
19912 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
19915 if (dynsymcount <= 0)
19918 relplt = bfd_get_section_by_name (abfd, ".rel.plt");
19919 if (relplt == NULL)
19922 hdr = &elf_section_data (relplt)->this_hdr;
19923 if (hdr->sh_link != elf_dynsymtab (abfd)
19924 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
19927 plt = bfd_get_section_by_name (abfd, ".plt");
19931 if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
19934 data = plt->contents;
19937 if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
19939 bfd_cache_section_contents((asection *) plt, data);
19942 count = relplt->size / hdr->sh_entsize;
19943 size = count * sizeof (asymbol);
19944 p = relplt->relocation;
19945 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
19947 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
19948 if (p->addend != 0)
19949 size += sizeof ("+0x") - 1 + 8;
19952 s = *ret = (asymbol *) bfd_malloc (size);
19956 offset = elf32_arm_plt0_size (abfd, data);
19957 if (offset == (bfd_vma) -1)
19960 names = (char *) (s + count);
19961 p = relplt->relocation;
19963 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
19967 bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
19968 if (plt_size == (bfd_vma) -1)
19971 *s = **p->sym_ptr_ptr;
19972 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
19973 we are defining a symbol, ensure one of them is set. */
19974 if ((s->flags & BSF_LOCAL) == 0)
19975 s->flags |= BSF_GLOBAL;
19976 s->flags |= BSF_SYNTHETIC;
19981 len = strlen ((*p->sym_ptr_ptr)->name);
19982 memcpy (names, (*p->sym_ptr_ptr)->name, len);
19984 if (p->addend != 0)
19988 memcpy (names, "+0x", sizeof ("+0x") - 1);
19989 names += sizeof ("+0x") - 1;
19990 bfd_sprintf_vma (abfd, buf, p->addend);
19991 for (a = buf; *a == '0'; ++a)
19994 memcpy (names, a, len);
19997 memcpy (names, "@plt", sizeof ("@plt"));
19998 names += sizeof ("@plt");
20000 offset += plt_size;
20007 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
20009 if (hdr->sh_flags & SHF_ARM_PURECODE)
20010 *flags |= SEC_ELF_PURECODE;
20015 elf32_arm_lookup_section_flags (char *flag_name)
20017 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20018 return SHF_ARM_PURECODE;
20020 return SEC_NO_FLAGS;
20023 static unsigned int
20024 elf32_arm_count_additional_relocs (asection *sec)
20026 struct _arm_elf_section_data *arm_data;
20027 arm_data = get_arm_elf_section_data (sec);
20029 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20032 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
20033 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
20034 FALSE otherwise. ISECTION is the best guess matching section from the
20035 input bfd IBFD, but it might be NULL. */
20038 elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
20039 bfd *obfd ATTRIBUTE_UNUSED,
20040 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
20041 Elf_Internal_Shdr *osection)
20043 switch (osection->sh_type)
20045 case SHT_ARM_EXIDX:
20047 Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
20048 Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
20051 osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
20052 osection->sh_info = 0;
20054 /* The sh_link field must be set to the text section associated with
20055 this index section. Unfortunately the ARM EHABI does not specify
20056 exactly how to determine this association. Our caller does try
20057 to match up OSECTION with its corresponding input section however
20058 so that is a good first guess. */
20059 if (isection != NULL
20060 && osection->bfd_section != NULL
20061 && isection->bfd_section != NULL
20062 && isection->bfd_section->output_section != NULL
20063 && isection->bfd_section->output_section == osection->bfd_section
20064 && iheaders != NULL
20065 && isection->sh_link > 0
20066 && isection->sh_link < elf_numsections (ibfd)
20067 && iheaders[isection->sh_link]->bfd_section != NULL
20068 && iheaders[isection->sh_link]->bfd_section->output_section != NULL
20071 for (i = elf_numsections (obfd); i-- > 0;)
20072 if (oheaders[i]->bfd_section
20073 == iheaders[isection->sh_link]->bfd_section->output_section)
20079 /* Failing that we have to find a matching section ourselves. If
20080 we had the output section name available we could compare that
20081 with input section names. Unfortunately we don't. So instead
20082 we use a simple heuristic and look for the nearest executable
20083 section before this one. */
20084 for (i = elf_numsections (obfd); i-- > 0;)
20085 if (oheaders[i] == osection)
20091 if (oheaders[i]->sh_type == SHT_PROGBITS
20092 && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
20093 == (SHF_ALLOC | SHF_EXECINSTR))
20099 osection->sh_link = i;
20100 /* If the text section was part of a group
20101 then the index section should be too. */
20102 if (oheaders[i]->sh_flags & SHF_GROUP)
20103 osection->sh_flags |= SHF_GROUP;
20109 case SHT_ARM_PREEMPTMAP:
20110 osection->sh_flags = SHF_ALLOC;
20113 case SHT_ARM_ATTRIBUTES:
20114 case SHT_ARM_DEBUGOVERLAY:
20115 case SHT_ARM_OVERLAYSECTION:
20123 /* Returns TRUE if NAME is an ARM mapping symbol.
20124 Traditionally the symbols $a, $d and $t have been used.
20125 The ARM ELF standard also defines $x (for A64 code). It also allows a
20126 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20127 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20128 not support them here. $t.x indicates the start of ThumbEE instructions. */
20131 is_arm_mapping_symbol (const char * name)
20133 return name != NULL /* Paranoia. */
20134 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20135 the mapping symbols could have acquired a prefix.
20136 We do not support this here, since such symbols no
20137 longer conform to the ARM ELF ABI. */
20138 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20139 && (name[2] == 0 || name[2] == '.');
20140 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20141 any characters that follow the period are legal characters for the body
20142 of a symbol's name. For now we just assume that this is the case. */
20145 /* Make sure that mapping symbols in object files are not removed via the
20146 "strip --strip-unneeded" tool. These symbols are needed in order to
20147 correctly generate interworking veneers, and for byte swapping code
20148 regions. Once an object file has been linked, it is safe to remove the
20149 symbols as they will no longer be needed. */
20152 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20154 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20155 && sym->section != bfd_abs_section_ptr
20156 && is_arm_mapping_symbol (sym->name))
20157 sym->flags |= BSF_KEEP;
20160 #undef elf_backend_copy_special_section_fields
20161 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20163 #define ELF_ARCH bfd_arch_arm
20164 #define ELF_TARGET_ID ARM_ELF_DATA
20165 #define ELF_MACHINE_CODE EM_ARM
20166 #ifdef __QNXTARGET__
20167 #define ELF_MAXPAGESIZE 0x1000
20169 #define ELF_MAXPAGESIZE 0x10000
20171 #define ELF_MINPAGESIZE 0x1000
20172 #define ELF_COMMONPAGESIZE 0x1000
20174 #define bfd_elf32_mkobject elf32_arm_mkobject
20176 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20177 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20178 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20179 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20180 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20181 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20182 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20183 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
20184 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20185 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20186 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20187 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20188 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20190 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20191 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20192 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20193 #define elf_backend_check_relocs elf32_arm_check_relocs
20194 #define elf_backend_update_relocs elf32_arm_update_relocs
20195 #define elf_backend_relocate_section elf32_arm_relocate_section
20196 #define elf_backend_write_section elf32_arm_write_section
20197 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20198 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20199 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20200 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20201 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
20202 #define elf_backend_always_size_sections elf32_arm_always_size_sections
20203 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20204 #define elf_backend_post_process_headers elf32_arm_post_process_headers
20205 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20206 #define elf_backend_object_p elf32_arm_object_p
20207 #define elf_backend_fake_sections elf32_arm_fake_sections
20208 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20209 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20210 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20211 #define elf_backend_size_info elf32_arm_size_info
20212 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20213 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20214 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20215 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20216 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20217 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20218 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20219 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20221 #define elf_backend_can_refcount 1
20222 #define elf_backend_can_gc_sections 1
20223 #define elf_backend_plt_readonly 1
20224 #define elf_backend_want_got_plt 1
20225 #define elf_backend_want_plt_sym 0
20226 #define elf_backend_want_dynrelro 1
20227 #define elf_backend_may_use_rel_p 1
20228 #define elf_backend_may_use_rela_p 0
20229 #define elf_backend_default_use_rela_p 0
20230 #define elf_backend_dtrel_excludes_plt 1
20232 #define elf_backend_got_header_size 12
20233 #define elf_backend_extern_protected_data 1
20235 #undef elf_backend_obj_attrs_vendor
20236 #define elf_backend_obj_attrs_vendor "aeabi"
20237 #undef elf_backend_obj_attrs_section
20238 #define elf_backend_obj_attrs_section ".ARM.attributes"
20239 #undef elf_backend_obj_attrs_arg_type
20240 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20241 #undef elf_backend_obj_attrs_section_type
20242 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20243 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20244 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20246 #undef elf_backend_section_flags
20247 #define elf_backend_section_flags elf32_arm_section_flags
20248 #undef elf_backend_lookup_section_flags_hook
20249 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20251 #define elf_backend_linux_prpsinfo32_ugid16 TRUE
20253 #include "elf32-target.h"
20255 /* Native Client targets. */
20257 #undef TARGET_LITTLE_SYM
20258 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20259 #undef TARGET_LITTLE_NAME
20260 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20261 #undef TARGET_BIG_SYM
20262 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20263 #undef TARGET_BIG_NAME
20264 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20266 /* Like elf32_arm_link_hash_table_create -- but overrides
20267 appropriately for NaCl. */
20269 static struct bfd_link_hash_table *
20270 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20272 struct bfd_link_hash_table *ret;
20274 ret = elf32_arm_link_hash_table_create (abfd);
20277 struct elf32_arm_link_hash_table *htab
20278 = (struct elf32_arm_link_hash_table *) ret;
20282 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20283 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20288 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20289 really need to use elf32_arm_modify_segment_map. But we do it
20290 anyway just to reduce gratuitous differences with the stock ARM backend. */
20293 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20295 return (elf32_arm_modify_segment_map (abfd, info)
20296 && nacl_modify_segment_map (abfd, info));
20300 elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
20302 elf32_arm_final_write_processing (abfd, linker);
20303 nacl_final_write_processing (abfd, linker);
20307 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20308 const arelent *rel ATTRIBUTE_UNUSED)
20311 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20312 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20316 #define elf32_bed elf32_arm_nacl_bed
20317 #undef bfd_elf32_bfd_link_hash_table_create
20318 #define bfd_elf32_bfd_link_hash_table_create \
20319 elf32_arm_nacl_link_hash_table_create
20320 #undef elf_backend_plt_alignment
20321 #define elf_backend_plt_alignment 4
20322 #undef elf_backend_modify_segment_map
20323 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20324 #undef elf_backend_modify_program_headers
20325 #define elf_backend_modify_program_headers nacl_modify_program_headers
20326 #undef elf_backend_final_write_processing
20327 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20328 #undef bfd_elf32_get_synthetic_symtab
20329 #undef elf_backend_plt_sym_val
20330 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20331 #undef elf_backend_copy_special_section_fields
20333 #undef ELF_MINPAGESIZE
20334 #undef ELF_COMMONPAGESIZE
20337 #include "elf32-target.h"
20339 /* Reset to defaults. */
20340 #undef elf_backend_plt_alignment
20341 #undef elf_backend_modify_segment_map
20342 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20343 #undef elf_backend_modify_program_headers
20344 #undef elf_backend_final_write_processing
20345 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20346 #undef ELF_MINPAGESIZE
20347 #define ELF_MINPAGESIZE 0x1000
20348 #undef ELF_COMMONPAGESIZE
20349 #define ELF_COMMONPAGESIZE 0x1000
20352 /* FDPIC Targets. */
20354 #undef TARGET_LITTLE_SYM
20355 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20356 #undef TARGET_LITTLE_NAME
20357 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20358 #undef TARGET_BIG_SYM
20359 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20360 #undef TARGET_BIG_NAME
20361 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
20362 #undef elf_match_priority
20363 #define elf_match_priority 128
20365 #define ELF_OSABI ELFOSABI_ARM_FDPIC
20367 /* Like elf32_arm_link_hash_table_create -- but overrides
20368 appropriately for FDPIC. */
20370 static struct bfd_link_hash_table *
20371 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20373 struct bfd_link_hash_table *ret;
20375 ret = elf32_arm_link_hash_table_create (abfd);
20378 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20385 /* We need dynamic symbols for every section, since segments can
20386 relocate independently. */
20388 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20389 struct bfd_link_info *info
20391 asection *p ATTRIBUTE_UNUSED)
20393 switch (elf_section_data (p)->this_hdr.sh_type)
20397 /* If sh_type is yet undecided, assume it could be
20398 SHT_PROGBITS/SHT_NOBITS. */
20402 /* There shouldn't be section relative relocations
20403 against any other section. */
20410 #define elf32_bed elf32_arm_fdpic_bed
20412 #undef bfd_elf32_bfd_link_hash_table_create
20413 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20415 #undef elf_backend_omit_section_dynsym
20416 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20418 #include "elf32-target.h"
20420 #undef elf_match_priority
20422 #undef elf_backend_omit_section_dynsym
20424 /* VxWorks Targets. */
20426 #undef TARGET_LITTLE_SYM
20427 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20428 #undef TARGET_LITTLE_NAME
20429 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20430 #undef TARGET_BIG_SYM
20431 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20432 #undef TARGET_BIG_NAME
20433 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
20435 /* Like elf32_arm_link_hash_table_create -- but overrides
20436 appropriately for VxWorks. */
20438 static struct bfd_link_hash_table *
20439 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20441 struct bfd_link_hash_table *ret;
20443 ret = elf32_arm_link_hash_table_create (abfd);
20446 struct elf32_arm_link_hash_table *htab
20447 = (struct elf32_arm_link_hash_table *) ret;
20449 htab->vxworks_p = 1;
20455 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
20457 elf32_arm_final_write_processing (abfd, linker);
20458 elf_vxworks_final_write_processing (abfd, linker);
20462 #define elf32_bed elf32_arm_vxworks_bed
20464 #undef bfd_elf32_bfd_link_hash_table_create
20465 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20466 #undef elf_backend_final_write_processing
20467 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20468 #undef elf_backend_emit_relocs
20469 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
20471 #undef elf_backend_may_use_rel_p
20472 #define elf_backend_may_use_rel_p 0
20473 #undef elf_backend_may_use_rela_p
20474 #define elf_backend_may_use_rela_p 1
20475 #undef elf_backend_default_use_rela_p
20476 #define elf_backend_default_use_rela_p 1
20477 #undef elf_backend_want_plt_sym
20478 #define elf_backend_want_plt_sym 1
20479 #undef ELF_MAXPAGESIZE
20480 #define ELF_MAXPAGESIZE 0x1000
20482 #include "elf32-target.h"
20485 /* Merge backend specific data from an object file to the output
20486 object file when linking. */
20489 elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
20491 bfd *obfd = info->output_bfd;
20492 flagword out_flags;
20494 bfd_boolean flags_compatible = TRUE;
20497 /* Check if we have the same endianness. */
20498 if (! _bfd_generic_verify_endian_match (ibfd, info))
20501 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
20504 if (!elf32_arm_merge_eabi_attributes (ibfd, info))
20507 /* The input BFD must have had its flags initialised. */
20508 /* The following seems bogus to me -- The flags are initialized in
20509 the assembler but I don't think an elf_flags_init field is
20510 written into the object. */
20511 /* BFD_ASSERT (elf_flags_init (ibfd)); */
20513 in_flags = elf_elfheader (ibfd)->e_flags;
20514 out_flags = elf_elfheader (obfd)->e_flags;
20516 /* In theory there is no reason why we couldn't handle this. However
20517 in practice it isn't even close to working and there is no real
20518 reason to want it. */
20519 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
20520 && !(ibfd->flags & DYNAMIC)
20521 && (in_flags & EF_ARM_BE8))
20523 _bfd_error_handler (_("error: %pB is already in final BE8 format"),
20528 if (!elf_flags_init (obfd))
20530 /* If the input is the default architecture and had the default
20531 flags then do not bother setting the flags for the output
20532 architecture, instead allow future merges to do this. If no
20533 future merges ever set these flags then they will retain their
20534 uninitialised values, which surprise surprise, correspond
20535 to the default values. */
20536 if (bfd_get_arch_info (ibfd)->the_default
20537 && elf_elfheader (ibfd)->e_flags == 0)
20540 elf_flags_init (obfd) = TRUE;
20541 elf_elfheader (obfd)->e_flags = in_flags;
20543 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
20544 && bfd_get_arch_info (obfd)->the_default)
20545 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
20550 /* Determine what should happen if the input ARM architecture
20551 does not match the output ARM architecture. */
20552 if (! bfd_arm_merge_machines (ibfd, obfd))
20555 /* Identical flags must be compatible. */
20556 if (in_flags == out_flags)
20559 /* Check to see if the input BFD actually contains any sections. If
20560 not, its flags may not have been initialised either, but it
20561 cannot actually cause any incompatiblity. Do not short-circuit
20562 dynamic objects; their section list may be emptied by
20563 elf_link_add_object_symbols.
20565 Also check to see if there are no code sections in the input.
20566 In this case there is no need to check for code specific flags.
20567 XXX - do we need to worry about floating-point format compatability
20568 in data sections ? */
20569 if (!(ibfd->flags & DYNAMIC))
20571 bfd_boolean null_input_bfd = TRUE;
20572 bfd_boolean only_data_sections = TRUE;
20574 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
20576 /* Ignore synthetic glue sections. */
20577 if (strcmp (sec->name, ".glue_7")
20578 && strcmp (sec->name, ".glue_7t"))
20580 if ((bfd_get_section_flags (ibfd, sec)
20581 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20582 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20583 only_data_sections = FALSE;
20585 null_input_bfd = FALSE;
20590 if (null_input_bfd || only_data_sections)
20594 /* Complain about various flag mismatches. */
20595 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
20596 EF_ARM_EABI_VERSION (out_flags)))
20599 (_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
20600 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
20601 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
20605 /* Not sure what needs to be checked for EABI versions >= 1. */
20606 /* VxWorks libraries do not use these flags. */
20607 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
20608 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
20609 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
20611 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
20614 (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
20615 ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
20616 obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
20617 flags_compatible = FALSE;
20620 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
20622 if (in_flags & EF_ARM_APCS_FLOAT)
20624 (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
20628 (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
20631 flags_compatible = FALSE;
20634 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
20636 if (in_flags & EF_ARM_VFP_FLOAT)
20638 (_("error: %pB uses %s instructions, whereas %pB does not"),
20639 ibfd, "VFP", obfd);
20642 (_("error: %pB uses %s instructions, whereas %pB does not"),
20643 ibfd, "FPA", obfd);
20645 flags_compatible = FALSE;
20648 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
20650 if (in_flags & EF_ARM_MAVERICK_FLOAT)
20652 (_("error: %pB uses %s instructions, whereas %pB does not"),
20653 ibfd, "Maverick", obfd);
20656 (_("error: %pB does not use %s instructions, whereas %pB does"),
20657 ibfd, "Maverick", obfd);
20659 flags_compatible = FALSE;
20662 #ifdef EF_ARM_SOFT_FLOAT
20663 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
20665 /* We can allow interworking between code that is VFP format
20666 layout, and uses either soft float or integer regs for
20667 passing floating point arguments and results. We already
20668 know that the APCS_FLOAT flags match; similarly for VFP
20670 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
20671 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
20673 if (in_flags & EF_ARM_SOFT_FLOAT)
20675 (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
20679 (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
20682 flags_compatible = FALSE;
20687 /* Interworking mismatch is only a warning. */
20688 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
20690 if (in_flags & EF_ARM_INTERWORK)
20693 (_("warning: %pB supports interworking, whereas %pB does not"),
20699 (_("warning: %pB does not support interworking, whereas %pB does"),
20705 return flags_compatible;
20709 /* Symbian OS Targets. */
20711 #undef TARGET_LITTLE_SYM
20712 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
20713 #undef TARGET_LITTLE_NAME
20714 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
20715 #undef TARGET_BIG_SYM
20716 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
20717 #undef TARGET_BIG_NAME
20718 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
20720 /* Like elf32_arm_link_hash_table_create -- but overrides
20721 appropriately for Symbian OS. */
20723 static struct bfd_link_hash_table *
20724 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
20726 struct bfd_link_hash_table *ret;
20728 ret = elf32_arm_link_hash_table_create (abfd);
20731 struct elf32_arm_link_hash_table *htab
20732 = (struct elf32_arm_link_hash_table *)ret;
20733 /* There is no PLT header for Symbian OS. */
20734 htab->plt_header_size = 0;
20735 /* The PLT entries are each one instruction and one word. */
20736 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
20737 htab->symbian_p = 1;
20738 /* Symbian uses armv5t or above, so use_blx is always true. */
20740 htab->root.is_relocatable_executable = 1;
20745 static const struct bfd_elf_special_section
20746 elf32_arm_symbian_special_sections[] =
20748 /* In a BPABI executable, the dynamic linking sections do not go in
20749 the loadable read-only segment. The post-linker may wish to
20750 refer to these sections, but they are not part of the final
20752 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
20753 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
20754 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
20755 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
20756 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
20757 /* These sections do not need to be writable as the SymbianOS
20758 postlinker will arrange things so that no dynamic relocation is
20760 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
20761 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
20762 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
20763 { NULL, 0, 0, 0, 0 }
20767 elf32_arm_symbian_begin_write_processing (bfd *abfd,
20768 struct bfd_link_info *link_info)
20770 /* BPABI objects are never loaded directly by an OS kernel; they are
20771 processed by a postlinker first, into an OS-specific format. If
20772 the D_PAGED bit is set on the file, BFD will align segments on
20773 page boundaries, so that an OS can directly map the file. With
20774 BPABI objects, that just results in wasted space. In addition,
20775 because we clear the D_PAGED bit, map_sections_to_segments will
20776 recognize that the program headers should not be mapped into any
20777 loadable segment. */
20778 abfd->flags &= ~D_PAGED;
20779 elf32_arm_begin_write_processing (abfd, link_info);
20783 elf32_arm_symbian_modify_segment_map (bfd *abfd,
20784 struct bfd_link_info *info)
20786 struct elf_segment_map *m;
20789 /* BPABI shared libraries and executables should have a PT_DYNAMIC
20790 segment. However, because the .dynamic section is not marked
20791 with SEC_LOAD, the generic ELF code will not create such a
20793 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
20796 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
20797 if (m->p_type == PT_DYNAMIC)
20802 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
20803 m->next = elf_seg_map (abfd);
20804 elf_seg_map (abfd) = m;
20808 /* Also call the generic arm routine. */
20809 return elf32_arm_modify_segment_map (abfd, info);
20812 /* Return address for Ith PLT stub in section PLT, for relocation REL
20813 or (bfd_vma) -1 if it should not be included. */
20816 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
20817 const arelent *rel ATTRIBUTE_UNUSED)
20819 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
20823 #define elf32_bed elf32_arm_symbian_bed
20825 /* The dynamic sections are not allocated on SymbianOS; the postlinker
20826 will process them and then discard them. */
20827 #undef ELF_DYNAMIC_SEC_FLAGS
20828 #define ELF_DYNAMIC_SEC_FLAGS \
20829 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
20831 #undef elf_backend_emit_relocs
20833 #undef bfd_elf32_bfd_link_hash_table_create
20834 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
20835 #undef elf_backend_special_sections
20836 #define elf_backend_special_sections elf32_arm_symbian_special_sections
20837 #undef elf_backend_begin_write_processing
20838 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
20839 #undef elf_backend_final_write_processing
20840 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20842 #undef elf_backend_modify_segment_map
20843 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
20845 /* There is no .got section for BPABI objects, and hence no header. */
20846 #undef elf_backend_got_header_size
20847 #define elf_backend_got_header_size 0
20849 /* Similarly, there is no .got.plt section. */
20850 #undef elf_backend_want_got_plt
20851 #define elf_backend_want_got_plt 0
20853 #undef elf_backend_plt_sym_val
20854 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
20856 #undef elf_backend_may_use_rel_p
20857 #define elf_backend_may_use_rel_p 1
20858 #undef elf_backend_may_use_rela_p
20859 #define elf_backend_may_use_rela_p 0
20860 #undef elf_backend_default_use_rela_p
20861 #define elf_backend_default_use_rela_p 0
20862 #undef elf_backend_want_plt_sym
20863 #define elf_backend_want_plt_sym 0
20864 #undef elf_backend_dtrel_excludes_plt
20865 #define elf_backend_dtrel_excludes_plt 0
20866 #undef ELF_MAXPAGESIZE
20867 #define ELF_MAXPAGESIZE 0x8000
20869 #include "elf32-target.h"