1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2016 Free Software Foundation, Inc.
4 This file is part of BFD, the Binary File Descriptor library.
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
30 #include "elf-vxworks.h"
33 /* Return the relocation section associated with NAME. HTAB is the
34 bfd's elf32_arm_link_hash_entry. */
35 #define RELOC_SECTION(HTAB, NAME) \
36 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
38 /* Return size of a relocation entry. HTAB is the bfd's
39 elf32_arm_link_hash_entry. */
40 #define RELOC_SIZE(HTAB) \
42 ? sizeof (Elf32_External_Rel) \
43 : sizeof (Elf32_External_Rela))
45 /* Return function to swap relocations in. HTAB is the bfd's
46 elf32_arm_link_hash_entry. */
47 #define SWAP_RELOC_IN(HTAB) \
49 ? bfd_elf32_swap_reloc_in \
50 : bfd_elf32_swap_reloca_in)
52 /* Return function to swap relocations out. HTAB is the bfd's
53 elf32_arm_link_hash_entry. */
54 #define SWAP_RELOC_OUT(HTAB) \
56 ? bfd_elf32_swap_reloc_out \
57 : bfd_elf32_swap_reloca_out)
59 #define elf_info_to_howto 0
60 #define elf_info_to_howto_rel elf32_arm_info_to_howto
62 #define ARM_ELF_ABI_VERSION 0
63 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
65 /* The Adjusted Place, as defined by AAELF. */
66 #define Pa(X) ((X) & 0xfffffffc)
68 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
69 struct bfd_link_info *link_info,
73 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
77 static reloc_howto_type elf32_arm_howto_table_1[] =
80 HOWTO (R_ARM_NONE, /* type */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
84 FALSE, /* pc_relative */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
92 FALSE), /* pcrel_offset */
94 HOWTO (R_ARM_PC24, /* type */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
98 TRUE, /* pc_relative */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
113 FALSE, /* pc_relative */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
128 TRUE, /* pc_relative */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
143 TRUE, /* pc_relative */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
158 FALSE, /* pc_relative */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
173 FALSE, /* pc_relative */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
183 HOWTO (R_ARM_THM_ABS5, /* type */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
187 FALSE, /* pc_relative */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
198 HOWTO (R_ARM_ABS8, /* type */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
202 FALSE, /* pc_relative */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
212 HOWTO (R_ARM_SBREL32, /* type */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
216 FALSE, /* pc_relative */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
226 HOWTO (R_ARM_THM_CALL, /* type */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
230 TRUE, /* pc_relative */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
240 HOWTO (R_ARM_THM_PC8, /* type */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
244 TRUE, /* pc_relative */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
254 HOWTO (R_ARM_BREL_ADJ, /* type */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
258 FALSE, /* pc_relative */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
268 HOWTO (R_ARM_TLS_DESC, /* type */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
272 FALSE, /* pc_relative */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
282 HOWTO (R_ARM_THM_SWI8, /* type */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
286 FALSE, /* pc_relative */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
301 TRUE, /* pc_relative */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
316 TRUE, /* pc_relative */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
326 /* Dynamic TLS relocations. */
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
332 FALSE, /* pc_relative */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
346 FALSE, /* pc_relative */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
360 FALSE, /* pc_relative */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
370 /* Relocs used in ARM Linux */
372 HOWTO (R_ARM_COPY, /* type */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
376 FALSE, /* pc_relative */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
386 HOWTO (R_ARM_GLOB_DAT, /* type */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
390 FALSE, /* pc_relative */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
404 FALSE, /* pc_relative */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
414 HOWTO (R_ARM_RELATIVE, /* type */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
418 FALSE, /* pc_relative */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
428 HOWTO (R_ARM_GOTOFF32, /* type */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
432 FALSE, /* pc_relative */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
442 HOWTO (R_ARM_GOTPC, /* type */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
446 TRUE, /* pc_relative */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
456 HOWTO (R_ARM_GOT32, /* type */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
460 FALSE, /* pc_relative */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
470 HOWTO (R_ARM_PLT32, /* type */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
474 TRUE, /* pc_relative */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
484 HOWTO (R_ARM_CALL, /* type */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
488 TRUE, /* pc_relative */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
498 HOWTO (R_ARM_JUMP24, /* type */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
502 TRUE, /* pc_relative */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
512 HOWTO (R_ARM_THM_JUMP24, /* type */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
516 TRUE, /* pc_relative */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
526 HOWTO (R_ARM_BASE_ABS, /* type */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
530 FALSE, /* pc_relative */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
544 TRUE, /* pc_relative */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
558 TRUE, /* pc_relative */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
572 TRUE, /* pc_relative */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
586 FALSE, /* pc_relative */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
600 FALSE, /* pc_relative */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
614 FALSE, /* pc_relative */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
624 HOWTO (R_ARM_TARGET1, /* type */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
628 FALSE, /* pc_relative */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
638 HOWTO (R_ARM_ROSEGREL32, /* type */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
642 FALSE, /* pc_relative */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
652 HOWTO (R_ARM_V4BX, /* type */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
656 FALSE, /* pc_relative */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
666 HOWTO (R_ARM_TARGET2, /* type */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
670 FALSE, /* pc_relative */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
680 HOWTO (R_ARM_PREL31, /* type */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
684 TRUE, /* pc_relative */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
698 FALSE, /* pc_relative */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
708 HOWTO (R_ARM_MOVT_ABS, /* type */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
712 FALSE, /* pc_relative */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
726 TRUE, /* pc_relative */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
736 HOWTO (R_ARM_MOVT_PREL, /* type */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
740 TRUE, /* pc_relative */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
754 FALSE, /* pc_relative */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
768 FALSE, /* pc_relative */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
782 TRUE, /* pc_relative */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
796 TRUE, /* pc_relative */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
806 HOWTO (R_ARM_THM_JUMP19, /* type */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
810 TRUE, /* pc_relative */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
820 HOWTO (R_ARM_THM_JUMP6, /* type */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
824 TRUE, /* pc_relative */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
841 TRUE, /* pc_relative */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
851 HOWTO (R_ARM_THM_PC12, /* type */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
855 TRUE, /* pc_relative */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
865 HOWTO (R_ARM_ABS32_NOI, /* type */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
869 FALSE, /* pc_relative */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
879 HOWTO (R_ARM_REL32_NOI, /* type */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
883 TRUE, /* pc_relative */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
893 /* Group relocations. */
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
899 TRUE, /* pc_relative */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
913 TRUE, /* pc_relative */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
927 TRUE, /* pc_relative */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
941 TRUE, /* pc_relative */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
955 TRUE, /* pc_relative */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
969 TRUE, /* pc_relative */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
983 TRUE, /* pc_relative */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
997 TRUE, /* pc_relative */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 TRUE, /* pc_relative */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 TRUE, /* pc_relative */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 TRUE, /* pc_relative */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 TRUE, /* pc_relative */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 TRUE, /* pc_relative */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 TRUE, /* pc_relative */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 TRUE, /* pc_relative */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 TRUE, /* pc_relative */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 TRUE, /* pc_relative */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 TRUE, /* pc_relative */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 TRUE, /* pc_relative */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 TRUE, /* pc_relative */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 TRUE, /* pc_relative */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 TRUE, /* pc_relative */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 TRUE, /* pc_relative */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 TRUE, /* pc_relative */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 TRUE, /* pc_relative */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 TRUE, /* pc_relative */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 TRUE, /* pc_relative */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1273 /* End of group relocations. */
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 FALSE, /* pc_relative */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 FALSE, /* pc_relative */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 FALSE, /* pc_relative */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 FALSE, /* pc_relative */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 FALSE, /* pc_relative */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 FALSE, /* pc_relative */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 FALSE, /* pc_relative */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 FALSE, /* pc_relative */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 FALSE, /* pc_relative */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 FALSE, /* pc_relative */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 FALSE, /* pc_relative */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 FALSE, /* pc_relative */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 TRUE, /* pc_relative */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 FALSE, /* pc_relative */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 FALSE, /* pc_relative */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 FALSE, /* pc_relative */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1500 FALSE), /* pcrel_offset */
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 FALSE, /* pc_relative */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1515 FALSE), /* pcrel_offset */
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 TRUE, /* pc_relative */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 TRUE, /* pc_relative */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 FALSE, /* pc_relative */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 FALSE, /* pc_relative */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 FALSE, /* pc_relative */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 FALSE, /* pc_relative */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 FALSE, /* pc_relative */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 NULL, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 FALSE, /* pc_relative */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 FALSE, /* pc_relative */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 FALSE, /* pc_relative */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1658 /* 112-127 private relocations. */
1676 /* R_ARM_ME_TOO, obsolete. */
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 FALSE, /* pc_relative */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1698 FALSE, /* pc_relative. */
1700 complain_overflow_bitfield,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1711 FALSE, /* pc_relative. */
1713 complain_overflow_bitfield,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1724 FALSE, /* pc_relative. */
1726 complain_overflow_bitfield,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1737 FALSE, /* pc_relative. */
1739 complain_overflow_bitfield,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE), /* pcrel_offset. */
1749 static reloc_howto_type elf32_arm_howto_table_2[1] =
1751 HOWTO (R_ARM_IRELATIVE, /* type */
1753 2, /* size (0 = byte, 1 = short, 2 = long) */
1755 FALSE, /* pc_relative */
1757 complain_overflow_bitfield,/* complain_on_overflow */
1758 bfd_elf_generic_reloc, /* special_function */
1759 "R_ARM_IRELATIVE", /* name */
1760 TRUE, /* partial_inplace */
1761 0xffffffff, /* src_mask */
1762 0xffffffff, /* dst_mask */
1763 FALSE) /* pcrel_offset */
1766 /* 249-255 extended, currently unused, relocations: */
1767 static reloc_howto_type elf32_arm_howto_table_3[4] =
1769 HOWTO (R_ARM_RREL32, /* type */
1771 0, /* size (0 = byte, 1 = short, 2 = long) */
1773 FALSE, /* pc_relative */
1775 complain_overflow_dont,/* complain_on_overflow */
1776 bfd_elf_generic_reloc, /* special_function */
1777 "R_ARM_RREL32", /* name */
1778 FALSE, /* partial_inplace */
1781 FALSE), /* pcrel_offset */
1783 HOWTO (R_ARM_RABS32, /* type */
1785 0, /* size (0 = byte, 1 = short, 2 = long) */
1787 FALSE, /* pc_relative */
1789 complain_overflow_dont,/* complain_on_overflow */
1790 bfd_elf_generic_reloc, /* special_function */
1791 "R_ARM_RABS32", /* name */
1792 FALSE, /* partial_inplace */
1795 FALSE), /* pcrel_offset */
1797 HOWTO (R_ARM_RPC24, /* type */
1799 0, /* size (0 = byte, 1 = short, 2 = long) */
1801 FALSE, /* pc_relative */
1803 complain_overflow_dont,/* complain_on_overflow */
1804 bfd_elf_generic_reloc, /* special_function */
1805 "R_ARM_RPC24", /* name */
1806 FALSE, /* partial_inplace */
1809 FALSE), /* pcrel_offset */
1811 HOWTO (R_ARM_RBASE, /* type */
1813 0, /* size (0 = byte, 1 = short, 2 = long) */
1815 FALSE, /* pc_relative */
1817 complain_overflow_dont,/* complain_on_overflow */
1818 bfd_elf_generic_reloc, /* special_function */
1819 "R_ARM_RBASE", /* name */
1820 FALSE, /* partial_inplace */
1823 FALSE) /* pcrel_offset */
1826 static reloc_howto_type *
1827 elf32_arm_howto_from_type (unsigned int r_type)
1829 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1830 return &elf32_arm_howto_table_1[r_type];
1832 if (r_type == R_ARM_IRELATIVE)
1833 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1835 if (r_type >= R_ARM_RREL32
1836 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1837 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1843 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1844 Elf_Internal_Rela * elf_reloc)
1846 unsigned int r_type;
1848 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1849 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1852 struct elf32_arm_reloc_map
1854 bfd_reloc_code_real_type bfd_reloc_val;
1855 unsigned char elf_reloc_val;
1858 /* All entries in this list must also be present in elf32_arm_howto_table. */
1859 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1861 {BFD_RELOC_NONE, R_ARM_NONE},
1862 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1863 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1864 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1865 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1866 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1867 {BFD_RELOC_32, R_ARM_ABS32},
1868 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1869 {BFD_RELOC_8, R_ARM_ABS8},
1870 {BFD_RELOC_16, R_ARM_ABS16},
1871 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1872 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1873 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1874 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1875 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1876 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1877 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1878 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1879 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1880 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1881 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1882 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1883 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1884 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1885 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1886 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1887 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1888 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1889 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1890 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1891 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1892 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1893 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1894 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1895 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1896 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1897 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1898 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1899 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1900 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1901 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1902 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1903 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1904 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1905 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1906 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1907 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1908 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1909 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1910 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1911 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1912 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1913 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1914 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1915 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1916 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1917 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1918 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1919 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1920 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1921 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1922 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1923 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1924 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1925 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1926 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1927 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1928 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1929 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1930 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1931 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1932 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1933 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1934 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1935 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1936 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1937 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1938 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1939 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1940 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1941 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1942 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1943 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1944 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1945 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1946 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
1947 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
1948 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
1949 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
1950 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
1953 static reloc_howto_type *
1954 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1955 bfd_reloc_code_real_type code)
1959 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1960 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1961 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1966 static reloc_howto_type *
1967 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1972 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1973 if (elf32_arm_howto_table_1[i].name != NULL
1974 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1975 return &elf32_arm_howto_table_1[i];
1977 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1978 if (elf32_arm_howto_table_2[i].name != NULL
1979 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1980 return &elf32_arm_howto_table_2[i];
1982 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1983 if (elf32_arm_howto_table_3[i].name != NULL
1984 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1985 return &elf32_arm_howto_table_3[i];
1990 /* Support for core dump NOTE sections. */
1993 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1998 switch (note->descsz)
2003 case 148: /* Linux/ARM 32-bit. */
2005 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2008 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2017 /* Make a ".reg/999" section. */
2018 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2019 size, note->descpos + offset);
2023 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2025 switch (note->descsz)
2030 case 124: /* Linux/ARM elf_prpsinfo. */
2031 elf_tdata (abfd)->core->pid
2032 = bfd_get_32 (abfd, note->descdata + 12);
2033 elf_tdata (abfd)->core->program
2034 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2035 elf_tdata (abfd)->core->command
2036 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2039 /* Note that for some reason, a spurious space is tacked
2040 onto the end of the args in some (at least one anyway)
2041 implementations, so strip it off if it exists. */
2043 char *command = elf_tdata (abfd)->core->command;
2044 int n = strlen (command);
2046 if (0 < n && command[n - 1] == ' ')
2047 command[n - 1] = '\0';
2054 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2067 va_start (ap, note_type);
2068 memset (data, 0, sizeof (data));
2069 strncpy (data + 28, va_arg (ap, const char *), 16);
2070 strncpy (data + 44, va_arg (ap, const char *), 80);
2073 return elfcore_write_note (abfd, buf, bufsiz,
2074 "CORE", note_type, data, sizeof (data));
2085 va_start (ap, note_type);
2086 memset (data, 0, sizeof (data));
2087 pid = va_arg (ap, long);
2088 bfd_put_32 (abfd, pid, data + 24);
2089 cursig = va_arg (ap, int);
2090 bfd_put_16 (abfd, cursig, data + 12);
2091 greg = va_arg (ap, const void *);
2092 memcpy (data + 72, greg, 72);
2095 return elfcore_write_note (abfd, buf, bufsiz,
2096 "CORE", note_type, data, sizeof (data));
2101 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2102 #define TARGET_LITTLE_NAME "elf32-littlearm"
2103 #define TARGET_BIG_SYM arm_elf32_be_vec
2104 #define TARGET_BIG_NAME "elf32-bigarm"
2106 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2107 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2108 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2110 typedef unsigned long int insn32;
2111 typedef unsigned short int insn16;
2113 /* In lieu of proper flags, assume all EABIv4 or later objects are
2115 #define INTERWORK_FLAG(abfd) \
2116 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2117 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2118 || ((abfd)->flags & BFD_LINKER_CREATED))
2120 /* The linker script knows the section names for placement.
2121 The entry_names are used to do simple name mangling on the stubs.
2122 Given a function name, and its type, the stub can be found. The
2123 name can be changed. The only requirement is the %s be present. */
2124 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2125 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2127 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2128 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2130 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2131 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2133 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2134 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2136 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2137 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2139 #define STUB_ENTRY_NAME "__%s_veneer"
2141 /* The name of the dynamic interpreter. This is put in the .interp
2143 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2145 static const unsigned long tls_trampoline [] =
2147 0xe08e0000, /* add r0, lr, r0 */
2148 0xe5901004, /* ldr r1, [r0,#4] */
2149 0xe12fff11, /* bx r1 */
2152 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2154 0xe52d2004, /* push {r2} */
2155 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2156 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2157 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2158 0xe081100f, /* 2: add r1, pc */
2159 0xe12fff12, /* bx r2 */
2160 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2161 + dl_tlsdesc_lazy_resolver(GOT) */
2162 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2165 #ifdef FOUR_WORD_PLT
2167 /* The first entry in a procedure linkage table looks like
2168 this. It is set up so that any shared library function that is
2169 called before the relocation has been set up calls the dynamic
2171 static const bfd_vma elf32_arm_plt0_entry [] =
2173 0xe52de004, /* str lr, [sp, #-4]! */
2174 0xe59fe010, /* ldr lr, [pc, #16] */
2175 0xe08fe00e, /* add lr, pc, lr */
2176 0xe5bef008, /* ldr pc, [lr, #8]! */
2179 /* Subsequent entries in a procedure linkage table look like
2181 static const bfd_vma elf32_arm_plt_entry [] =
2183 0xe28fc600, /* add ip, pc, #NN */
2184 0xe28cca00, /* add ip, ip, #NN */
2185 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2186 0x00000000, /* unused */
2189 #else /* not FOUR_WORD_PLT */
2191 /* The first entry in a procedure linkage table looks like
2192 this. It is set up so that any shared library function that is
2193 called before the relocation has been set up calls the dynamic
2195 static const bfd_vma elf32_arm_plt0_entry [] =
2197 0xe52de004, /* str lr, [sp, #-4]! */
2198 0xe59fe004, /* ldr lr, [pc, #4] */
2199 0xe08fe00e, /* add lr, pc, lr */
2200 0xe5bef008, /* ldr pc, [lr, #8]! */
2201 0x00000000, /* &GOT[0] - . */
2204 /* By default subsequent entries in a procedure linkage table look like
2205 this. Offsets that don't fit into 28 bits will cause link error. */
2206 static const bfd_vma elf32_arm_plt_entry_short [] =
2208 0xe28fc600, /* add ip, pc, #0xNN00000 */
2209 0xe28cca00, /* add ip, ip, #0xNN000 */
2210 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2213 /* When explicitly asked, we'll use this "long" entry format
2214 which can cope with arbitrary displacements. */
2215 static const bfd_vma elf32_arm_plt_entry_long [] =
2217 0xe28fc200, /* add ip, pc, #0xN0000000 */
2218 0xe28cc600, /* add ip, ip, #0xNN00000 */
2219 0xe28cca00, /* add ip, ip, #0xNN000 */
2220 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2223 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
2225 #endif /* not FOUR_WORD_PLT */
2227 /* The first entry in a procedure linkage table looks like this.
2228 It is set up so that any shared library function that is called before the
2229 relocation has been set up calls the dynamic linker first. */
2230 static const bfd_vma elf32_thumb2_plt0_entry [] =
2232 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2233 an instruction maybe encoded to one or two array elements. */
2234 0xf8dfb500, /* push {lr} */
2235 0x44fee008, /* ldr.w lr, [pc, #8] */
2237 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2238 0x00000000, /* &GOT[0] - . */
2241 /* Subsequent entries in a procedure linkage table for thumb only target
2243 static const bfd_vma elf32_thumb2_plt_entry [] =
2245 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2246 an instruction maybe encoded to one or two array elements. */
2247 0x0c00f240, /* movw ip, #0xNNNN */
2248 0x0c00f2c0, /* movt ip, #0xNNNN */
2249 0xf8dc44fc, /* add ip, pc */
2250 0xbf00f000 /* ldr.w pc, [ip] */
2254 /* The format of the first entry in the procedure linkage table
2255 for a VxWorks executable. */
2256 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2258 0xe52dc008, /* str ip,[sp,#-8]! */
2259 0xe59fc000, /* ldr ip,[pc] */
2260 0xe59cf008, /* ldr pc,[ip,#8] */
2261 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2264 /* The format of subsequent entries in a VxWorks executable. */
2265 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2267 0xe59fc000, /* ldr ip,[pc] */
2268 0xe59cf000, /* ldr pc,[ip] */
2269 0x00000000, /* .long @got */
2270 0xe59fc000, /* ldr ip,[pc] */
2271 0xea000000, /* b _PLT */
2272 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2275 /* The format of entries in a VxWorks shared library. */
2276 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2278 0xe59fc000, /* ldr ip,[pc] */
2279 0xe79cf009, /* ldr pc,[ip,r9] */
2280 0x00000000, /* .long @got */
2281 0xe59fc000, /* ldr ip,[pc] */
2282 0xe599f008, /* ldr pc,[r9,#8] */
2283 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2286 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2287 #define PLT_THUMB_STUB_SIZE 4
2288 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2294 /* The entries in a PLT when using a DLL-based target with multiple
2296 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2298 0xe51ff004, /* ldr pc, [pc, #-4] */
2299 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2302 /* The first entry in a procedure linkage table looks like
2303 this. It is set up so that any shared library function that is
2304 called before the relocation has been set up calls the dynamic
2306 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2309 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2310 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2311 0xe08cc00f, /* add ip, ip, pc */
2312 0xe52dc008, /* str ip, [sp, #-8]! */
2313 /* Second bundle: */
2314 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2315 0xe59cc000, /* ldr ip, [ip] */
2316 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2317 0xe12fff1c, /* bx ip */
2319 0xe320f000, /* nop */
2320 0xe320f000, /* nop */
2321 0xe320f000, /* nop */
2323 0xe50dc004, /* str ip, [sp, #-4] */
2324 /* Fourth bundle: */
2325 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2326 0xe59cc000, /* ldr ip, [ip] */
2327 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2328 0xe12fff1c, /* bx ip */
2330 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2332 /* Subsequent entries in a procedure linkage table look like this. */
2333 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2335 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2336 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2337 0xe08cc00f, /* add ip, ip, pc */
2338 0xea000000, /* b .Lplt_tail */
2341 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2342 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2343 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2344 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2345 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2346 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2347 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2348 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2358 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2359 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2360 is inserted in arm_build_one_stub(). */
2361 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2362 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2363 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2364 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2365 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2366 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2371 enum stub_insn_type type;
2372 unsigned int r_type;
2376 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2377 to reach the stub if necessary. */
2378 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2380 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2381 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2384 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2386 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2388 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2389 ARM_INSN (0xe12fff1c), /* bx ip */
2390 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2393 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2394 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2396 THUMB16_INSN (0xb401), /* push {r0} */
2397 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2398 THUMB16_INSN (0x4684), /* mov ip, r0 */
2399 THUMB16_INSN (0xbc01), /* pop {r0} */
2400 THUMB16_INSN (0x4760), /* bx ip */
2401 THUMB16_INSN (0xbf00), /* nop */
2402 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2405 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2407 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2409 THUMB16_INSN (0x4778), /* bx pc */
2410 THUMB16_INSN (0x46c0), /* nop */
2411 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2412 ARM_INSN (0xe12fff1c), /* bx ip */
2413 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2416 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2418 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2420 THUMB16_INSN (0x4778), /* bx pc */
2421 THUMB16_INSN (0x46c0), /* nop */
2422 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2423 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2426 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2427 one, when the destination is close enough. */
2428 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2430 THUMB16_INSN (0x4778), /* bx pc */
2431 THUMB16_INSN (0x46c0), /* nop */
2432 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2435 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2436 blx to reach the stub if necessary. */
2437 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2439 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2440 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2441 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2444 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2445 blx to reach the stub if necessary. We can not add into pc;
2446 it is not guaranteed to mode switch (different in ARMv6 and
2448 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2450 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2451 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2452 ARM_INSN (0xe12fff1c), /* bx ip */
2453 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2456 /* V4T ARM -> ARM long branch stub, PIC. */
2457 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2459 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2460 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2461 ARM_INSN (0xe12fff1c), /* bx ip */
2462 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2465 /* V4T Thumb -> ARM long branch stub, PIC. */
2466 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2468 THUMB16_INSN (0x4778), /* bx pc */
2469 THUMB16_INSN (0x46c0), /* nop */
2470 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2471 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2472 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2475 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2477 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2479 THUMB16_INSN (0xb401), /* push {r0} */
2480 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2481 THUMB16_INSN (0x46fc), /* mov ip, pc */
2482 THUMB16_INSN (0x4484), /* add ip, r0 */
2483 THUMB16_INSN (0xbc01), /* pop {r0} */
2484 THUMB16_INSN (0x4760), /* bx ip */
2485 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2488 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2490 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2492 THUMB16_INSN (0x4778), /* bx pc */
2493 THUMB16_INSN (0x46c0), /* nop */
2494 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2495 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2496 ARM_INSN (0xe12fff1c), /* bx ip */
2497 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2500 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2501 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2502 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2504 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2505 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2506 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2509 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2510 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2511 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2513 THUMB16_INSN (0x4778), /* bx pc */
2514 THUMB16_INSN (0x46c0), /* nop */
2515 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2516 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2517 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2520 /* NaCl ARM -> ARM long branch stub. */
2521 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2523 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2524 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2525 ARM_INSN (0xe12fff1c), /* bx ip */
2526 ARM_INSN (0xe320f000), /* nop */
2527 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2528 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2529 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2530 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2533 /* NaCl ARM -> ARM long branch stub, PIC. */
2534 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2536 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2537 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2538 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2539 ARM_INSN (0xe12fff1c), /* bx ip */
2540 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2541 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2542 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2543 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2547 /* Cortex-A8 erratum-workaround stubs. */
2549 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2550 can't use a conditional branch to reach this stub). */
2552 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2554 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2555 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2556 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2559 /* Stub used for b.w and bl.w instructions. */
2561 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2563 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2566 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2568 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2571 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2572 instruction (which switches to ARM mode) to point to this stub. Jump to the
2573 real destination using an ARM-mode branch. */
2575 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2577 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2580 /* For each section group there can be a specially created linker section
2581 to hold the stubs for that group. The name of the stub section is based
2582 upon the name of another section within that group with the suffix below
2585 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2586 create what appeared to be a linker stub section when it actually
2587 contained user code/data. For example, consider this fragment:
2589 const char * stubborn_problems[] = { "np" };
2591 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2594 .data.rel.local.stubborn_problems
2596 This then causes problems in arm32_arm_build_stubs() as it triggers:
2598 // Ignore non-stub sections.
2599 if (!strstr (stub_sec->name, STUB_SUFFIX))
2602 And so the section would be ignored instead of being processed. Hence
2603 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2605 #define STUB_SUFFIX ".__stub"
2607 /* One entry per long/short branch stub defined above. */
2609 DEF_STUB(long_branch_any_any) \
2610 DEF_STUB(long_branch_v4t_arm_thumb) \
2611 DEF_STUB(long_branch_thumb_only) \
2612 DEF_STUB(long_branch_v4t_thumb_thumb) \
2613 DEF_STUB(long_branch_v4t_thumb_arm) \
2614 DEF_STUB(short_branch_v4t_thumb_arm) \
2615 DEF_STUB(long_branch_any_arm_pic) \
2616 DEF_STUB(long_branch_any_thumb_pic) \
2617 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2618 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2619 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2620 DEF_STUB(long_branch_thumb_only_pic) \
2621 DEF_STUB(long_branch_any_tls_pic) \
2622 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2623 DEF_STUB(long_branch_arm_nacl) \
2624 DEF_STUB(long_branch_arm_nacl_pic) \
2625 DEF_STUB(a8_veneer_b_cond) \
2626 DEF_STUB(a8_veneer_b) \
2627 DEF_STUB(a8_veneer_bl) \
2628 DEF_STUB(a8_veneer_blx)
2630 #define DEF_STUB(x) arm_stub_##x,
2631 enum elf32_arm_stub_type
2639 /* Note the first a8_veneer type. */
2640 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2644 const insn_sequence* template_sequence;
2648 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2649 static const stub_def stub_definitions[] =
2655 struct elf32_arm_stub_hash_entry
2657 /* Base hash table entry structure. */
2658 struct bfd_hash_entry root;
2660 /* The stub section. */
2663 /* Offset within stub_sec of the beginning of this stub. */
2664 bfd_vma stub_offset;
2666 /* Given the symbol's value and its section we can determine its final
2667 value when building the stubs (so the stub knows where to jump). */
2668 bfd_vma target_value;
2669 asection *target_section;
2671 /* Same as above but for the source of the branch to the stub. Used for
2672 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2673 such, source section does not need to be recorded since Cortex-A8 erratum
2674 workaround stubs are only generated when both source and target are in the
2676 bfd_vma source_value;
2678 /* The instruction which caused this stub to be generated (only valid for
2679 Cortex-A8 erratum workaround stubs at present). */
2680 unsigned long orig_insn;
2682 /* The stub type. */
2683 enum elf32_arm_stub_type stub_type;
2684 /* Its encoding size in bytes. */
2687 const insn_sequence *stub_template;
2688 /* The size of the template (number of entries). */
2689 int stub_template_size;
2691 /* The symbol table entry, if any, that this was derived from. */
2692 struct elf32_arm_link_hash_entry *h;
2694 /* Type of branch. */
2695 enum arm_st_branch_type branch_type;
2697 /* Where this stub is being called from, or, in the case of combined
2698 stub sections, the first input section in the group. */
2701 /* The name for the local symbol at the start of this stub. The
2702 stub name in the hash table has to be unique; this does not, so
2703 it can be friendlier. */
2707 /* Used to build a map of a section. This is required for mixed-endian
2710 typedef struct elf32_elf_section_map
2715 elf32_arm_section_map;
2717 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2721 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2722 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2723 VFP11_ERRATUM_ARM_VENEER,
2724 VFP11_ERRATUM_THUMB_VENEER
2726 elf32_vfp11_erratum_type;
2728 typedef struct elf32_vfp11_erratum_list
2730 struct elf32_vfp11_erratum_list *next;
2736 struct elf32_vfp11_erratum_list *veneer;
2737 unsigned int vfp_insn;
2741 struct elf32_vfp11_erratum_list *branch;
2745 elf32_vfp11_erratum_type type;
2747 elf32_vfp11_erratum_list;
2749 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2753 STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2754 STM32L4XX_ERRATUM_VENEER
2756 elf32_stm32l4xx_erratum_type;
2758 typedef struct elf32_stm32l4xx_erratum_list
2760 struct elf32_stm32l4xx_erratum_list *next;
2766 struct elf32_stm32l4xx_erratum_list *veneer;
2771 struct elf32_stm32l4xx_erratum_list *branch;
2775 elf32_stm32l4xx_erratum_type type;
2777 elf32_stm32l4xx_erratum_list;
2782 INSERT_EXIDX_CANTUNWIND_AT_END
2784 arm_unwind_edit_type;
2786 /* A (sorted) list of edits to apply to an unwind table. */
2787 typedef struct arm_unwind_table_edit
2789 arm_unwind_edit_type type;
2790 /* Note: we sometimes want to insert an unwind entry corresponding to a
2791 section different from the one we're currently writing out, so record the
2792 (text) section this edit relates to here. */
2793 asection *linked_section;
2795 struct arm_unwind_table_edit *next;
2797 arm_unwind_table_edit;
2799 typedef struct _arm_elf_section_data
2801 /* Information about mapping symbols. */
2802 struct bfd_elf_section_data elf;
2803 unsigned int mapcount;
2804 unsigned int mapsize;
2805 elf32_arm_section_map *map;
2806 /* Information about CPU errata. */
2807 unsigned int erratumcount;
2808 elf32_vfp11_erratum_list *erratumlist;
2809 unsigned int stm32l4xx_erratumcount;
2810 elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
2811 unsigned int additional_reloc_count;
2812 /* Information about unwind tables. */
2815 /* Unwind info attached to a text section. */
2818 asection *arm_exidx_sec;
2821 /* Unwind info attached to an .ARM.exidx section. */
2824 arm_unwind_table_edit *unwind_edit_list;
2825 arm_unwind_table_edit *unwind_edit_tail;
2829 _arm_elf_section_data;
2831 #define elf32_arm_section_data(sec) \
2832 ((_arm_elf_section_data *) elf_section_data (sec))
2834 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2835 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2836 so may be created multiple times: we use an array of these entries whilst
2837 relaxing which we can refresh easily, then create stubs for each potentially
2838 erratum-triggering instruction once we've settled on a solution. */
2840 struct a8_erratum_fix
2845 bfd_vma target_offset;
2846 unsigned long orig_insn;
2848 enum elf32_arm_stub_type stub_type;
2849 enum arm_st_branch_type branch_type;
2852 /* A table of relocs applied to branches which might trigger Cortex-A8
2855 struct a8_erratum_reloc
2858 bfd_vma destination;
2859 struct elf32_arm_link_hash_entry *hash;
2860 const char *sym_name;
2861 unsigned int r_type;
2862 enum arm_st_branch_type branch_type;
2863 bfd_boolean non_a8_stub;
2866 /* The size of the thread control block. */
2869 /* ARM-specific information about a PLT entry, over and above the usual
2873 /* We reference count Thumb references to a PLT entry separately,
2874 so that we can emit the Thumb trampoline only if needed. */
2875 bfd_signed_vma thumb_refcount;
2877 /* Some references from Thumb code may be eliminated by BL->BLX
2878 conversion, so record them separately. */
2879 bfd_signed_vma maybe_thumb_refcount;
2881 /* How many of the recorded PLT accesses were from non-call relocations.
2882 This information is useful when deciding whether anything takes the
2883 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
2884 non-call references to the function should resolve directly to the
2885 real runtime target. */
2886 unsigned int noncall_refcount;
2888 /* Since PLT entries have variable size if the Thumb prologue is
2889 used, we need to record the index into .got.plt instead of
2890 recomputing it from the PLT offset. */
2891 bfd_signed_vma got_offset;
2894 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
2895 struct arm_local_iplt_info
2897 /* The information that is usually found in the generic ELF part of
2898 the hash table entry. */
2899 union gotplt_union root;
2901 /* The information that is usually found in the ARM-specific part of
2902 the hash table entry. */
2903 struct arm_plt_info arm;
2905 /* A list of all potential dynamic relocations against this symbol. */
2906 struct elf_dyn_relocs *dyn_relocs;
2909 struct elf_arm_obj_tdata
2911 struct elf_obj_tdata root;
2913 /* tls_type for each local got entry. */
2914 char *local_got_tls_type;
2916 /* GOTPLT entries for TLS descriptors. */
2917 bfd_vma *local_tlsdesc_gotent;
2919 /* Information for local symbols that need entries in .iplt. */
2920 struct arm_local_iplt_info **local_iplt;
2922 /* Zero to warn when linking objects with incompatible enum sizes. */
2923 int no_enum_size_warning;
2925 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2926 int no_wchar_size_warning;
2929 #define elf_arm_tdata(bfd) \
2930 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2932 #define elf32_arm_local_got_tls_type(bfd) \
2933 (elf_arm_tdata (bfd)->local_got_tls_type)
2935 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2936 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2938 #define elf32_arm_local_iplt(bfd) \
2939 (elf_arm_tdata (bfd)->local_iplt)
2941 #define is_arm_elf(bfd) \
2942 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2943 && elf_tdata (bfd) != NULL \
2944 && elf_object_id (bfd) == ARM_ELF_DATA)
2947 elf32_arm_mkobject (bfd *abfd)
2949 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2953 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2955 /* Arm ELF linker hash entry. */
2956 struct elf32_arm_link_hash_entry
2958 struct elf_link_hash_entry root;
2960 /* Track dynamic relocs copied for this symbol. */
2961 struct elf_dyn_relocs *dyn_relocs;
2963 /* ARM-specific PLT information. */
2964 struct arm_plt_info plt;
2966 #define GOT_UNKNOWN 0
2967 #define GOT_NORMAL 1
2968 #define GOT_TLS_GD 2
2969 #define GOT_TLS_IE 4
2970 #define GOT_TLS_GDESC 8
2971 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
2972 unsigned int tls_type : 8;
2974 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
2975 unsigned int is_iplt : 1;
2977 unsigned int unused : 23;
2979 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
2980 starting at the end of the jump table. */
2981 bfd_vma tlsdesc_got;
2983 /* The symbol marking the real symbol location for exported thumb
2984 symbols with Arm stubs. */
2985 struct elf_link_hash_entry *export_glue;
2987 /* A pointer to the most recently used stub hash entry against this
2989 struct elf32_arm_stub_hash_entry *stub_cache;
2992 /* Traverse an arm ELF linker hash table. */
2993 #define elf32_arm_link_hash_traverse(table, func, info) \
2994 (elf_link_hash_traverse \
2996 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2999 /* Get the ARM elf linker hash table from a link_info structure. */
3000 #define elf32_arm_hash_table(info) \
3001 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
3002 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
3004 #define arm_stub_hash_lookup(table, string, create, copy) \
3005 ((struct elf32_arm_stub_hash_entry *) \
3006 bfd_hash_lookup ((table), (string), (create), (copy)))
3008 /* Array to keep track of which stub sections have been created, and
3009 information on stub grouping. */
3012 /* This is the section to which stubs in the group will be
3015 /* The stub section. */
3019 #define elf32_arm_compute_jump_table_size(htab) \
3020 ((htab)->next_tls_desc_index * 4)
3022 /* ARM ELF linker hash table. */
3023 struct elf32_arm_link_hash_table
3025 /* The main hash table. */
3026 struct elf_link_hash_table root;
3028 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3029 bfd_size_type thumb_glue_size;
3031 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3032 bfd_size_type arm_glue_size;
3034 /* The size in bytes of section containing the ARMv4 BX veneers. */
3035 bfd_size_type bx_glue_size;
3037 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3038 veneer has been populated. */
3039 bfd_vma bx_glue_offset[15];
3041 /* The size in bytes of the section containing glue for VFP11 erratum
3043 bfd_size_type vfp11_erratum_glue_size;
3045 /* The size in bytes of the section containing glue for STM32L4XX erratum
3047 bfd_size_type stm32l4xx_erratum_glue_size;
3049 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3050 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3051 elf32_arm_write_section(). */
3052 struct a8_erratum_fix *a8_erratum_fixes;
3053 unsigned int num_a8_erratum_fixes;
3055 /* An arbitrary input BFD chosen to hold the glue sections. */
3056 bfd * bfd_of_glue_owner;
3058 /* Nonzero to output a BE8 image. */
3061 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3062 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3065 /* The relocation to use for R_ARM_TARGET2 relocations. */
3068 /* 0 = Ignore R_ARM_V4BX.
3069 1 = Convert BX to MOV PC.
3070 2 = Generate v4 interworing stubs. */
3073 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3076 /* Whether we should fix the ARM1176 BLX immediate issue. */
3079 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3082 /* What sort of code sequences we should look for which may trigger the
3083 VFP11 denorm erratum. */
3084 bfd_arm_vfp11_fix vfp11_fix;
3086 /* Global counter for the number of fixes we have emitted. */
3087 int num_vfp11_fixes;
3089 /* What sort of code sequences we should look for which may trigger the
3090 STM32L4XX erratum. */
3091 bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3093 /* Global counter for the number of fixes we have emitted. */
3094 int num_stm32l4xx_fixes;
3096 /* Nonzero to force PIC branch veneers. */
3099 /* The number of bytes in the initial entry in the PLT. */
3100 bfd_size_type plt_header_size;
3102 /* The number of bytes in the subsequent PLT etries. */
3103 bfd_size_type plt_entry_size;
3105 /* True if the target system is VxWorks. */
3108 /* True if the target system is Symbian OS. */
3111 /* True if the target system is Native Client. */
3114 /* True if the target uses REL relocations. */
3117 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3118 bfd_vma next_tls_desc_index;
3120 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3121 bfd_vma num_tls_desc;
3123 /* Short-cuts to get to dynamic linker sections. */
3127 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3130 /* The offset into splt of the PLT entry for the TLS descriptor
3131 resolver. Special values are 0, if not necessary (or not found
3132 to be necessary yet), and -1 if needed but not determined
3134 bfd_vma dt_tlsdesc_plt;
3136 /* The offset into sgot of the GOT entry used by the PLT entry
3138 bfd_vma dt_tlsdesc_got;
3140 /* Offset in .plt section of tls_arm_trampoline. */
3141 bfd_vma tls_trampoline;
3143 /* Data for R_ARM_TLS_LDM32 relocations. */
3146 bfd_signed_vma refcount;
3150 /* Small local sym cache. */
3151 struct sym_cache sym_cache;
3153 /* For convenience in allocate_dynrelocs. */
3156 /* The amount of space used by the reserved portion of the sgotplt
3157 section, plus whatever space is used by the jump slots. */
3158 bfd_vma sgotplt_jump_table_size;
3160 /* The stub hash table. */
3161 struct bfd_hash_table stub_hash_table;
3163 /* Linker stub bfd. */
3166 /* Linker call-backs. */
3167 asection * (*add_stub_section) (const char *, asection *, asection *,
3169 void (*layout_sections_again) (void);
3171 /* Array to keep track of which stub sections have been created, and
3172 information on stub grouping. */
3173 struct map_stub *stub_group;
3175 /* Number of elements in stub_group. */
3176 unsigned int top_id;
3178 /* Assorted information used by elf32_arm_size_stubs. */
3179 unsigned int bfd_count;
3180 unsigned int top_index;
3181 asection **input_list;
3185 ctz (unsigned int mask)
3187 #if GCC_VERSION >= 3004
3188 return __builtin_ctz (mask);
3192 for (i = 0; i < 8 * sizeof (mask); i++)
3203 popcount (unsigned int mask)
3205 #if GCC_VERSION >= 3004
3206 return __builtin_popcount (mask);
3208 unsigned int i, sum = 0;
3210 for (i = 0; i < 8 * sizeof (mask); i++)
3220 /* Create an entry in an ARM ELF linker hash table. */
3222 static struct bfd_hash_entry *
3223 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3224 struct bfd_hash_table * table,
3225 const char * string)
3227 struct elf32_arm_link_hash_entry * ret =
3228 (struct elf32_arm_link_hash_entry *) entry;
3230 /* Allocate the structure if it has not already been allocated by a
3233 ret = (struct elf32_arm_link_hash_entry *)
3234 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3236 return (struct bfd_hash_entry *) ret;
3238 /* Call the allocation method of the superclass. */
3239 ret = ((struct elf32_arm_link_hash_entry *)
3240 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3244 ret->dyn_relocs = NULL;
3245 ret->tls_type = GOT_UNKNOWN;
3246 ret->tlsdesc_got = (bfd_vma) -1;
3247 ret->plt.thumb_refcount = 0;
3248 ret->plt.maybe_thumb_refcount = 0;
3249 ret->plt.noncall_refcount = 0;
3250 ret->plt.got_offset = -1;
3251 ret->is_iplt = FALSE;
3252 ret->export_glue = NULL;
3254 ret->stub_cache = NULL;
3257 return (struct bfd_hash_entry *) ret;
3260 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3264 elf32_arm_allocate_local_sym_info (bfd *abfd)
3266 if (elf_local_got_refcounts (abfd) == NULL)
3268 bfd_size_type num_syms;
3272 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3273 size = num_syms * (sizeof (bfd_signed_vma)
3274 + sizeof (struct arm_local_iplt_info *)
3277 data = bfd_zalloc (abfd, size);
3281 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3282 data += num_syms * sizeof (bfd_signed_vma);
3284 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3285 data += num_syms * sizeof (struct arm_local_iplt_info *);
3287 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3288 data += num_syms * sizeof (bfd_vma);
3290 elf32_arm_local_got_tls_type (abfd) = data;
3295 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3296 to input bfd ABFD. Create the information if it doesn't already exist.
3297 Return null if an allocation fails. */
3299 static struct arm_local_iplt_info *
3300 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3302 struct arm_local_iplt_info **ptr;
3304 if (!elf32_arm_allocate_local_sym_info (abfd))
3307 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3308 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3310 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3314 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3315 in ABFD's symbol table. If the symbol is global, H points to its
3316 hash table entry, otherwise H is null.
3318 Return true if the symbol does have PLT information. When returning
3319 true, point *ROOT_PLT at the target-independent reference count/offset
3320 union and *ARM_PLT at the ARM-specific information. */
3323 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3324 unsigned long r_symndx, union gotplt_union **root_plt,
3325 struct arm_plt_info **arm_plt)
3327 struct arm_local_iplt_info *local_iplt;
3331 *root_plt = &h->root.plt;
3336 if (elf32_arm_local_iplt (abfd) == NULL)
3339 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3340 if (local_iplt == NULL)
3343 *root_plt = &local_iplt->root;
3344 *arm_plt = &local_iplt->arm;
3348 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3352 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3353 struct arm_plt_info *arm_plt)
3355 struct elf32_arm_link_hash_table *htab;
3357 htab = elf32_arm_hash_table (info);
3358 return (arm_plt->thumb_refcount != 0
3359 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3362 /* Return a pointer to the head of the dynamic reloc list that should
3363 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3364 ABFD's symbol table. Return null if an error occurs. */
3366 static struct elf_dyn_relocs **
3367 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3368 Elf_Internal_Sym *isym)
3370 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3372 struct arm_local_iplt_info *local_iplt;
3374 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3375 if (local_iplt == NULL)
3377 return &local_iplt->dyn_relocs;
3381 /* Track dynamic relocs needed for local syms too.
3382 We really need local syms available to do this
3387 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3391 vpp = &elf_section_data (s)->local_dynrel;
3392 return (struct elf_dyn_relocs **) vpp;
3396 /* Initialize an entry in the stub hash table. */
3398 static struct bfd_hash_entry *
3399 stub_hash_newfunc (struct bfd_hash_entry *entry,
3400 struct bfd_hash_table *table,
3403 /* Allocate the structure if it has not already been allocated by a
3407 entry = (struct bfd_hash_entry *)
3408 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3413 /* Call the allocation method of the superclass. */
3414 entry = bfd_hash_newfunc (entry, table, string);
3417 struct elf32_arm_stub_hash_entry *eh;
3419 /* Initialize the local fields. */
3420 eh = (struct elf32_arm_stub_hash_entry *) entry;
3421 eh->stub_sec = NULL;
3422 eh->stub_offset = 0;
3423 eh->source_value = 0;
3424 eh->target_value = 0;
3425 eh->target_section = NULL;
3427 eh->stub_type = arm_stub_none;
3429 eh->stub_template = NULL;
3430 eh->stub_template_size = 0;
3433 eh->output_name = NULL;
3439 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3440 shortcuts to them in our hash table. */
3443 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3445 struct elf32_arm_link_hash_table *htab;
3447 htab = elf32_arm_hash_table (info);
3451 /* BPABI objects never have a GOT, or associated sections. */
3452 if (htab->symbian_p)
3455 if (! _bfd_elf_create_got_section (dynobj, info))
3461 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3464 create_ifunc_sections (struct bfd_link_info *info)
3466 struct elf32_arm_link_hash_table *htab;
3467 const struct elf_backend_data *bed;
3472 htab = elf32_arm_hash_table (info);
3473 dynobj = htab->root.dynobj;
3474 bed = get_elf_backend_data (dynobj);
3475 flags = bed->dynamic_sec_flags;
3477 if (htab->root.iplt == NULL)
3479 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3480 flags | SEC_READONLY | SEC_CODE);
3482 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3484 htab->root.iplt = s;
3487 if (htab->root.irelplt == NULL)
3489 s = bfd_make_section_anyway_with_flags (dynobj,
3490 RELOC_SECTION (htab, ".iplt"),
3491 flags | SEC_READONLY);
3493 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3495 htab->root.irelplt = s;
3498 if (htab->root.igotplt == NULL)
3500 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3502 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3504 htab->root.igotplt = s;
3509 /* Determine if we're dealing with a Thumb only architecture. */
3512 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3515 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3516 Tag_CPU_arch_profile);
3519 return profile == 'M';
3521 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3523 /* Force return logic to be reviewed for each new architecture. */
3524 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3525 || arch == TAG_CPU_ARCH_V8M_BASE
3526 || arch == TAG_CPU_ARCH_V8M_MAIN);
3528 if (arch == TAG_CPU_ARCH_V6_M
3529 || arch == TAG_CPU_ARCH_V6S_M
3530 || arch == TAG_CPU_ARCH_V7E_M
3531 || arch == TAG_CPU_ARCH_V8M_BASE
3532 || arch == TAG_CPU_ARCH_V8M_MAIN)
3538 /* Determine if we're dealing with a Thumb-2 object. */
3541 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3544 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3548 return thumb_isa == 2;
3550 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3552 /* Force return logic to be reviewed for each new architecture. */
3553 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3554 || arch == TAG_CPU_ARCH_V8M_BASE
3555 || arch == TAG_CPU_ARCH_V8M_MAIN);
3557 return (arch == TAG_CPU_ARCH_V6T2
3558 || arch == TAG_CPU_ARCH_V7
3559 || arch == TAG_CPU_ARCH_V7E_M
3560 || arch == TAG_CPU_ARCH_V8
3561 || arch == TAG_CPU_ARCH_V8M_MAIN);
3564 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3565 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3569 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3571 struct elf32_arm_link_hash_table *htab;
3573 htab = elf32_arm_hash_table (info);
3577 if (!htab->root.sgot && !create_got_section (dynobj, info))
3580 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3583 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3584 if (!bfd_link_pic (info))
3585 htab->srelbss = bfd_get_linker_section (dynobj,
3586 RELOC_SECTION (htab, ".bss"));
3588 if (htab->vxworks_p)
3590 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3593 if (bfd_link_pic (info))
3595 htab->plt_header_size = 0;
3596 htab->plt_entry_size
3597 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3601 htab->plt_header_size
3602 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3603 htab->plt_entry_size
3604 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3607 if (elf_elfheader (dynobj))
3608 elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
3613 Test for thumb only architectures. Note - we cannot just call
3614 using_thumb_only() as the attributes in the output bfd have not been
3615 initialised at this point, so instead we use the input bfd. */
3616 bfd * saved_obfd = htab->obfd;
3618 htab->obfd = dynobj;
3619 if (using_thumb_only (htab))
3621 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
3622 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
3624 htab->obfd = saved_obfd;
3627 if (!htab->root.splt
3628 || !htab->root.srelplt
3630 || (!bfd_link_pic (info) && !htab->srelbss))
3636 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3639 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
3640 struct elf_link_hash_entry *dir,
3641 struct elf_link_hash_entry *ind)
3643 struct elf32_arm_link_hash_entry *edir, *eind;
3645 edir = (struct elf32_arm_link_hash_entry *) dir;
3646 eind = (struct elf32_arm_link_hash_entry *) ind;
3648 if (eind->dyn_relocs != NULL)
3650 if (edir->dyn_relocs != NULL)
3652 struct elf_dyn_relocs **pp;
3653 struct elf_dyn_relocs *p;
3655 /* Add reloc counts against the indirect sym to the direct sym
3656 list. Merge any entries against the same section. */
3657 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
3659 struct elf_dyn_relocs *q;
3661 for (q = edir->dyn_relocs; q != NULL; q = q->next)
3662 if (q->sec == p->sec)
3664 q->pc_count += p->pc_count;
3665 q->count += p->count;
3672 *pp = edir->dyn_relocs;
3675 edir->dyn_relocs = eind->dyn_relocs;
3676 eind->dyn_relocs = NULL;
3679 if (ind->root.type == bfd_link_hash_indirect)
3681 /* Copy over PLT info. */
3682 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
3683 eind->plt.thumb_refcount = 0;
3684 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
3685 eind->plt.maybe_thumb_refcount = 0;
3686 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
3687 eind->plt.noncall_refcount = 0;
3689 /* We should only allocate a function to .iplt once the final
3690 symbol information is known. */
3691 BFD_ASSERT (!eind->is_iplt);
3693 if (dir->got.refcount <= 0)
3695 edir->tls_type = eind->tls_type;
3696 eind->tls_type = GOT_UNKNOWN;
3700 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
3703 /* Destroy an ARM elf linker hash table. */
3706 elf32_arm_link_hash_table_free (bfd *obfd)
3708 struct elf32_arm_link_hash_table *ret
3709 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
3711 bfd_hash_table_free (&ret->stub_hash_table);
3712 _bfd_elf_link_hash_table_free (obfd);
3715 /* Create an ARM elf linker hash table. */
3717 static struct bfd_link_hash_table *
3718 elf32_arm_link_hash_table_create (bfd *abfd)
3720 struct elf32_arm_link_hash_table *ret;
3721 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
3723 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
3727 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
3728 elf32_arm_link_hash_newfunc,
3729 sizeof (struct elf32_arm_link_hash_entry),
3736 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
3737 ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
3738 #ifdef FOUR_WORD_PLT
3739 ret->plt_header_size = 16;
3740 ret->plt_entry_size = 16;
3742 ret->plt_header_size = 20;
3743 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
3748 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
3749 sizeof (struct elf32_arm_stub_hash_entry)))
3751 _bfd_elf_link_hash_table_free (abfd);
3754 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
3756 return &ret->root.root;
3759 /* Determine what kind of NOPs are available. */
3762 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3764 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3767 /* Force return logic to be reviewed for each new architecture. */
3768 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3769 || arch == TAG_CPU_ARCH_V8M_BASE
3770 || arch == TAG_CPU_ARCH_V8M_MAIN);
3772 return (arch == TAG_CPU_ARCH_V6T2
3773 || arch == TAG_CPU_ARCH_V6K
3774 || arch == TAG_CPU_ARCH_V7
3775 || arch == TAG_CPU_ARCH_V8);
3779 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3783 case arm_stub_long_branch_thumb_only:
3784 case arm_stub_long_branch_v4t_thumb_arm:
3785 case arm_stub_short_branch_v4t_thumb_arm:
3786 case arm_stub_long_branch_v4t_thumb_arm_pic:
3787 case arm_stub_long_branch_v4t_thumb_tls_pic:
3788 case arm_stub_long_branch_thumb_only_pic:
3799 /* Determine the type of stub needed, if any, for a call. */
3801 static enum elf32_arm_stub_type
3802 arm_type_of_stub (struct bfd_link_info *info,
3803 asection *input_sec,
3804 const Elf_Internal_Rela *rel,
3805 unsigned char st_type,
3806 enum arm_st_branch_type *actual_branch_type,
3807 struct elf32_arm_link_hash_entry *hash,
3808 bfd_vma destination,
3814 bfd_signed_vma branch_offset;
3815 unsigned int r_type;
3816 struct elf32_arm_link_hash_table * globals;
3819 enum elf32_arm_stub_type stub_type = arm_stub_none;
3821 enum arm_st_branch_type branch_type = *actual_branch_type;
3822 union gotplt_union *root_plt;
3823 struct arm_plt_info *arm_plt;
3825 if (branch_type == ST_BRANCH_LONG)
3828 globals = elf32_arm_hash_table (info);
3829 if (globals == NULL)
3832 thumb_only = using_thumb_only (globals);
3834 thumb2 = using_thumb2 (globals);
3836 /* Determine where the call point is. */
3837 location = (input_sec->output_offset
3838 + input_sec->output_section->vma
3841 r_type = ELF32_R_TYPE (rel->r_info);
3843 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
3844 are considering a function call relocation. */
3845 if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3846 || r_type == R_ARM_THM_JUMP19)
3847 && branch_type == ST_BRANCH_TO_ARM)
3848 branch_type = ST_BRANCH_TO_THUMB;
3850 /* For TLS call relocs, it is the caller's responsibility to provide
3851 the address of the appropriate trampoline. */
3852 if (r_type != R_ARM_TLS_CALL
3853 && r_type != R_ARM_THM_TLS_CALL
3854 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3855 &root_plt, &arm_plt)
3856 && root_plt->offset != (bfd_vma) -1)
3860 if (hash == NULL || hash->is_iplt)
3861 splt = globals->root.iplt;
3863 splt = globals->root.splt;
3868 /* Note when dealing with PLT entries: the main PLT stub is in
3869 ARM mode, so if the branch is in Thumb mode, another
3870 Thumb->ARM stub will be inserted later just before the ARM
3871 PLT stub. We don't take this extra distance into account
3872 here, because if a long branch stub is needed, we'll add a
3873 Thumb->Arm one and branch directly to the ARM PLT entry
3874 because it avoids spreading offset corrections in several
3877 destination = (splt->output_section->vma
3878 + splt->output_offset
3879 + root_plt->offset);
3881 branch_type = ST_BRANCH_TO_ARM;
3884 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3885 BFD_ASSERT (st_type != STT_GNU_IFUNC);
3887 branch_offset = (bfd_signed_vma)(destination - location);
3889 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3890 || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
3892 /* Handle cases where:
3893 - this call goes too far (different Thumb/Thumb2 max
3895 - it's a Thumb->Arm call and blx is not available, or it's a
3896 Thumb->Arm branch (not bl). A stub is needed in this case,
3897 but only if this call is not through a PLT entry. Indeed,
3898 PLT stubs handle mode switching already.
3901 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3902 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3904 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3905 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3907 && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
3908 || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
3909 && (r_type == R_ARM_THM_JUMP19))
3910 || (branch_type == ST_BRANCH_TO_ARM
3911 && (((r_type == R_ARM_THM_CALL
3912 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3913 || (r_type == R_ARM_THM_JUMP24)
3914 || (r_type == R_ARM_THM_JUMP19))
3917 if (branch_type == ST_BRANCH_TO_THUMB)
3919 /* Thumb to thumb. */
3922 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3924 ? ((globals->use_blx
3925 && (r_type == R_ARM_THM_CALL))
3926 /* V5T and above. Stub starts with ARM code, so
3927 we must be able to switch mode before
3928 reaching it, which is only possible for 'bl'
3929 (ie R_ARM_THM_CALL relocation). */
3930 ? arm_stub_long_branch_any_thumb_pic
3931 /* On V4T, use Thumb code only. */
3932 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3934 /* non-PIC stubs. */
3935 : ((globals->use_blx
3936 && (r_type == R_ARM_THM_CALL))
3937 /* V5T and above. */
3938 ? arm_stub_long_branch_any_any
3940 : arm_stub_long_branch_v4t_thumb_thumb);
3944 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3946 ? arm_stub_long_branch_thumb_only_pic
3948 : arm_stub_long_branch_thumb_only;
3955 && sym_sec->owner != NULL
3956 && !INTERWORK_FLAG (sym_sec->owner))
3958 (*_bfd_error_handler)
3959 (_("%B(%s): warning: interworking not enabled.\n"
3960 " first occurrence: %B: Thumb call to ARM"),
3961 sym_sec->owner, input_bfd, name);
3965 (bfd_link_pic (info) | globals->pic_veneer)
3967 ? (r_type == R_ARM_THM_TLS_CALL
3968 /* TLS PIC stubs. */
3969 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3970 : arm_stub_long_branch_v4t_thumb_tls_pic)
3971 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3972 /* V5T PIC and above. */
3973 ? arm_stub_long_branch_any_arm_pic
3975 : arm_stub_long_branch_v4t_thumb_arm_pic))
3977 /* non-PIC stubs. */
3978 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3979 /* V5T and above. */
3980 ? arm_stub_long_branch_any_any
3982 : arm_stub_long_branch_v4t_thumb_arm);
3984 /* Handle v4t short branches. */
3985 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3986 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3987 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3988 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3992 else if (r_type == R_ARM_CALL
3993 || r_type == R_ARM_JUMP24
3994 || r_type == R_ARM_PLT32
3995 || r_type == R_ARM_TLS_CALL)
3997 if (branch_type == ST_BRANCH_TO_THUMB)
4002 && sym_sec->owner != NULL
4003 && !INTERWORK_FLAG (sym_sec->owner))
4005 (*_bfd_error_handler)
4006 (_("%B(%s): warning: interworking not enabled.\n"
4007 " first occurrence: %B: ARM call to Thumb"),
4008 sym_sec->owner, input_bfd, name);
4011 /* We have an extra 2-bytes reach because of
4012 the mode change (bit 24 (H) of BLX encoding). */
4013 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4014 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4015 || (r_type == R_ARM_CALL && !globals->use_blx)
4016 || (r_type == R_ARM_JUMP24)
4017 || (r_type == R_ARM_PLT32))
4019 stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4021 ? ((globals->use_blx)
4022 /* V5T and above. */
4023 ? arm_stub_long_branch_any_thumb_pic
4025 : arm_stub_long_branch_v4t_arm_thumb_pic)
4027 /* non-PIC stubs. */
4028 : ((globals->use_blx)
4029 /* V5T and above. */
4030 ? arm_stub_long_branch_any_any
4032 : arm_stub_long_branch_v4t_arm_thumb);
4038 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4039 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4042 (bfd_link_pic (info) | globals->pic_veneer)
4044 ? (r_type == R_ARM_TLS_CALL
4046 ? arm_stub_long_branch_any_tls_pic
4048 ? arm_stub_long_branch_arm_nacl_pic
4049 : arm_stub_long_branch_any_arm_pic))
4050 /* non-PIC stubs. */
4052 ? arm_stub_long_branch_arm_nacl
4053 : arm_stub_long_branch_any_any);
4058 /* If a stub is needed, record the actual destination type. */
4059 if (stub_type != arm_stub_none)
4060 *actual_branch_type = branch_type;
4065 /* Build a name for an entry in the stub hash table. */
4068 elf32_arm_stub_name (const asection *input_section,
4069 const asection *sym_sec,
4070 const struct elf32_arm_link_hash_entry *hash,
4071 const Elf_Internal_Rela *rel,
4072 enum elf32_arm_stub_type stub_type)
4079 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4080 stub_name = (char *) bfd_malloc (len);
4081 if (stub_name != NULL)
4082 sprintf (stub_name, "%08x_%s+%x_%d",
4083 input_section->id & 0xffffffff,
4084 hash->root.root.root.string,
4085 (int) rel->r_addend & 0xffffffff,
4090 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4091 stub_name = (char *) bfd_malloc (len);
4092 if (stub_name != NULL)
4093 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4094 input_section->id & 0xffffffff,
4095 sym_sec->id & 0xffffffff,
4096 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4097 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4098 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4099 (int) rel->r_addend & 0xffffffff,
4106 /* Look up an entry in the stub hash. Stub entries are cached because
4107 creating the stub name takes a bit of time. */
4109 static struct elf32_arm_stub_hash_entry *
4110 elf32_arm_get_stub_entry (const asection *input_section,
4111 const asection *sym_sec,
4112 struct elf_link_hash_entry *hash,
4113 const Elf_Internal_Rela *rel,
4114 struct elf32_arm_link_hash_table *htab,
4115 enum elf32_arm_stub_type stub_type)
4117 struct elf32_arm_stub_hash_entry *stub_entry;
4118 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4119 const asection *id_sec;
4121 if ((input_section->flags & SEC_CODE) == 0)
4124 /* If this input section is part of a group of sections sharing one
4125 stub section, then use the id of the first section in the group.
4126 Stub names need to include a section id, as there may well be
4127 more than one stub used to reach say, printf, and we need to
4128 distinguish between them. */
4129 id_sec = htab->stub_group[input_section->id].link_sec;
4131 if (h != NULL && h->stub_cache != NULL
4132 && h->stub_cache->h == h
4133 && h->stub_cache->id_sec == id_sec
4134 && h->stub_cache->stub_type == stub_type)
4136 stub_entry = h->stub_cache;
4142 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4143 if (stub_name == NULL)
4146 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4147 stub_name, FALSE, FALSE);
4149 h->stub_cache = stub_entry;
4157 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4161 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4163 if (stub_type >= max_stub_type)
4164 abort (); /* Should be unreachable. */
4169 /* Required alignment (as a power of 2) for the dedicated section holding
4170 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4171 with input sections. */
4174 arm_dedicated_stub_output_section_required_alignment
4175 (enum elf32_arm_stub_type stub_type)
4177 if (stub_type >= max_stub_type)
4178 abort (); /* Should be unreachable. */
4180 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4184 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4185 NULL if veneers of this type are interspersed with input sections. */
4188 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4190 if (stub_type >= max_stub_type)
4191 abort (); /* Should be unreachable. */
4193 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4197 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4198 returns the address of the hash table field in HTAB holding a pointer to the
4199 corresponding input section. Otherwise, returns NULL. */
4202 arm_dedicated_stub_input_section_ptr
4203 (struct elf32_arm_link_hash_table *htab ATTRIBUTE_UNUSED,
4204 enum elf32_arm_stub_type stub_type)
4206 if (stub_type >= max_stub_type)
4207 abort (); /* Should be unreachable. */
4209 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4213 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4214 is the section that branch into veneer and can be NULL if stub should go in
4215 a dedicated output section. Returns a pointer to the stub section, and the
4216 section to which the stub section will be attached (in *LINK_SEC_P).
4217 LINK_SEC_P may be NULL. */
4220 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4221 struct elf32_arm_link_hash_table *htab,
4222 enum elf32_arm_stub_type stub_type)
4224 asection *link_sec, *out_sec, **stub_sec_p;
4225 const char *stub_sec_prefix;
4226 bfd_boolean dedicated_output_section =
4227 arm_dedicated_stub_output_section_required (stub_type);
4230 if (dedicated_output_section)
4232 bfd *output_bfd = htab->obfd;
4233 const char *out_sec_name =
4234 arm_dedicated_stub_output_section_name (stub_type);
4236 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
4237 stub_sec_prefix = out_sec_name;
4238 align = arm_dedicated_stub_output_section_required_alignment (stub_type);
4239 out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
4240 if (out_sec == NULL)
4242 (*_bfd_error_handler) (_("No address assigned to the veneers output "
4243 "section %s"), out_sec_name);
4249 link_sec = htab->stub_group[section->id].link_sec;
4250 BFD_ASSERT (link_sec != NULL);
4251 stub_sec_p = &htab->stub_group[section->id].stub_sec;
4252 if (*stub_sec_p == NULL)
4253 stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
4254 stub_sec_prefix = link_sec->name;
4255 out_sec = link_sec->output_section;
4256 align = htab->nacl_p ? 4 : 3;
4259 if (*stub_sec_p == NULL)
4265 namelen = strlen (stub_sec_prefix);
4266 len = namelen + sizeof (STUB_SUFFIX);
4267 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4271 memcpy (s_name, stub_sec_prefix, namelen);
4272 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4273 *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4275 if (*stub_sec_p == NULL)
4278 out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
4279 | SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
4283 if (!dedicated_output_section)
4284 htab->stub_group[section->id].stub_sec = *stub_sec_p;
4287 *link_sec_p = link_sec;
4292 /* Add a new stub entry to the stub hash. Not all fields of the new
4293 stub entry are initialised. */
4295 static struct elf32_arm_stub_hash_entry *
4296 elf32_arm_add_stub (const char *stub_name, asection *section,
4297 struct elf32_arm_link_hash_table *htab,
4298 enum elf32_arm_stub_type stub_type)
4302 struct elf32_arm_stub_hash_entry *stub_entry;
4304 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4306 if (stub_sec == NULL)
4309 /* Enter this entry into the linker stub hash table. */
4310 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4312 if (stub_entry == NULL)
4314 if (section == NULL)
4316 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4322 stub_entry->stub_sec = stub_sec;
4323 stub_entry->stub_offset = 0;
4324 stub_entry->id_sec = link_sec;
4329 /* Store an Arm insn into an output section not processed by
4330 elf32_arm_write_section. */
4333 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4334 bfd * output_bfd, bfd_vma val, void * ptr)
4336 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4337 bfd_putl32 (val, ptr);
4339 bfd_putb32 (val, ptr);
4342 /* Store a 16-bit Thumb insn into an output section not processed by
4343 elf32_arm_write_section. */
4346 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4347 bfd * output_bfd, bfd_vma val, void * ptr)
4349 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4350 bfd_putl16 (val, ptr);
4352 bfd_putb16 (val, ptr);
4355 /* Store a Thumb2 insn into an output section not processed by
4356 elf32_arm_write_section. */
4359 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4360 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4362 /* T2 instructions are 16-bit streamed. */
4363 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4365 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4366 bfd_putl16 ((val & 0xffff), ptr + 2);
4370 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4371 bfd_putb16 ((val & 0xffff), ptr + 2);
4375 /* If it's possible to change R_TYPE to a more efficient access
4376 model, return the new reloc type. */
4379 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4380 struct elf_link_hash_entry *h)
4382 int is_local = (h == NULL);
4384 if (bfd_link_pic (info)
4385 || (h && h->root.type == bfd_link_hash_undefweak))
4388 /* We do not support relaxations for Old TLS models. */
4391 case R_ARM_TLS_GOTDESC:
4392 case R_ARM_TLS_CALL:
4393 case R_ARM_THM_TLS_CALL:
4394 case R_ARM_TLS_DESCSEQ:
4395 case R_ARM_THM_TLS_DESCSEQ:
4396 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4402 static bfd_reloc_status_type elf32_arm_final_link_relocate
4403 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4404 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4405 const char *, unsigned char, enum arm_st_branch_type,
4406 struct elf_link_hash_entry *, bfd_boolean *, char **);
4409 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4413 case arm_stub_a8_veneer_b_cond:
4414 case arm_stub_a8_veneer_b:
4415 case arm_stub_a8_veneer_bl:
4418 case arm_stub_long_branch_any_any:
4419 case arm_stub_long_branch_v4t_arm_thumb:
4420 case arm_stub_long_branch_thumb_only:
4421 case arm_stub_long_branch_v4t_thumb_thumb:
4422 case arm_stub_long_branch_v4t_thumb_arm:
4423 case arm_stub_short_branch_v4t_thumb_arm:
4424 case arm_stub_long_branch_any_arm_pic:
4425 case arm_stub_long_branch_any_thumb_pic:
4426 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4427 case arm_stub_long_branch_v4t_arm_thumb_pic:
4428 case arm_stub_long_branch_v4t_thumb_arm_pic:
4429 case arm_stub_long_branch_thumb_only_pic:
4430 case arm_stub_long_branch_any_tls_pic:
4431 case arm_stub_long_branch_v4t_thumb_tls_pic:
4432 case arm_stub_a8_veneer_blx:
4435 case arm_stub_long_branch_arm_nacl:
4436 case arm_stub_long_branch_arm_nacl_pic:
4440 abort (); /* Should be unreachable. */
4444 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4445 veneering (TRUE) or have their own symbol (FALSE). */
4448 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4450 if (stub_type >= max_stub_type)
4451 abort (); /* Should be unreachable. */
4456 /* Returns the padding needed for the dedicated section used stubs of type
4460 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4462 if (stub_type >= max_stub_type)
4463 abort (); /* Should be unreachable. */
4469 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4473 struct elf32_arm_stub_hash_entry *stub_entry;
4474 struct elf32_arm_link_hash_table *globals;
4475 struct bfd_link_info *info;
4482 const insn_sequence *template_sequence;
4484 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4485 int stub_reloc_offset[MAXRELOCS] = {0, 0};
4488 /* Massage our args to the form they really have. */
4489 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4490 info = (struct bfd_link_info *) in_arg;
4492 globals = elf32_arm_hash_table (info);
4493 if (globals == NULL)
4496 stub_sec = stub_entry->stub_sec;
4498 if ((globals->fix_cortex_a8 < 0)
4499 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4500 /* We have to do less-strictly-aligned fixes last. */
4503 /* Make a note of the offset within the stubs for this entry. */
4504 stub_entry->stub_offset = stub_sec->size;
4505 loc = stub_sec->contents + stub_entry->stub_offset;
4507 stub_bfd = stub_sec->owner;
4509 /* This is the address of the stub destination. */
4510 sym_value = (stub_entry->target_value
4511 + stub_entry->target_section->output_offset
4512 + stub_entry->target_section->output_section->vma);
4514 template_sequence = stub_entry->stub_template;
4515 template_size = stub_entry->stub_template_size;
4518 for (i = 0; i < template_size; i++)
4520 switch (template_sequence[i].type)
4524 bfd_vma data = (bfd_vma) template_sequence[i].data;
4525 if (template_sequence[i].reloc_addend != 0)
4527 /* We've borrowed the reloc_addend field to mean we should
4528 insert a condition code into this (Thumb-1 branch)
4529 instruction. See THUMB16_BCOND_INSN. */
4530 BFD_ASSERT ((data & 0xff00) == 0xd000);
4531 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4533 bfd_put_16 (stub_bfd, data, loc + size);
4539 bfd_put_16 (stub_bfd,
4540 (template_sequence[i].data >> 16) & 0xffff,
4542 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4544 if (template_sequence[i].r_type != R_ARM_NONE)
4546 stub_reloc_idx[nrelocs] = i;
4547 stub_reloc_offset[nrelocs++] = size;
4553 bfd_put_32 (stub_bfd, template_sequence[i].data,
4555 /* Handle cases where the target is encoded within the
4557 if (template_sequence[i].r_type == R_ARM_JUMP24)
4559 stub_reloc_idx[nrelocs] = i;
4560 stub_reloc_offset[nrelocs++] = size;
4566 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4567 stub_reloc_idx[nrelocs] = i;
4568 stub_reloc_offset[nrelocs++] = size;
4578 stub_sec->size += size;
4580 /* Stub size has already been computed in arm_size_one_stub. Check
4582 BFD_ASSERT (size == stub_entry->stub_size);
4584 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4585 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4588 /* Assume there is at least one and at most MAXRELOCS entries to relocate
4590 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
4592 for (i = 0; i < nrelocs; i++)
4594 Elf_Internal_Rela rel;
4595 bfd_boolean unresolved_reloc;
4596 char *error_message;
4598 sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
4600 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4601 rel.r_info = ELF32_R_INFO (0,
4602 template_sequence[stub_reloc_idx[i]].r_type);
4605 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4606 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4607 template should refer back to the instruction after the original
4608 branch. We use target_section as Cortex-A8 erratum workaround stubs
4609 are only generated when both source and target are in the same
4611 points_to = stub_entry->target_section->output_section->vma
4612 + stub_entry->target_section->output_offset
4613 + stub_entry->source_value;
4615 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4616 (template_sequence[stub_reloc_idx[i]].r_type),
4617 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4618 points_to, info, stub_entry->target_section, "", STT_FUNC,
4619 stub_entry->branch_type,
4620 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
4628 /* Calculate the template, template size and instruction size for a stub.
4629 Return value is the instruction size. */
4632 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4633 const insn_sequence **stub_template,
4634 int *stub_template_size)
4636 const insn_sequence *template_sequence = NULL;
4637 int template_size = 0, i;
4640 template_sequence = stub_definitions[stub_type].template_sequence;
4642 *stub_template = template_sequence;
4644 template_size = stub_definitions[stub_type].template_size;
4645 if (stub_template_size)
4646 *stub_template_size = template_size;
4649 for (i = 0; i < template_size; i++)
4651 switch (template_sequence[i].type)
4672 /* As above, but don't actually build the stub. Just bump offset so
4673 we know stub section sizes. */
4676 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4677 void *in_arg ATTRIBUTE_UNUSED)
4679 struct elf32_arm_stub_hash_entry *stub_entry;
4680 const insn_sequence *template_sequence;
4681 int template_size, size;
4683 /* Massage our args to the form they really have. */
4684 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4686 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4687 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4689 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4692 stub_entry->stub_size = size;
4693 stub_entry->stub_template = template_sequence;
4694 stub_entry->stub_template_size = template_size;
4696 size = (size + 7) & ~7;
4697 stub_entry->stub_sec->size += size;
4702 /* External entry points for sizing and building linker stubs. */
4704 /* Set up various things so that we can make a list of input sections
4705 for each output section included in the link. Returns -1 on error,
4706 0 when no stubs will be needed, and 1 on success. */
4709 elf32_arm_setup_section_lists (bfd *output_bfd,
4710 struct bfd_link_info *info)
4713 unsigned int bfd_count;
4714 unsigned int top_id, top_index;
4716 asection **input_list, **list;
4718 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4722 if (! is_elf_hash_table (htab))
4725 /* Count the number of input BFDs and find the top input section id. */
4726 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4728 input_bfd = input_bfd->link.next)
4731 for (section = input_bfd->sections;
4733 section = section->next)
4735 if (top_id < section->id)
4736 top_id = section->id;
4739 htab->bfd_count = bfd_count;
4741 amt = sizeof (struct map_stub) * (top_id + 1);
4742 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4743 if (htab->stub_group == NULL)
4745 htab->top_id = top_id;
4747 /* We can't use output_bfd->section_count here to find the top output
4748 section index as some sections may have been removed, and
4749 _bfd_strip_section_from_output doesn't renumber the indices. */
4750 for (section = output_bfd->sections, top_index = 0;
4752 section = section->next)
4754 if (top_index < section->index)
4755 top_index = section->index;
4758 htab->top_index = top_index;
4759 amt = sizeof (asection *) * (top_index + 1);
4760 input_list = (asection **) bfd_malloc (amt);
4761 htab->input_list = input_list;
4762 if (input_list == NULL)
4765 /* For sections we aren't interested in, mark their entries with a
4766 value we can check later. */
4767 list = input_list + top_index;
4769 *list = bfd_abs_section_ptr;
4770 while (list-- != input_list);
4772 for (section = output_bfd->sections;
4774 section = section->next)
4776 if ((section->flags & SEC_CODE) != 0)
4777 input_list[section->index] = NULL;
4783 /* The linker repeatedly calls this function for each input section,
4784 in the order that input sections are linked into output sections.
4785 Build lists of input sections to determine groupings between which
4786 we may insert linker stubs. */
4789 elf32_arm_next_input_section (struct bfd_link_info *info,
4792 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4797 if (isec->output_section->index <= htab->top_index)
4799 asection **list = htab->input_list + isec->output_section->index;
4801 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
4803 /* Steal the link_sec pointer for our list. */
4804 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4805 /* This happens to make the list in reverse order,
4806 which we reverse later. */
4807 PREV_SEC (isec) = *list;
4813 /* See whether we can group stub sections together. Grouping stub
4814 sections may result in fewer stubs. More importantly, we need to
4815 put all .init* and .fini* stubs at the end of the .init or
4816 .fini output sections respectively, because glibc splits the
4817 _init and _fini functions into multiple parts. Putting a stub in
4818 the middle of a function is not a good idea. */
4821 group_sections (struct elf32_arm_link_hash_table *htab,
4822 bfd_size_type stub_group_size,
4823 bfd_boolean stubs_always_after_branch)
4825 asection **list = htab->input_list;
4829 asection *tail = *list;
4832 if (tail == bfd_abs_section_ptr)
4835 /* Reverse the list: we must avoid placing stubs at the
4836 beginning of the section because the beginning of the text
4837 section may be required for an interrupt vector in bare metal
4839 #define NEXT_SEC PREV_SEC
4841 while (tail != NULL)
4843 /* Pop from tail. */
4844 asection *item = tail;
4845 tail = PREV_SEC (item);
4848 NEXT_SEC (item) = head;
4852 while (head != NULL)
4856 bfd_vma stub_group_start = head->output_offset;
4857 bfd_vma end_of_next;
4860 while (NEXT_SEC (curr) != NULL)
4862 next = NEXT_SEC (curr);
4863 end_of_next = next->output_offset + next->size;
4864 if (end_of_next - stub_group_start >= stub_group_size)
4865 /* End of NEXT is too far from start, so stop. */
4867 /* Add NEXT to the group. */
4871 /* OK, the size from the start to the start of CURR is less
4872 than stub_group_size and thus can be handled by one stub
4873 section. (Or the head section is itself larger than
4874 stub_group_size, in which case we may be toast.)
4875 We should really be keeping track of the total size of
4876 stubs added here, as stubs contribute to the final output
4880 next = NEXT_SEC (head);
4881 /* Set up this stub group. */
4882 htab->stub_group[head->id].link_sec = curr;
4884 while (head != curr && (head = next) != NULL);
4886 /* But wait, there's more! Input sections up to stub_group_size
4887 bytes after the stub section can be handled by it too. */
4888 if (!stubs_always_after_branch)
4890 stub_group_start = curr->output_offset + curr->size;
4892 while (next != NULL)
4894 end_of_next = next->output_offset + next->size;
4895 if (end_of_next - stub_group_start >= stub_group_size)
4896 /* End of NEXT is too far from stubs, so stop. */
4898 /* Add NEXT to the stub group. */
4900 next = NEXT_SEC (head);
4901 htab->stub_group[head->id].link_sec = curr;
4907 while (list++ != htab->input_list + htab->top_index);
4909 free (htab->input_list);
4914 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4918 a8_reloc_compare (const void *a, const void *b)
4920 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4921 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4923 if (ra->from < rb->from)
4925 else if (ra->from > rb->from)
4931 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4932 const char *, char **);
4934 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4935 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4936 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4940 cortex_a8_erratum_scan (bfd *input_bfd,
4941 struct bfd_link_info *info,
4942 struct a8_erratum_fix **a8_fixes_p,
4943 unsigned int *num_a8_fixes_p,
4944 unsigned int *a8_fix_table_size_p,
4945 struct a8_erratum_reloc *a8_relocs,
4946 unsigned int num_a8_relocs,
4947 unsigned prev_num_a8_fixes,
4948 bfd_boolean *stub_changed_p)
4951 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4952 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4953 unsigned int num_a8_fixes = *num_a8_fixes_p;
4954 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4959 for (section = input_bfd->sections;
4961 section = section->next)
4963 bfd_byte *contents = NULL;
4964 struct _arm_elf_section_data *sec_data;
4968 if (elf_section_type (section) != SHT_PROGBITS
4969 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4970 || (section->flags & SEC_EXCLUDE) != 0
4971 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4972 || (section->output_section == bfd_abs_section_ptr))
4975 base_vma = section->output_section->vma + section->output_offset;
4977 if (elf_section_data (section)->this_hdr.contents != NULL)
4978 contents = elf_section_data (section)->this_hdr.contents;
4979 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4982 sec_data = elf32_arm_section_data (section);
4984 for (span = 0; span < sec_data->mapcount; span++)
4986 unsigned int span_start = sec_data->map[span].vma;
4987 unsigned int span_end = (span == sec_data->mapcount - 1)
4988 ? section->size : sec_data->map[span + 1].vma;
4990 char span_type = sec_data->map[span].type;
4991 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4993 if (span_type != 't')
4996 /* Span is entirely within a single 4KB region: skip scanning. */
4997 if (((base_vma + span_start) & ~0xfff)
4998 == ((base_vma + span_end) & ~0xfff))
5001 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5003 * The opcode is BLX.W, BL.W, B.W, Bcc.W
5004 * The branch target is in the same 4KB region as the
5005 first half of the branch.
5006 * The instruction before the branch is a 32-bit
5007 length non-branch instruction. */
5008 for (i = span_start; i < span_end;)
5010 unsigned int insn = bfd_getl16 (&contents[i]);
5011 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
5012 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
5014 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5019 /* Load the rest of the insn (in manual-friendly order). */
5020 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5022 /* Encoding T4: B<c>.W. */
5023 is_b = (insn & 0xf800d000) == 0xf0009000;
5024 /* Encoding T1: BL<c>.W. */
5025 is_bl = (insn & 0xf800d000) == 0xf000d000;
5026 /* Encoding T2: BLX<c>.W. */
5027 is_blx = (insn & 0xf800d000) == 0xf000c000;
5028 /* Encoding T3: B<c>.W (not permitted in IT block). */
5029 is_bcc = (insn & 0xf800d000) == 0xf0008000
5030 && (insn & 0x07f00000) != 0x03800000;
5033 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5035 if (((base_vma + i) & 0xfff) == 0xffe
5039 && ! last_was_branch)
5041 bfd_signed_vma offset = 0;
5042 bfd_boolean force_target_arm = FALSE;
5043 bfd_boolean force_target_thumb = FALSE;
5045 enum elf32_arm_stub_type stub_type = arm_stub_none;
5046 struct a8_erratum_reloc key, *found;
5047 bfd_boolean use_plt = FALSE;
5049 key.from = base_vma + i;
5050 found = (struct a8_erratum_reloc *)
5051 bsearch (&key, a8_relocs, num_a8_relocs,
5052 sizeof (struct a8_erratum_reloc),
5057 char *error_message = NULL;
5058 struct elf_link_hash_entry *entry;
5060 /* We don't care about the error returned from this
5061 function, only if there is glue or not. */
5062 entry = find_thumb_glue (info, found->sym_name,
5066 found->non_a8_stub = TRUE;
5068 /* Keep a simpler condition, for the sake of clarity. */
5069 if (htab->root.splt != NULL && found->hash != NULL
5070 && found->hash->root.plt.offset != (bfd_vma) -1)
5073 if (found->r_type == R_ARM_THM_CALL)
5075 if (found->branch_type == ST_BRANCH_TO_ARM
5077 force_target_arm = TRUE;
5079 force_target_thumb = TRUE;
5083 /* Check if we have an offending branch instruction. */
5085 if (found && found->non_a8_stub)
5086 /* We've already made a stub for this instruction, e.g.
5087 it's a long branch or a Thumb->ARM stub. Assume that
5088 stub will suffice to work around the A8 erratum (see
5089 setting of always_after_branch above). */
5093 offset = (insn & 0x7ff) << 1;
5094 offset |= (insn & 0x3f0000) >> 4;
5095 offset |= (insn & 0x2000) ? 0x40000 : 0;
5096 offset |= (insn & 0x800) ? 0x80000 : 0;
5097 offset |= (insn & 0x4000000) ? 0x100000 : 0;
5098 if (offset & 0x100000)
5099 offset |= ~ ((bfd_signed_vma) 0xfffff);
5100 stub_type = arm_stub_a8_veneer_b_cond;
5102 else if (is_b || is_bl || is_blx)
5104 int s = (insn & 0x4000000) != 0;
5105 int j1 = (insn & 0x2000) != 0;
5106 int j2 = (insn & 0x800) != 0;
5110 offset = (insn & 0x7ff) << 1;
5111 offset |= (insn & 0x3ff0000) >> 4;
5115 if (offset & 0x1000000)
5116 offset |= ~ ((bfd_signed_vma) 0xffffff);
5119 offset &= ~ ((bfd_signed_vma) 3);
5121 stub_type = is_blx ? arm_stub_a8_veneer_blx :
5122 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5125 if (stub_type != arm_stub_none)
5127 bfd_vma pc_for_insn = base_vma + i + 4;
5129 /* The original instruction is a BL, but the target is
5130 an ARM instruction. If we were not making a stub,
5131 the BL would have been converted to a BLX. Use the
5132 BLX stub instead in that case. */
5133 if (htab->use_blx && force_target_arm
5134 && stub_type == arm_stub_a8_veneer_bl)
5136 stub_type = arm_stub_a8_veneer_blx;
5140 /* Conversely, if the original instruction was
5141 BLX but the target is Thumb mode, use the BL
5143 else if (force_target_thumb
5144 && stub_type == arm_stub_a8_veneer_blx)
5146 stub_type = arm_stub_a8_veneer_bl;
5152 pc_for_insn &= ~ ((bfd_vma) 3);
5154 /* If we found a relocation, use the proper destination,
5155 not the offset in the (unrelocated) instruction.
5156 Note this is always done if we switched the stub type
5160 (bfd_signed_vma) (found->destination - pc_for_insn);
5162 /* If the stub will use a Thumb-mode branch to a
5163 PLT target, redirect it to the preceding Thumb
5165 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5166 offset -= PLT_THUMB_STUB_SIZE;
5168 target = pc_for_insn + offset;
5170 /* The BLX stub is ARM-mode code. Adjust the offset to
5171 take the different PC value (+8 instead of +4) into
5173 if (stub_type == arm_stub_a8_veneer_blx)
5176 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5178 char *stub_name = NULL;
5180 if (num_a8_fixes == a8_fix_table_size)
5182 a8_fix_table_size *= 2;
5183 a8_fixes = (struct a8_erratum_fix *)
5184 bfd_realloc (a8_fixes,
5185 sizeof (struct a8_erratum_fix)
5186 * a8_fix_table_size);
5189 if (num_a8_fixes < prev_num_a8_fixes)
5191 /* If we're doing a subsequent scan,
5192 check if we've found the same fix as
5193 before, and try and reuse the stub
5195 stub_name = a8_fixes[num_a8_fixes].stub_name;
5196 if ((a8_fixes[num_a8_fixes].section != section)
5197 || (a8_fixes[num_a8_fixes].offset != i))
5201 *stub_changed_p = TRUE;
5207 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5208 if (stub_name != NULL)
5209 sprintf (stub_name, "%x:%x", section->id, i);
5212 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5213 a8_fixes[num_a8_fixes].section = section;
5214 a8_fixes[num_a8_fixes].offset = i;
5215 a8_fixes[num_a8_fixes].target_offset =
5217 a8_fixes[num_a8_fixes].orig_insn = insn;
5218 a8_fixes[num_a8_fixes].stub_name = stub_name;
5219 a8_fixes[num_a8_fixes].stub_type = stub_type;
5220 a8_fixes[num_a8_fixes].branch_type =
5221 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5228 i += insn_32bit ? 4 : 2;
5229 last_was_32bit = insn_32bit;
5230 last_was_branch = is_32bit_branch;
5234 if (elf_section_data (section)->this_hdr.contents == NULL)
5238 *a8_fixes_p = a8_fixes;
5239 *num_a8_fixes_p = num_a8_fixes;
5240 *a8_fix_table_size_p = a8_fix_table_size;
5245 /* Create or update a stub entry depending on whether the stub can already be
5246 found in HTAB. The stub is identified by:
5247 - its type STUB_TYPE
5248 - its source branch (note that several can share the same stub) whose
5249 section and relocation (if any) are given by SECTION and IRELA
5251 - its target symbol whose input section, hash, name, value and branch type
5252 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5255 If found, the value of the stub's target symbol is updated from SYM_VALUE
5256 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5257 TRUE and the stub entry is initialized.
5259 Returns whether the stub could be successfully created or updated, or FALSE
5260 if an error occured. */
5263 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5264 enum elf32_arm_stub_type stub_type, asection *section,
5265 Elf_Internal_Rela *irela, asection *sym_sec,
5266 struct elf32_arm_link_hash_entry *hash, char *sym_name,
5267 bfd_vma sym_value, enum arm_st_branch_type branch_type,
5268 bfd_boolean *new_stub)
5270 const asection *id_sec;
5272 struct elf32_arm_stub_hash_entry *stub_entry;
5273 unsigned int r_type;
5274 bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);
5276 BFD_ASSERT (stub_type != arm_stub_none);
5280 stub_name = sym_name;
5284 BFD_ASSERT (section);
5286 /* Support for grouping stub sections. */
5287 id_sec = htab->stub_group[section->id].link_sec;
5289 /* Get the name of this stub. */
5290 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5296 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
5298 /* The proper stub has already been created, just update its value. */
5299 if (stub_entry != NULL)
5303 stub_entry->target_value = sym_value;
5307 stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5308 if (stub_entry == NULL)
5315 stub_entry->target_value = sym_value;
5316 stub_entry->target_section = sym_sec;
5317 stub_entry->stub_type = stub_type;
5318 stub_entry->h = hash;
5319 stub_entry->branch_type = branch_type;
5322 stub_entry->output_name = sym_name;
5325 if (sym_name == NULL)
5326 sym_name = "unnamed";
5327 stub_entry->output_name = (char *)
5328 bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5329 + strlen (sym_name));
5330 if (stub_entry->output_name == NULL)
5336 /* For historical reasons, use the existing names for ARM-to-Thumb and
5337 Thumb-to-ARM stubs. */
5338 r_type = ELF32_R_TYPE (irela->r_info);
5339 if ((r_type == (unsigned int) R_ARM_THM_CALL
5340 || r_type == (unsigned int) R_ARM_THM_JUMP24
5341 || r_type == (unsigned int) R_ARM_THM_JUMP19)
5342 && branch_type == ST_BRANCH_TO_ARM)
5343 sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5344 else if ((r_type == (unsigned int) R_ARM_CALL
5345 || r_type == (unsigned int) R_ARM_JUMP24)
5346 && branch_type == ST_BRANCH_TO_THUMB)
5347 sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5349 sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5356 /* Determine and set the size of the stub section for a final link.
5358 The basic idea here is to examine all the relocations looking for
5359 PC-relative calls to a target that is unreachable with a "bl"
5363 elf32_arm_size_stubs (bfd *output_bfd,
5365 struct bfd_link_info *info,
5366 bfd_signed_vma group_size,
5367 asection * (*add_stub_section) (const char *, asection *,
5370 void (*layout_sections_again) (void))
5372 bfd_size_type stub_group_size;
5373 bfd_boolean stubs_always_after_branch;
5374 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5375 struct a8_erratum_fix *a8_fixes = NULL;
5376 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
5377 struct a8_erratum_reloc *a8_relocs = NULL;
5378 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
5383 if (htab->fix_cortex_a8)
5385 a8_fixes = (struct a8_erratum_fix *)
5386 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
5387 a8_relocs = (struct a8_erratum_reloc *)
5388 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
5391 /* Propagate mach to stub bfd, because it may not have been
5392 finalized when we created stub_bfd. */
5393 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
5394 bfd_get_mach (output_bfd));
5396 /* Stash our params away. */
5397 htab->stub_bfd = stub_bfd;
5398 htab->add_stub_section = add_stub_section;
5399 htab->layout_sections_again = layout_sections_again;
5400 stubs_always_after_branch = group_size < 0;
5402 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
5403 as the first half of a 32-bit branch straddling two 4K pages. This is a
5404 crude way of enforcing that. */
5405 if (htab->fix_cortex_a8)
5406 stubs_always_after_branch = 1;
5409 stub_group_size = -group_size;
5411 stub_group_size = group_size;
5413 if (stub_group_size == 1)
5415 /* Default values. */
5416 /* Thumb branch range is +-4MB has to be used as the default
5417 maximum size (a given section can contain both ARM and Thumb
5418 code, so the worst case has to be taken into account).
5420 This value is 24K less than that, which allows for 2025
5421 12-byte stubs. If we exceed that, then we will fail to link.
5422 The user will have to relink with an explicit group size
5424 stub_group_size = 4170000;
5427 group_sections (htab, stub_group_size, stubs_always_after_branch);
5429 /* If we're applying the cortex A8 fix, we need to determine the
5430 program header size now, because we cannot change it later --
5431 that could alter section placements. Notice the A8 erratum fix
5432 ends up requiring the section addresses to remain unchanged
5433 modulo the page size. That's something we cannot represent
5434 inside BFD, and we don't want to force the section alignment to
5435 be the page size. */
5436 if (htab->fix_cortex_a8)
5437 (*htab->layout_sections_again) ();
5442 unsigned int bfd_indx;
5444 enum elf32_arm_stub_type stub_type;
5445 bfd_boolean stub_changed = FALSE;
5446 unsigned prev_num_a8_fixes = num_a8_fixes;
5449 for (input_bfd = info->input_bfds, bfd_indx = 0;
5451 input_bfd = input_bfd->link.next, bfd_indx++)
5453 Elf_Internal_Shdr *symtab_hdr;
5455 Elf_Internal_Sym *local_syms = NULL;
5457 if (!is_arm_elf (input_bfd))
5462 /* We'll need the symbol table in a second. */
5463 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5464 if (symtab_hdr->sh_info == 0)
5467 /* Walk over each section attached to the input bfd. */
5468 for (section = input_bfd->sections;
5470 section = section->next)
5472 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5474 /* If there aren't any relocs, then there's nothing more
5476 if ((section->flags & SEC_RELOC) == 0
5477 || section->reloc_count == 0
5478 || (section->flags & SEC_CODE) == 0)
5481 /* If this section is a link-once section that will be
5482 discarded, then don't create any stubs. */
5483 if (section->output_section == NULL
5484 || section->output_section->owner != output_bfd)
5487 /* Get the relocs. */
5489 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5490 NULL, info->keep_memory);
5491 if (internal_relocs == NULL)
5492 goto error_ret_free_local;
5494 /* Now examine each relocation. */
5495 irela = internal_relocs;
5496 irelaend = irela + section->reloc_count;
5497 for (; irela < irelaend; irela++)
5499 unsigned int r_type, r_indx;
5502 bfd_vma destination;
5503 struct elf32_arm_link_hash_entry *hash;
5504 const char *sym_name;
5505 unsigned char st_type;
5506 enum arm_st_branch_type branch_type;
5507 bfd_boolean created_stub = FALSE;
5509 r_type = ELF32_R_TYPE (irela->r_info);
5510 r_indx = ELF32_R_SYM (irela->r_info);
5512 if (r_type >= (unsigned int) R_ARM_max)
5514 bfd_set_error (bfd_error_bad_value);
5515 error_ret_free_internal:
5516 if (elf_section_data (section)->relocs == NULL)
5517 free (internal_relocs);
5519 error_ret_free_local:
5520 if (local_syms != NULL
5521 && (symtab_hdr->contents
5522 != (unsigned char *) local_syms))
5528 if (r_indx >= symtab_hdr->sh_info)
5529 hash = elf32_arm_hash_entry
5530 (elf_sym_hashes (input_bfd)
5531 [r_indx - symtab_hdr->sh_info]);
5533 /* Only look for stubs on branch instructions, or
5534 non-relaxed TLSCALL */
5535 if ((r_type != (unsigned int) R_ARM_CALL)
5536 && (r_type != (unsigned int) R_ARM_THM_CALL)
5537 && (r_type != (unsigned int) R_ARM_JUMP24)
5538 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5539 && (r_type != (unsigned int) R_ARM_THM_XPC22)
5540 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5541 && (r_type != (unsigned int) R_ARM_PLT32)
5542 && !((r_type == (unsigned int) R_ARM_TLS_CALL
5543 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5544 && r_type == elf32_arm_tls_transition
5545 (info, r_type, &hash->root)
5546 && ((hash ? hash->tls_type
5547 : (elf32_arm_local_got_tls_type
5548 (input_bfd)[r_indx]))
5549 & GOT_TLS_GDESC) != 0))
5552 /* Now determine the call target, its name, value,
5559 if (r_type == (unsigned int) R_ARM_TLS_CALL
5560 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5562 /* A non-relaxed TLS call. The target is the
5563 plt-resident trampoline and nothing to do
5565 BFD_ASSERT (htab->tls_trampoline > 0);
5566 sym_sec = htab->root.splt;
5567 sym_value = htab->tls_trampoline;
5570 branch_type = ST_BRANCH_TO_ARM;
5574 /* It's a local symbol. */
5575 Elf_Internal_Sym *sym;
5577 if (local_syms == NULL)
5580 = (Elf_Internal_Sym *) symtab_hdr->contents;
5581 if (local_syms == NULL)
5583 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5584 symtab_hdr->sh_info, 0,
5586 if (local_syms == NULL)
5587 goto error_ret_free_internal;
5590 sym = local_syms + r_indx;
5591 if (sym->st_shndx == SHN_UNDEF)
5592 sym_sec = bfd_und_section_ptr;
5593 else if (sym->st_shndx == SHN_ABS)
5594 sym_sec = bfd_abs_section_ptr;
5595 else if (sym->st_shndx == SHN_COMMON)
5596 sym_sec = bfd_com_section_ptr;
5599 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5602 /* This is an undefined symbol. It can never
5606 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5607 sym_value = sym->st_value;
5608 destination = (sym_value + irela->r_addend
5609 + sym_sec->output_offset
5610 + sym_sec->output_section->vma);
5611 st_type = ELF_ST_TYPE (sym->st_info);
5613 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
5615 = bfd_elf_string_from_elf_section (input_bfd,
5616 symtab_hdr->sh_link,
5621 /* It's an external symbol. */
5622 while (hash->root.root.type == bfd_link_hash_indirect
5623 || hash->root.root.type == bfd_link_hash_warning)
5624 hash = ((struct elf32_arm_link_hash_entry *)
5625 hash->root.root.u.i.link);
5627 if (hash->root.root.type == bfd_link_hash_defined
5628 || hash->root.root.type == bfd_link_hash_defweak)
5630 sym_sec = hash->root.root.u.def.section;
5631 sym_value = hash->root.root.u.def.value;
5633 struct elf32_arm_link_hash_table *globals =
5634 elf32_arm_hash_table (info);
5636 /* For a destination in a shared library,
5637 use the PLT stub as target address to
5638 decide whether a branch stub is
5641 && globals->root.splt != NULL
5643 && hash->root.plt.offset != (bfd_vma) -1)
5645 sym_sec = globals->root.splt;
5646 sym_value = hash->root.plt.offset;
5647 if (sym_sec->output_section != NULL)
5648 destination = (sym_value
5649 + sym_sec->output_offset
5650 + sym_sec->output_section->vma);
5652 else if (sym_sec->output_section != NULL)
5653 destination = (sym_value + irela->r_addend
5654 + sym_sec->output_offset
5655 + sym_sec->output_section->vma);
5657 else if ((hash->root.root.type == bfd_link_hash_undefined)
5658 || (hash->root.root.type == bfd_link_hash_undefweak))
5660 /* For a shared library, use the PLT stub as
5661 target address to decide whether a long
5662 branch stub is needed.
5663 For absolute code, they cannot be handled. */
5664 struct elf32_arm_link_hash_table *globals =
5665 elf32_arm_hash_table (info);
5668 && globals->root.splt != NULL
5670 && hash->root.plt.offset != (bfd_vma) -1)
5672 sym_sec = globals->root.splt;
5673 sym_value = hash->root.plt.offset;
5674 if (sym_sec->output_section != NULL)
5675 destination = (sym_value
5676 + sym_sec->output_offset
5677 + sym_sec->output_section->vma);
5684 bfd_set_error (bfd_error_bad_value);
5685 goto error_ret_free_internal;
5687 st_type = hash->root.type;
5689 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
5690 sym_name = hash->root.root.root.string;
5695 bfd_boolean new_stub;
5697 /* Determine what (if any) linker stub is needed. */
5698 stub_type = arm_type_of_stub (info, section, irela,
5699 st_type, &branch_type,
5700 hash, destination, sym_sec,
5701 input_bfd, sym_name);
5702 if (stub_type == arm_stub_none)
5705 /* We've either created a stub for this reloc already,
5706 or we are about to. */
5708 elf32_arm_create_stub (htab, stub_type, section, irela,
5710 (char *) sym_name, sym_value,
5711 branch_type, &new_stub);
5714 goto error_ret_free_internal;
5718 stub_changed = TRUE;
5722 /* Look for relocations which might trigger Cortex-A8
5724 if (htab->fix_cortex_a8
5725 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5726 || r_type == (unsigned int) R_ARM_THM_JUMP19
5727 || r_type == (unsigned int) R_ARM_THM_CALL
5728 || r_type == (unsigned int) R_ARM_THM_XPC22))
5730 bfd_vma from = section->output_section->vma
5731 + section->output_offset
5734 if ((from & 0xfff) == 0xffe)
5736 /* Found a candidate. Note we haven't checked the
5737 destination is within 4K here: if we do so (and
5738 don't create an entry in a8_relocs) we can't tell
5739 that a branch should have been relocated when
5741 if (num_a8_relocs == a8_reloc_table_size)
5743 a8_reloc_table_size *= 2;
5744 a8_relocs = (struct a8_erratum_reloc *)
5745 bfd_realloc (a8_relocs,
5746 sizeof (struct a8_erratum_reloc)
5747 * a8_reloc_table_size);
5750 a8_relocs[num_a8_relocs].from = from;
5751 a8_relocs[num_a8_relocs].destination = destination;
5752 a8_relocs[num_a8_relocs].r_type = r_type;
5753 a8_relocs[num_a8_relocs].branch_type = branch_type;
5754 a8_relocs[num_a8_relocs].sym_name = sym_name;
5755 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5756 a8_relocs[num_a8_relocs].hash = hash;
5763 /* We're done with the internal relocs, free them. */
5764 if (elf_section_data (section)->relocs == NULL)
5765 free (internal_relocs);
5768 if (htab->fix_cortex_a8)
5770 /* Sort relocs which might apply to Cortex-A8 erratum. */
5771 qsort (a8_relocs, num_a8_relocs,
5772 sizeof (struct a8_erratum_reloc),
5775 /* Scan for branches which might trigger Cortex-A8 erratum. */
5776 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5777 &num_a8_fixes, &a8_fix_table_size,
5778 a8_relocs, num_a8_relocs,
5779 prev_num_a8_fixes, &stub_changed)
5781 goto error_ret_free_local;
5784 if (local_syms != NULL
5785 && symtab_hdr->contents != (unsigned char *) local_syms)
5787 if (!info->keep_memory)
5790 symtab_hdr->contents = (unsigned char *) local_syms;
5794 if (prev_num_a8_fixes != num_a8_fixes)
5795 stub_changed = TRUE;
5800 /* OK, we've added some stubs. Find out the new size of the
5802 for (stub_sec = htab->stub_bfd->sections;
5804 stub_sec = stub_sec->next)
5806 /* Ignore non-stub sections. */
5807 if (!strstr (stub_sec->name, STUB_SUFFIX))
5813 /* Compute stub section size, considering padding. */
5814 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5815 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
5819 asection **stub_sec_p;
5821 padding = arm_dedicated_stub_section_padding (stub_type);
5822 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
5823 /* Skip if no stub input section or no stub section padding
5825 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
5827 /* Stub section padding required but no dedicated section. */
5828 BFD_ASSERT (stub_sec_p);
5830 size = (*stub_sec_p)->size;
5831 size = (size + padding - 1) & ~(padding - 1);
5832 (*stub_sec_p)->size = size;
5835 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5836 if (htab->fix_cortex_a8)
5837 for (i = 0; i < num_a8_fixes; i++)
5839 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5840 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
5842 if (stub_sec == NULL)
5846 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5851 /* Ask the linker to do its stuff. */
5852 (*htab->layout_sections_again) ();
5855 /* Add stubs for Cortex-A8 erratum fixes now. */
5856 if (htab->fix_cortex_a8)
5858 for (i = 0; i < num_a8_fixes; i++)
5860 struct elf32_arm_stub_hash_entry *stub_entry;
5861 char *stub_name = a8_fixes[i].stub_name;
5862 asection *section = a8_fixes[i].section;
5863 unsigned int section_id = a8_fixes[i].section->id;
5864 asection *link_sec = htab->stub_group[section_id].link_sec;
5865 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5866 const insn_sequence *template_sequence;
5867 int template_size, size = 0;
5869 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5871 if (stub_entry == NULL)
5873 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
5879 stub_entry->stub_sec = stub_sec;
5880 stub_entry->stub_offset = 0;
5881 stub_entry->id_sec = link_sec;
5882 stub_entry->stub_type = a8_fixes[i].stub_type;
5883 stub_entry->source_value = a8_fixes[i].offset;
5884 stub_entry->target_section = a8_fixes[i].section;
5885 stub_entry->target_value = a8_fixes[i].target_offset;
5886 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5887 stub_entry->branch_type = a8_fixes[i].branch_type;
5889 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5893 stub_entry->stub_size = size;
5894 stub_entry->stub_template = template_sequence;
5895 stub_entry->stub_template_size = template_size;
5898 /* Stash the Cortex-A8 erratum fix array for use later in
5899 elf32_arm_write_section(). */
5900 htab->a8_erratum_fixes = a8_fixes;
5901 htab->num_a8_erratum_fixes = num_a8_fixes;
5905 htab->a8_erratum_fixes = NULL;
5906 htab->num_a8_erratum_fixes = 0;
5911 /* Build all the stubs associated with the current output file. The
5912 stubs are kept in a hash table attached to the main linker hash
5913 table. We also set up the .plt entries for statically linked PIC
5914 functions here. This function is called via arm_elf_finish in the
5918 elf32_arm_build_stubs (struct bfd_link_info *info)
5921 struct bfd_hash_table *table;
5922 struct elf32_arm_link_hash_table *htab;
5924 htab = elf32_arm_hash_table (info);
5928 for (stub_sec = htab->stub_bfd->sections;
5930 stub_sec = stub_sec->next)
5934 /* Ignore non-stub sections. */
5935 if (!strstr (stub_sec->name, STUB_SUFFIX))
5938 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
5939 must at least be done for stub section requiring padding. */
5940 size = stub_sec->size;
5941 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5942 if (stub_sec->contents == NULL && size != 0)
5947 /* Build the stubs as directed by the stub hash table. */
5948 table = &htab->stub_hash_table;
5949 bfd_hash_traverse (table, arm_build_one_stub, info);
5950 if (htab->fix_cortex_a8)
5952 /* Place the cortex a8 stubs last. */
5953 htab->fix_cortex_a8 = -1;
5954 bfd_hash_traverse (table, arm_build_one_stub, info);
5960 /* Locate the Thumb encoded calling stub for NAME. */
5962 static struct elf_link_hash_entry *
5963 find_thumb_glue (struct bfd_link_info *link_info,
5965 char **error_message)
5968 struct elf_link_hash_entry *hash;
5969 struct elf32_arm_link_hash_table *hash_table;
5971 /* We need a pointer to the armelf specific hash table. */
5972 hash_table = elf32_arm_hash_table (link_info);
5973 if (hash_table == NULL)
5976 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5977 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5979 BFD_ASSERT (tmp_name);
5981 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5983 hash = elf_link_hash_lookup
5984 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5987 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5988 tmp_name, name) == -1)
5989 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5996 /* Locate the ARM encoded calling stub for NAME. */
5998 static struct elf_link_hash_entry *
5999 find_arm_glue (struct bfd_link_info *link_info,
6001 char **error_message)
6004 struct elf_link_hash_entry *myh;
6005 struct elf32_arm_link_hash_table *hash_table;
6007 /* We need a pointer to the elfarm specific hash table. */
6008 hash_table = elf32_arm_hash_table (link_info);
6009 if (hash_table == NULL)
6012 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6013 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
6015 BFD_ASSERT (tmp_name);
6017 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
6019 myh = elf_link_hash_lookup
6020 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
6023 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
6024 tmp_name, name) == -1)
6025 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
6032 /* ARM->Thumb glue (static images):
6036 ldr r12, __func_addr
6039 .word func @ behave as if you saw a ARM_32 reloc.
6046 .word func @ behave as if you saw a ARM_32 reloc.
6048 (relocatable images)
6051 ldr r12, __func_offset
6057 #define ARM2THUMB_STATIC_GLUE_SIZE 12
6058 static const insn32 a2t1_ldr_insn = 0xe59fc000;
6059 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
6060 static const insn32 a2t3_func_addr_insn = 0x00000001;
6062 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
6063 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
6064 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
6066 #define ARM2THUMB_PIC_GLUE_SIZE 16
6067 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
6068 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
6069 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
6071 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
6075 __func_from_thumb: __func_from_thumb:
6077 nop ldr r6, __func_addr
6087 #define THUMB2ARM_GLUE_SIZE 8
6088 static const insn16 t2a1_bx_pc_insn = 0x4778;
6089 static const insn16 t2a2_noop_insn = 0x46c0;
6090 static const insn32 t2a3_b_insn = 0xea000000;
6092 #define VFP11_ERRATUM_VENEER_SIZE 8
6093 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
6094 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
6096 #define ARM_BX_VENEER_SIZE 12
6097 static const insn32 armbx1_tst_insn = 0xe3100001;
6098 static const insn32 armbx2_moveq_insn = 0x01a0f000;
6099 static const insn32 armbx3_bx_insn = 0xe12fff10;
6101 #ifndef ELFARM_NABI_C_INCLUDED
6103 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
6106 bfd_byte * contents;
6110 /* Do not include empty glue sections in the output. */
6113 s = bfd_get_linker_section (abfd, name);
6115 s->flags |= SEC_EXCLUDE;
6120 BFD_ASSERT (abfd != NULL);
6122 s = bfd_get_linker_section (abfd, name);
6123 BFD_ASSERT (s != NULL);
6125 contents = (bfd_byte *) bfd_alloc (abfd, size);
6127 BFD_ASSERT (s->size == size);
6128 s->contents = contents;
6132 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
6134 struct elf32_arm_link_hash_table * globals;
6136 globals = elf32_arm_hash_table (info);
6137 BFD_ASSERT (globals != NULL);
6139 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6140 globals->arm_glue_size,
6141 ARM2THUMB_GLUE_SECTION_NAME);
6143 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6144 globals->thumb_glue_size,
6145 THUMB2ARM_GLUE_SECTION_NAME);
6147 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6148 globals->vfp11_erratum_glue_size,
6149 VFP11_ERRATUM_VENEER_SECTION_NAME);
6151 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6152 globals->stm32l4xx_erratum_glue_size,
6153 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6155 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6156 globals->bx_glue_size,
6157 ARM_BX_GLUE_SECTION_NAME);
6162 /* Allocate space and symbols for calling a Thumb function from Arm mode.
6163 returns the symbol identifying the stub. */
6165 static struct elf_link_hash_entry *
6166 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
6167 struct elf_link_hash_entry * h)
6169 const char * name = h->root.root.string;
6172 struct elf_link_hash_entry * myh;
6173 struct bfd_link_hash_entry * bh;
6174 struct elf32_arm_link_hash_table * globals;
6178 globals = elf32_arm_hash_table (link_info);
6179 BFD_ASSERT (globals != NULL);
6180 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6182 s = bfd_get_linker_section
6183 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
6185 BFD_ASSERT (s != NULL);
6187 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6188 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
6190 BFD_ASSERT (tmp_name);
6192 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
6194 myh = elf_link_hash_lookup
6195 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6199 /* We've already seen this guy. */
6204 /* The only trick here is using hash_table->arm_glue_size as the value.
6205 Even though the section isn't allocated yet, this is where we will be
6206 putting it. The +1 on the value marks that the stub has not been
6207 output yet - not that it is a Thumb function. */
6209 val = globals->arm_glue_size + 1;
6210 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
6211 tmp_name, BSF_GLOBAL, s, val,
6212 NULL, TRUE, FALSE, &bh);
6214 myh = (struct elf_link_hash_entry *) bh;
6215 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6216 myh->forced_local = 1;
6220 if (bfd_link_pic (link_info)
6221 || globals->root.is_relocatable_executable
6222 || globals->pic_veneer)
6223 size = ARM2THUMB_PIC_GLUE_SIZE;
6224 else if (globals->use_blx)
6225 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
6227 size = ARM2THUMB_STATIC_GLUE_SIZE;
6230 globals->arm_glue_size += size;
6235 /* Allocate space for ARMv4 BX veneers. */
6238 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
6241 struct elf32_arm_link_hash_table *globals;
6243 struct elf_link_hash_entry *myh;
6244 struct bfd_link_hash_entry *bh;
6247 /* BX PC does not need a veneer. */
6251 globals = elf32_arm_hash_table (link_info);
6252 BFD_ASSERT (globals != NULL);
6253 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6255 /* Check if this veneer has already been allocated. */
6256 if (globals->bx_glue_offset[reg])
6259 s = bfd_get_linker_section
6260 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
6262 BFD_ASSERT (s != NULL);
6264 /* Add symbol for veneer. */
6266 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
6268 BFD_ASSERT (tmp_name);
6270 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
6272 myh = elf_link_hash_lookup
6273 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
6275 BFD_ASSERT (myh == NULL);
6278 val = globals->bx_glue_size;
6279 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
6280 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6281 NULL, TRUE, FALSE, &bh);
6283 myh = (struct elf_link_hash_entry *) bh;
6284 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6285 myh->forced_local = 1;
6287 s->size += ARM_BX_VENEER_SIZE;
6288 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
6289 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
6293 /* Add an entry to the code/data map for section SEC. */
6296 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
6298 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6299 unsigned int newidx;
6301 if (sec_data->map == NULL)
6303 sec_data->map = (elf32_arm_section_map *)
6304 bfd_malloc (sizeof (elf32_arm_section_map));
6305 sec_data->mapcount = 0;
6306 sec_data->mapsize = 1;
6309 newidx = sec_data->mapcount++;
6311 if (sec_data->mapcount > sec_data->mapsize)
6313 sec_data->mapsize *= 2;
6314 sec_data->map = (elf32_arm_section_map *)
6315 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
6316 * sizeof (elf32_arm_section_map));
6321 sec_data->map[newidx].vma = vma;
6322 sec_data->map[newidx].type = type;
6327 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
6328 veneers are handled for now. */
6331 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
6332 elf32_vfp11_erratum_list *branch,
6334 asection *branch_sec,
6335 unsigned int offset)
6338 struct elf32_arm_link_hash_table *hash_table;
6340 struct elf_link_hash_entry *myh;
6341 struct bfd_link_hash_entry *bh;
6343 struct _arm_elf_section_data *sec_data;
6344 elf32_vfp11_erratum_list *newerr;
6346 hash_table = elf32_arm_hash_table (link_info);
6347 BFD_ASSERT (hash_table != NULL);
6348 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
6350 s = bfd_get_linker_section
6351 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
6353 sec_data = elf32_arm_section_data (s);
6355 BFD_ASSERT (s != NULL);
6357 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6358 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6360 BFD_ASSERT (tmp_name);
6362 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6363 hash_table->num_vfp11_fixes);
6365 myh = elf_link_hash_lookup
6366 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6368 BFD_ASSERT (myh == NULL);
6371 val = hash_table->vfp11_erratum_glue_size;
6372 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
6373 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6374 NULL, TRUE, FALSE, &bh);
6376 myh = (struct elf_link_hash_entry *) bh;
6377 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6378 myh->forced_local = 1;
6380 /* Link veneer back to calling location. */
6381 sec_data->erratumcount += 1;
6382 newerr = (elf32_vfp11_erratum_list *)
6383 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6385 newerr->type = VFP11_ERRATUM_ARM_VENEER;
6387 newerr->u.v.branch = branch;
6388 newerr->u.v.id = hash_table->num_vfp11_fixes;
6389 branch->u.b.veneer = newerr;
6391 newerr->next = sec_data->erratumlist;
6392 sec_data->erratumlist = newerr;
6394 /* A symbol for the return from the veneer. */
6395 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6396 hash_table->num_vfp11_fixes);
6398 myh = elf_link_hash_lookup
6399 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6406 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6407 branch_sec, val, NULL, TRUE, FALSE, &bh);
6409 myh = (struct elf_link_hash_entry *) bh;
6410 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6411 myh->forced_local = 1;
6415 /* Generate a mapping symbol for the veneer section, and explicitly add an
6416 entry for that symbol to the code/data map for the section. */
6417 if (hash_table->vfp11_erratum_glue_size == 0)
6420 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
6421 ever requires this erratum fix. */
6422 _bfd_generic_link_add_one_symbol (link_info,
6423 hash_table->bfd_of_glue_owner, "$a",
6424 BSF_LOCAL, s, 0, NULL,
6427 myh = (struct elf_link_hash_entry *) bh;
6428 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6429 myh->forced_local = 1;
6431 /* The elf32_arm_init_maps function only cares about symbols from input
6432 BFDs. We must make a note of this generated mapping symbol
6433 ourselves so that code byteswapping works properly in
6434 elf32_arm_write_section. */
6435 elf32_arm_section_map_add (s, 'a', 0);
6438 s->size += VFP11_ERRATUM_VENEER_SIZE;
6439 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
6440 hash_table->num_vfp11_fixes++;
6442 /* The offset of the veneer. */
6446 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
6447 veneers need to be handled because used only in Cortex-M. */
6450 record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
6451 elf32_stm32l4xx_erratum_list *branch,
6453 asection *branch_sec,
6454 unsigned int offset,
6455 bfd_size_type veneer_size)
6458 struct elf32_arm_link_hash_table *hash_table;
6460 struct elf_link_hash_entry *myh;
6461 struct bfd_link_hash_entry *bh;
6463 struct _arm_elf_section_data *sec_data;
6464 elf32_stm32l4xx_erratum_list *newerr;
6466 hash_table = elf32_arm_hash_table (link_info);
6467 BFD_ASSERT (hash_table != NULL);
6468 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
6470 s = bfd_get_linker_section
6471 (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6473 BFD_ASSERT (s != NULL);
6475 sec_data = elf32_arm_section_data (s);
6477 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6478 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
6480 BFD_ASSERT (tmp_name);
6482 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
6483 hash_table->num_stm32l4xx_fixes);
6485 myh = elf_link_hash_lookup
6486 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6488 BFD_ASSERT (myh == NULL);
6491 val = hash_table->stm32l4xx_erratum_glue_size;
6492 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
6493 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6494 NULL, TRUE, FALSE, &bh);
6496 myh = (struct elf_link_hash_entry *) bh;
6497 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6498 myh->forced_local = 1;
6500 /* Link veneer back to calling location. */
6501 sec_data->stm32l4xx_erratumcount += 1;
6502 newerr = (elf32_stm32l4xx_erratum_list *)
6503 bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));
6505 newerr->type = STM32L4XX_ERRATUM_VENEER;
6507 newerr->u.v.branch = branch;
6508 newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
6509 branch->u.b.veneer = newerr;
6511 newerr->next = sec_data->stm32l4xx_erratumlist;
6512 sec_data->stm32l4xx_erratumlist = newerr;
6514 /* A symbol for the return from the veneer. */
6515 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
6516 hash_table->num_stm32l4xx_fixes);
6518 myh = elf_link_hash_lookup
6519 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6526 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6527 branch_sec, val, NULL, TRUE, FALSE, &bh);
6529 myh = (struct elf_link_hash_entry *) bh;
6530 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6531 myh->forced_local = 1;
6535 /* Generate a mapping symbol for the veneer section, and explicitly add an
6536 entry for that symbol to the code/data map for the section. */
6537 if (hash_table->stm32l4xx_erratum_glue_size == 0)
6540 /* Creates a THUMB symbol since there is no other choice. */
6541 _bfd_generic_link_add_one_symbol (link_info,
6542 hash_table->bfd_of_glue_owner, "$t",
6543 BSF_LOCAL, s, 0, NULL,
6546 myh = (struct elf_link_hash_entry *) bh;
6547 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6548 myh->forced_local = 1;
6550 /* The elf32_arm_init_maps function only cares about symbols from input
6551 BFDs. We must make a note of this generated mapping symbol
6552 ourselves so that code byteswapping works properly in
6553 elf32_arm_write_section. */
6554 elf32_arm_section_map_add (s, 't', 0);
6557 s->size += veneer_size;
6558 hash_table->stm32l4xx_erratum_glue_size += veneer_size;
6559 hash_table->num_stm32l4xx_fixes++;
6561 /* The offset of the veneer. */
6565 #define ARM_GLUE_SECTION_FLAGS \
6566 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
6567 | SEC_READONLY | SEC_LINKER_CREATED)
6569 /* Create a fake section for use by the ARM backend of the linker. */
6572 arm_make_glue_section (bfd * abfd, const char * name)
6576 sec = bfd_get_linker_section (abfd, name);
6581 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6584 || !bfd_set_section_alignment (abfd, sec, 2))
6587 /* Set the gc mark to prevent the section from being removed by garbage
6588 collection, despite the fact that no relocs refer to this section. */
6594 /* Set size of .plt entries. This function is called from the
6595 linker scripts in ld/emultempl/{armelf}.em. */
6598 bfd_elf32_arm_use_long_plt (void)
6600 elf32_arm_use_long_plt_entry = TRUE;
6603 /* Add the glue sections to ABFD. This function is called from the
6604 linker scripts in ld/emultempl/{armelf}.em. */
6607 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6608 struct bfd_link_info *info)
6610 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
6611 bfd_boolean dostm32l4xx = globals
6612 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
6613 bfd_boolean addglue;
6615 /* If we are only performing a partial
6616 link do not bother adding the glue. */
6617 if (bfd_link_relocatable (info))
6620 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6621 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6622 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6623 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6629 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6632 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
6633 ensures they are not marked for deletion by
6634 strip_excluded_output_sections () when veneers are going to be created
6635 later. Not doing so would trigger assert on empty section size in
6636 lang_size_sections_1 (). */
6639 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
6641 enum elf32_arm_stub_type stub_type;
6643 /* If we are only performing a partial
6644 link do not bother adding the glue. */
6645 if (bfd_link_relocatable (info))
6648 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
6651 const char *out_sec_name;
6653 if (!arm_dedicated_stub_output_section_required (stub_type))
6656 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
6657 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
6658 if (out_sec != NULL)
6659 out_sec->flags |= SEC_KEEP;
6663 /* Select a BFD to be used to hold the sections used by the glue code.
6664 This function is called from the linker scripts in ld/emultempl/
6668 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6670 struct elf32_arm_link_hash_table *globals;
6672 /* If we are only performing a partial link
6673 do not bother getting a bfd to hold the glue. */
6674 if (bfd_link_relocatable (info))
6677 /* Make sure we don't attach the glue sections to a dynamic object. */
6678 BFD_ASSERT (!(abfd->flags & DYNAMIC));
6680 globals = elf32_arm_hash_table (info);
6681 BFD_ASSERT (globals != NULL);
6683 if (globals->bfd_of_glue_owner != NULL)
6686 /* Save the bfd for later use. */
6687 globals->bfd_of_glue_owner = abfd;
6693 check_use_blx (struct elf32_arm_link_hash_table *globals)
6697 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6700 if (globals->fix_arm1176)
6702 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6703 globals->use_blx = 1;
6707 if (cpu_arch > TAG_CPU_ARCH_V4T)
6708 globals->use_blx = 1;
6713 bfd_elf32_arm_process_before_allocation (bfd *abfd,
6714 struct bfd_link_info *link_info)
6716 Elf_Internal_Shdr *symtab_hdr;
6717 Elf_Internal_Rela *internal_relocs = NULL;
6718 Elf_Internal_Rela *irel, *irelend;
6719 bfd_byte *contents = NULL;
6722 struct elf32_arm_link_hash_table *globals;
6724 /* If we are only performing a partial link do not bother
6725 to construct any glue. */
6726 if (bfd_link_relocatable (link_info))
6729 /* Here we have a bfd that is to be included on the link. We have a
6730 hook to do reloc rummaging, before section sizes are nailed down. */
6731 globals = elf32_arm_hash_table (link_info);
6732 BFD_ASSERT (globals != NULL);
6734 check_use_blx (globals);
6736 if (globals->byteswap_code && !bfd_big_endian (abfd))
6738 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
6743 /* PR 5398: If we have not decided to include any loadable sections in
6744 the output then we will not have a glue owner bfd. This is OK, it
6745 just means that there is nothing else for us to do here. */
6746 if (globals->bfd_of_glue_owner == NULL)
6749 /* Rummage around all the relocs and map the glue vectors. */
6750 sec = abfd->sections;
6755 for (; sec != NULL; sec = sec->next)
6757 if (sec->reloc_count == 0)
6760 if ((sec->flags & SEC_EXCLUDE) != 0)
6763 symtab_hdr = & elf_symtab_hdr (abfd);
6765 /* Load the relocs. */
6767 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
6769 if (internal_relocs == NULL)
6772 irelend = internal_relocs + sec->reloc_count;
6773 for (irel = internal_relocs; irel < irelend; irel++)
6776 unsigned long r_index;
6778 struct elf_link_hash_entry *h;
6780 r_type = ELF32_R_TYPE (irel->r_info);
6781 r_index = ELF32_R_SYM (irel->r_info);
6783 /* These are the only relocation types we care about. */
6784 if ( r_type != R_ARM_PC24
6785 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
6788 /* Get the section contents if we haven't done so already. */
6789 if (contents == NULL)
6791 /* Get cached copy if it exists. */
6792 if (elf_section_data (sec)->this_hdr.contents != NULL)
6793 contents = elf_section_data (sec)->this_hdr.contents;
6796 /* Go get them off disk. */
6797 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6802 if (r_type == R_ARM_V4BX)
6806 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
6807 record_arm_bx_glue (link_info, reg);
6811 /* If the relocation is not against a symbol it cannot concern us. */
6814 /* We don't care about local symbols. */
6815 if (r_index < symtab_hdr->sh_info)
6818 /* This is an external symbol. */
6819 r_index -= symtab_hdr->sh_info;
6820 h = (struct elf_link_hash_entry *)
6821 elf_sym_hashes (abfd)[r_index];
6823 /* If the relocation is against a static symbol it must be within
6824 the current section and so cannot be a cross ARM/Thumb relocation. */
6828 /* If the call will go through a PLT entry then we do not need
6830 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
6836 /* This one is a call from arm code. We need to look up
6837 the target of the call. If it is a thumb target, we
6839 if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
6840 == ST_BRANCH_TO_THUMB)
6841 record_arm_to_thumb_glue (link_info, h);
6849 if (contents != NULL
6850 && elf_section_data (sec)->this_hdr.contents != contents)
6854 if (internal_relocs != NULL
6855 && elf_section_data (sec)->relocs != internal_relocs)
6856 free (internal_relocs);
6857 internal_relocs = NULL;
6863 if (contents != NULL
6864 && elf_section_data (sec)->this_hdr.contents != contents)
6866 if (internal_relocs != NULL
6867 && elf_section_data (sec)->relocs != internal_relocs)
6868 free (internal_relocs);
6875 /* Initialise maps of ARM/Thumb/data for input BFDs. */
6878 bfd_elf32_arm_init_maps (bfd *abfd)
6880 Elf_Internal_Sym *isymbuf;
6881 Elf_Internal_Shdr *hdr;
6882 unsigned int i, localsyms;
6884 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
6885 if (! is_arm_elf (abfd))
6888 if ((abfd->flags & DYNAMIC) != 0)
6891 hdr = & elf_symtab_hdr (abfd);
6892 localsyms = hdr->sh_info;
6894 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6895 should contain the number of local symbols, which should come before any
6896 global symbols. Mapping symbols are always local. */
6897 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6900 /* No internal symbols read? Skip this BFD. */
6901 if (isymbuf == NULL)
6904 for (i = 0; i < localsyms; i++)
6906 Elf_Internal_Sym *isym = &isymbuf[i];
6907 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6911 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6913 name = bfd_elf_string_from_elf_section (abfd,
6914 hdr->sh_link, isym->st_name);
6916 if (bfd_is_arm_special_symbol_name (name,
6917 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6918 elf32_arm_section_map_add (sec, name[1], isym->st_value);
6924 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6925 say what they wanted. */
6928 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6930 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6931 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6933 if (globals == NULL)
6936 if (globals->fix_cortex_a8 == -1)
6938 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
6939 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6940 && (out_attr[Tag_CPU_arch_profile].i == 'A'
6941 || out_attr[Tag_CPU_arch_profile].i == 0))
6942 globals->fix_cortex_a8 = 1;
6944 globals->fix_cortex_a8 = 0;
6950 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6952 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6953 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6955 if (globals == NULL)
6957 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
6958 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6960 switch (globals->vfp11_fix)
6962 case BFD_ARM_VFP11_FIX_DEFAULT:
6963 case BFD_ARM_VFP11_FIX_NONE:
6964 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6968 /* Give a warning, but do as the user requests anyway. */
6969 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6970 "workaround is not necessary for target architecture"), obfd);
6973 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
6974 /* For earlier architectures, we might need the workaround, but do not
6975 enable it by default. If users is running with broken hardware, they
6976 must enable the erratum fix explicitly. */
6977 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6981 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
6983 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6984 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6986 if (globals == NULL)
6989 /* We assume only Cortex-M4 may require the fix. */
6990 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
6991 || out_attr[Tag_CPU_arch_profile].i != 'M')
6993 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
6994 /* Give a warning, but do as the user requests anyway. */
6995 (*_bfd_error_handler)
6996 (_("%B: warning: selected STM32L4XX erratum "
6997 "workaround is not necessary for target architecture"), obfd);
7001 enum bfd_arm_vfp11_pipe
7009 /* Return a VFP register number. This is encoded as RX:X for single-precision
7010 registers, or X:RX for double-precision registers, where RX is the group of
7011 four bits in the instruction encoding and X is the single extension bit.
7012 RX and X fields are specified using their lowest (starting) bit. The return
7015 0...31: single-precision registers s0...s31
7016 32...63: double-precision registers d0...d31.
7018 Although X should be zero for VFP11 (encoding d0...d15 only), we might
7019 encounter VFP3 instructions, so we allow the full range for DP registers. */
7022 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
7026 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
7028 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
7031 /* Set bits in *WMASK according to a register number REG as encoded by
7032 bfd_arm_vfp11_regno(). Ignore d16-d31. */
7035 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
7040 *wmask |= 3 << ((reg - 32) * 2);
7043 /* Return TRUE if WMASK overwrites anything in REGS. */
7046 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
7050 for (i = 0; i < numregs; i++)
7052 unsigned int reg = regs[i];
7054 if (reg < 32 && (wmask & (1 << reg)) != 0)
7062 if ((wmask & (3 << (reg * 2))) != 0)
7069 /* In this function, we're interested in two things: finding input registers
7070 for VFP data-processing instructions, and finding the set of registers which
7071 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
7072 hold the written set, so FLDM etc. are easy to deal with (we're only
7073 interested in 32 SP registers or 16 dp registers, due to the VFP version
7074 implemented by the chip in question). DP registers are marked by setting
7075 both SP registers in the write mask). */
7077 static enum bfd_arm_vfp11_pipe
7078 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
7081 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
7082 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
7084 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
7087 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
7088 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
7090 pqrs = ((insn & 0x00800000) >> 20)
7091 | ((insn & 0x00300000) >> 19)
7092 | ((insn & 0x00000040) >> 6);
7096 case 0: /* fmac[sd]. */
7097 case 1: /* fnmac[sd]. */
7098 case 2: /* fmsc[sd]. */
7099 case 3: /* fnmsc[sd]. */
7101 bfd_arm_vfp11_write_mask (destmask, fd);
7103 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
7108 case 4: /* fmul[sd]. */
7109 case 5: /* fnmul[sd]. */
7110 case 6: /* fadd[sd]. */
7111 case 7: /* fsub[sd]. */
7115 case 8: /* fdiv[sd]. */
7118 bfd_arm_vfp11_write_mask (destmask, fd);
7119 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
7124 case 15: /* extended opcode. */
7126 unsigned int extn = ((insn >> 15) & 0x1e)
7127 | ((insn >> 7) & 1);
7131 case 0: /* fcpy[sd]. */
7132 case 1: /* fabs[sd]. */
7133 case 2: /* fneg[sd]. */
7134 case 8: /* fcmp[sd]. */
7135 case 9: /* fcmpe[sd]. */
7136 case 10: /* fcmpz[sd]. */
7137 case 11: /* fcmpez[sd]. */
7138 case 16: /* fuito[sd]. */
7139 case 17: /* fsito[sd]. */
7140 case 24: /* ftoui[sd]. */
7141 case 25: /* ftouiz[sd]. */
7142 case 26: /* ftosi[sd]. */
7143 case 27: /* ftosiz[sd]. */
7144 /* These instructions will not bounce due to underflow. */
7149 case 3: /* fsqrt[sd]. */
7150 /* fsqrt cannot underflow, but it can (perhaps) overwrite
7151 registers to cause the erratum in previous instructions. */
7152 bfd_arm_vfp11_write_mask (destmask, fd);
7156 case 15: /* fcvt{ds,sd}. */
7160 bfd_arm_vfp11_write_mask (destmask, fd);
7162 /* Only FCVTSD can underflow. */
7163 if ((insn & 0x100) != 0)
7182 /* Two-register transfer. */
7183 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
7185 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
7187 if ((insn & 0x100000) == 0)
7190 bfd_arm_vfp11_write_mask (destmask, fm);
7193 bfd_arm_vfp11_write_mask (destmask, fm);
7194 bfd_arm_vfp11_write_mask (destmask, fm + 1);
7200 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
7202 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
7203 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
7207 case 0: /* Two-reg transfer. We should catch these above. */
7210 case 2: /* fldm[sdx]. */
7214 unsigned int i, offset = insn & 0xff;
7219 for (i = fd; i < fd + offset; i++)
7220 bfd_arm_vfp11_write_mask (destmask, i);
7224 case 4: /* fld[sd]. */
7226 bfd_arm_vfp11_write_mask (destmask, fd);
7235 /* Single-register transfer. Note L==0. */
7236 else if ((insn & 0x0f100e10) == 0x0e000a10)
7238 unsigned int opcode = (insn >> 21) & 7;
7239 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
7243 case 0: /* fmsr/fmdlr. */
7244 case 1: /* fmdhr. */
7245 /* Mark fmdhr and fmdlr as writing to the whole of the DP
7246 destination register. I don't know if this is exactly right,
7247 but it is the conservative choice. */
7248 bfd_arm_vfp11_write_mask (destmask, fn);
7262 static int elf32_arm_compare_mapping (const void * a, const void * b);
7265 /* Look for potentially-troublesome code sequences which might trigger the
7266 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
7267 (available from ARM) for details of the erratum. A short version is
7268 described in ld.texinfo. */
7271 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
7274 bfd_byte *contents = NULL;
7276 int regs[3], numregs = 0;
7277 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7278 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
7280 if (globals == NULL)
7283 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
7284 The states transition as follows:
7286 0 -> 1 (vector) or 0 -> 2 (scalar)
7287 A VFP FMAC-pipeline instruction has been seen. Fill
7288 regs[0]..regs[numregs-1] with its input operands. Remember this
7289 instruction in 'first_fmac'.
7292 Any instruction, except for a VFP instruction which overwrites
7297 A VFP instruction has been seen which overwrites any of regs[*].
7298 We must make a veneer! Reset state to 0 before examining next
7302 If we fail to match anything in state 2, reset to state 0 and reset
7303 the instruction pointer to the instruction after 'first_fmac'.
7305 If the VFP11 vector mode is in use, there must be at least two unrelated
7306 instructions between anti-dependent VFP11 instructions to properly avoid
7307 triggering the erratum, hence the use of the extra state 1. */
7309 /* If we are only performing a partial link do not bother
7310 to construct any glue. */
7311 if (bfd_link_relocatable (link_info))
7314 /* Skip if this bfd does not correspond to an ELF image. */
7315 if (! is_arm_elf (abfd))
7318 /* We should have chosen a fix type by the time we get here. */
7319 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
7321 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
7324 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7325 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7328 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7330 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
7331 struct _arm_elf_section_data *sec_data;
7333 /* If we don't have executable progbits, we're not interested in this
7334 section. Also skip if section is to be excluded. */
7335 if (elf_section_type (sec) != SHT_PROGBITS
7336 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7337 || (sec->flags & SEC_EXCLUDE) != 0
7338 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7339 || sec->output_section == bfd_abs_section_ptr
7340 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
7343 sec_data = elf32_arm_section_data (sec);
7345 if (sec_data->mapcount == 0)
7348 if (elf_section_data (sec)->this_hdr.contents != NULL)
7349 contents = elf_section_data (sec)->this_hdr.contents;
7350 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7353 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7354 elf32_arm_compare_mapping);
7356 for (span = 0; span < sec_data->mapcount; span++)
7358 unsigned int span_start = sec_data->map[span].vma;
7359 unsigned int span_end = (span == sec_data->mapcount - 1)
7360 ? sec->size : sec_data->map[span + 1].vma;
7361 char span_type = sec_data->map[span].type;
7363 /* FIXME: Only ARM mode is supported at present. We may need to
7364 support Thumb-2 mode also at some point. */
7365 if (span_type != 'a')
7368 for (i = span_start; i < span_end;)
7370 unsigned int next_i = i + 4;
7371 unsigned int insn = bfd_big_endian (abfd)
7372 ? (contents[i] << 24)
7373 | (contents[i + 1] << 16)
7374 | (contents[i + 2] << 8)
7376 : (contents[i + 3] << 24)
7377 | (contents[i + 2] << 16)
7378 | (contents[i + 1] << 8)
7380 unsigned int writemask = 0;
7381 enum bfd_arm_vfp11_pipe vpipe;
7386 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
7388 /* I'm assuming the VFP11 erratum can trigger with denorm
7389 operands on either the FMAC or the DS pipeline. This might
7390 lead to slightly overenthusiastic veneer insertion. */
7391 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
7393 state = use_vector ? 1 : 2;
7395 veneer_of_insn = insn;
7401 int other_regs[3], other_numregs;
7402 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7405 if (vpipe != VFP11_BAD
7406 && bfd_arm_vfp11_antidependency (writemask, regs,
7416 int other_regs[3], other_numregs;
7417 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7420 if (vpipe != VFP11_BAD
7421 && bfd_arm_vfp11_antidependency (writemask, regs,
7427 next_i = first_fmac + 4;
7433 abort (); /* Should be unreachable. */
7438 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
7439 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7441 elf32_arm_section_data (sec)->erratumcount += 1;
7443 newerr->u.b.vfp_insn = veneer_of_insn;
7448 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
7455 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
7460 newerr->next = sec_data->erratumlist;
7461 sec_data->erratumlist = newerr;
7470 if (contents != NULL
7471 && elf_section_data (sec)->this_hdr.contents != contents)
7479 if (contents != NULL
7480 && elf_section_data (sec)->this_hdr.contents != contents)
7486 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
7487 after sections have been laid out, using specially-named symbols. */
7490 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
7491 struct bfd_link_info *link_info)
7494 struct elf32_arm_link_hash_table *globals;
7497 if (bfd_link_relocatable (link_info))
7500 /* Skip if this bfd does not correspond to an ELF image. */
7501 if (! is_arm_elf (abfd))
7504 globals = elf32_arm_hash_table (link_info);
7505 if (globals == NULL)
7508 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7509 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7511 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7513 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7514 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
7516 for (; errnode != NULL; errnode = errnode->next)
7518 struct elf_link_hash_entry *myh;
7521 switch (errnode->type)
7523 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
7524 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
7525 /* Find veneer symbol. */
7526 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7527 errnode->u.b.veneer->u.v.id);
7529 myh = elf_link_hash_lookup
7530 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7533 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7534 "`%s'"), abfd, tmp_name);
7536 vma = myh->root.u.def.section->output_section->vma
7537 + myh->root.u.def.section->output_offset
7538 + myh->root.u.def.value;
7540 errnode->u.b.veneer->vma = vma;
7543 case VFP11_ERRATUM_ARM_VENEER:
7544 case VFP11_ERRATUM_THUMB_VENEER:
7545 /* Find return location. */
7546 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7549 myh = elf_link_hash_lookup
7550 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7553 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7554 "`%s'"), abfd, tmp_name);
7556 vma = myh->root.u.def.section->output_section->vma
7557 + myh->root.u.def.section->output_offset
7558 + myh->root.u.def.value;
7560 errnode->u.v.branch->vma = vma;
7572 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
7573 return locations after sections have been laid out, using
7574 specially-named symbols. */
7577 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
7578 struct bfd_link_info *link_info)
7581 struct elf32_arm_link_hash_table *globals;
7584 if (bfd_link_relocatable (link_info))
7587 /* Skip if this bfd does not correspond to an ELF image. */
7588 if (! is_arm_elf (abfd))
7591 globals = elf32_arm_hash_table (link_info);
7592 if (globals == NULL)
7595 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7596 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7598 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7600 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7601 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
7603 for (; errnode != NULL; errnode = errnode->next)
7605 struct elf_link_hash_entry *myh;
7608 switch (errnode->type)
7610 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
7611 /* Find veneer symbol. */
7612 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7613 errnode->u.b.veneer->u.v.id);
7615 myh = elf_link_hash_lookup
7616 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7619 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7620 "`%s'"), abfd, tmp_name);
7622 vma = myh->root.u.def.section->output_section->vma
7623 + myh->root.u.def.section->output_offset
7624 + myh->root.u.def.value;
7626 errnode->u.b.veneer->vma = vma;
7629 case STM32L4XX_ERRATUM_VENEER:
7630 /* Find return location. */
7631 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7634 myh = elf_link_hash_lookup
7635 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7638 (*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7639 "`%s'"), abfd, tmp_name);
7641 vma = myh->root.u.def.section->output_section->vma
7642 + myh->root.u.def.section->output_offset
7643 + myh->root.u.def.value;
7645 errnode->u.v.branch->vma = vma;
7657 static inline bfd_boolean
7658 is_thumb2_ldmia (const insn32 insn)
7660 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
7661 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
7662 return (insn & 0xffd02000) == 0xe8900000;
7665 static inline bfd_boolean
7666 is_thumb2_ldmdb (const insn32 insn)
7668 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
7669 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
7670 return (insn & 0xffd02000) == 0xe9100000;
7673 static inline bfd_boolean
7674 is_thumb2_vldm (const insn32 insn)
7676 /* A6.5 Extension register load or store instruction
7678 We look for SP 32-bit and DP 64-bit registers.
7679 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
7680 <list> is consecutive 64-bit registers
7681 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
7682 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
7683 <list> is consecutive 32-bit registers
7684 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
7685 if P==0 && U==1 && W==1 && Rn=1101 VPOP
7686 if PUW=010 || PUW=011 || PUW=101 VLDM. */
7688 (((insn & 0xfe100f00) == 0xec100b00) ||
7689 ((insn & 0xfe100f00) == 0xec100a00))
7690 && /* (IA without !). */
7691 (((((insn << 7) >> 28) & 0xd) == 0x4)
7692 /* (IA with !), includes VPOP (when reg number is SP). */
7693 || ((((insn << 7) >> 28) & 0xd) == 0x5)
7695 || ((((insn << 7) >> 28) & 0xd) == 0x9));
7698 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
7700 - computes the number and the mode of memory accesses
7701 - decides if the replacement should be done:
7702 . replaces only if > 8-word accesses
7703 . or (testing purposes only) replaces all accesses. */
7706 stm32l4xx_need_create_replacing_stub (const insn32 insn,
7707 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
7711 /* The field encoding the register list is the same for both LDMIA
7712 and LDMDB encodings. */
7713 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
7714 nb_words = popcount (insn & 0x0000ffff);
7715 else if (is_thumb2_vldm (insn))
7716 nb_words = (insn & 0xff);
7718 /* DEFAULT mode accounts for the real bug condition situation,
7719 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
7721 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
7722 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
7725 /* Look for potentially-troublesome code sequences which might trigger
7726 the STM STM32L4XX erratum. */
7729 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
7730 struct bfd_link_info *link_info)
7733 bfd_byte *contents = NULL;
7734 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7736 if (globals == NULL)
7739 /* If we are only performing a partial link do not bother
7740 to construct any glue. */
7741 if (bfd_link_relocatable (link_info))
7744 /* Skip if this bfd does not correspond to an ELF image. */
7745 if (! is_arm_elf (abfd))
7748 if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
7751 /* Skip this BFD if it corresponds to an executable or dynamic object. */
7752 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7755 for (sec = abfd->sections; sec != NULL; sec = sec->next)
7757 unsigned int i, span;
7758 struct _arm_elf_section_data *sec_data;
7760 /* If we don't have executable progbits, we're not interested in this
7761 section. Also skip if section is to be excluded. */
7762 if (elf_section_type (sec) != SHT_PROGBITS
7763 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7764 || (sec->flags & SEC_EXCLUDE) != 0
7765 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7766 || sec->output_section == bfd_abs_section_ptr
7767 || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
7770 sec_data = elf32_arm_section_data (sec);
7772 if (sec_data->mapcount == 0)
7775 if (elf_section_data (sec)->this_hdr.contents != NULL)
7776 contents = elf_section_data (sec)->this_hdr.contents;
7777 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7780 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7781 elf32_arm_compare_mapping);
7783 for (span = 0; span < sec_data->mapcount; span++)
7785 unsigned int span_start = sec_data->map[span].vma;
7786 unsigned int span_end = (span == sec_data->mapcount - 1)
7787 ? sec->size : sec_data->map[span + 1].vma;
7788 char span_type = sec_data->map[span].type;
7789 int itblock_current_pos = 0;
7791 /* Only Thumb2 mode need be supported with this CM4 specific
7792 code, we should not encounter any arm mode eg span_type
7794 if (span_type != 't')
7797 for (i = span_start; i < span_end;)
7799 unsigned int insn = bfd_get_16 (abfd, &contents[i]);
7800 bfd_boolean insn_32bit = FALSE;
7801 bfd_boolean is_ldm = FALSE;
7802 bfd_boolean is_vldm = FALSE;
7803 bfd_boolean is_not_last_in_it_block = FALSE;
7805 /* The first 16-bits of all 32-bit thumb2 instructions start
7806 with opcode[15..13]=0b111 and the encoded op1 can be anything
7807 except opcode[12..11]!=0b00.
7808 See 32-bit Thumb instruction encoding. */
7809 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
7812 /* Compute the predicate that tells if the instruction
7813 is concerned by the IT block
7814 - Creates an error if there is a ldm that is not
7815 last in the IT block thus cannot be replaced
7816 - Otherwise we can create a branch at the end of the
7817 IT block, it will be controlled naturally by IT
7818 with the proper pseudo-predicate
7819 - So the only interesting predicate is the one that
7820 tells that we are not on the last item of an IT
7822 if (itblock_current_pos != 0)
7823 is_not_last_in_it_block = !!--itblock_current_pos;
7827 /* Load the rest of the insn (in manual-friendly order). */
7828 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
7829 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
7830 is_vldm = is_thumb2_vldm (insn);
7832 /* Veneers are created for (v)ldm depending on
7833 option flags and memory accesses conditions; but
7834 if the instruction is not the last instruction of
7835 an IT block, we cannot create a jump there, so we
7837 if ((is_ldm || is_vldm) &&
7838 stm32l4xx_need_create_replacing_stub
7839 (insn, globals->stm32l4xx_fix))
7841 if (is_not_last_in_it_block)
7843 (*_bfd_error_handler)
7844 /* Note - overlong line used here to allow for translation. */
7846 %B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
7847 "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
7848 abfd, sec, (long)i);
7852 elf32_stm32l4xx_erratum_list *newerr =
7853 (elf32_stm32l4xx_erratum_list *)
7855 (sizeof (elf32_stm32l4xx_erratum_list));
7857 elf32_arm_section_data (sec)
7858 ->stm32l4xx_erratumcount += 1;
7859 newerr->u.b.insn = insn;
7860 /* We create only thumb branches. */
7862 STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
7863 record_stm32l4xx_erratum_veneer
7864 (link_info, newerr, abfd, sec,
7867 STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
7868 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
7870 newerr->next = sec_data->stm32l4xx_erratumlist;
7871 sec_data->stm32l4xx_erratumlist = newerr;
7878 IT blocks are only encoded in T1
7879 Encoding T1: IT{x{y{z}}} <firstcond>
7880 1 0 1 1 - 1 1 1 1 - firstcond - mask
7881 if mask = '0000' then see 'related encodings'
7882 We don't deal with UNPREDICTABLE, just ignore these.
7883 There can be no nested IT blocks so an IT block
7884 is naturally a new one for which it is worth
7885 computing its size. */
7886 bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) &&
7887 ((insn & 0x000f) != 0x0000);
7888 /* If we have a new IT block we compute its size. */
7891 /* Compute the number of instructions controlled
7892 by the IT block, it will be used to decide
7893 whether we are inside an IT block or not. */
7894 unsigned int mask = insn & 0x000f;
7895 itblock_current_pos = 4 - ctz (mask);
7899 i += insn_32bit ? 4 : 2;
7903 if (contents != NULL
7904 && elf_section_data (sec)->this_hdr.contents != contents)
7912 if (contents != NULL
7913 && elf_section_data (sec)->this_hdr.contents != contents)
7919 /* Set target relocation values needed during linking. */
7922 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
7923 struct bfd_link_info *link_info,
7925 char * target2_type,
7928 bfd_arm_vfp11_fix vfp11_fix,
7929 bfd_arm_stm32l4xx_fix stm32l4xx_fix,
7930 int no_enum_warn, int no_wchar_warn,
7931 int pic_veneer, int fix_cortex_a8,
7934 struct elf32_arm_link_hash_table *globals;
7936 globals = elf32_arm_hash_table (link_info);
7937 if (globals == NULL)
7940 globals->target1_is_rel = target1_is_rel;
7941 if (strcmp (target2_type, "rel") == 0)
7942 globals->target2_reloc = R_ARM_REL32;
7943 else if (strcmp (target2_type, "abs") == 0)
7944 globals->target2_reloc = R_ARM_ABS32;
7945 else if (strcmp (target2_type, "got-rel") == 0)
7946 globals->target2_reloc = R_ARM_GOT_PREL;
7949 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
7952 globals->fix_v4bx = fix_v4bx;
7953 globals->use_blx |= use_blx;
7954 globals->vfp11_fix = vfp11_fix;
7955 globals->stm32l4xx_fix = stm32l4xx_fix;
7956 globals->pic_veneer = pic_veneer;
7957 globals->fix_cortex_a8 = fix_cortex_a8;
7958 globals->fix_arm1176 = fix_arm1176;
7960 BFD_ASSERT (is_arm_elf (output_bfd));
7961 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
7962 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
7965 /* Replace the target offset of a Thumb bl or b.w instruction. */
7968 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
7974 BFD_ASSERT ((offset & 1) == 0);
7976 upper = bfd_get_16 (abfd, insn);
7977 lower = bfd_get_16 (abfd, insn + 2);
7978 reloc_sign = (offset < 0) ? 1 : 0;
7979 upper = (upper & ~(bfd_vma) 0x7ff)
7980 | ((offset >> 12) & 0x3ff)
7981 | (reloc_sign << 10);
7982 lower = (lower & ~(bfd_vma) 0x2fff)
7983 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
7984 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
7985 | ((offset >> 1) & 0x7ff);
7986 bfd_put_16 (abfd, upper, insn);
7987 bfd_put_16 (abfd, lower, insn + 2);
7990 /* Thumb code calling an ARM function. */
7993 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
7997 asection * input_section,
7998 bfd_byte * hit_data,
8001 bfd_signed_vma addend,
8003 char **error_message)
8007 long int ret_offset;
8008 struct elf_link_hash_entry * myh;
8009 struct elf32_arm_link_hash_table * globals;
8011 myh = find_thumb_glue (info, name, error_message);
8015 globals = elf32_arm_hash_table (info);
8016 BFD_ASSERT (globals != NULL);
8017 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8019 my_offset = myh->root.u.def.value;
8021 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8022 THUMB2ARM_GLUE_SECTION_NAME);
8024 BFD_ASSERT (s != NULL);
8025 BFD_ASSERT (s->contents != NULL);
8026 BFD_ASSERT (s->output_section != NULL);
8028 if ((my_offset & 0x01) == 0x01)
8031 && sym_sec->owner != NULL
8032 && !INTERWORK_FLAG (sym_sec->owner))
8034 (*_bfd_error_handler)
8035 (_("%B(%s): warning: interworking not enabled.\n"
8036 " first occurrence: %B: Thumb call to ARM"),
8037 sym_sec->owner, input_bfd, name);
8043 myh->root.u.def.value = my_offset;
8045 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
8046 s->contents + my_offset);
8048 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
8049 s->contents + my_offset + 2);
8052 /* Address of destination of the stub. */
8053 ((bfd_signed_vma) val)
8055 /* Offset from the start of the current section
8056 to the start of the stubs. */
8058 /* Offset of the start of this stub from the start of the stubs. */
8060 /* Address of the start of the current section. */
8061 + s->output_section->vma)
8062 /* The branch instruction is 4 bytes into the stub. */
8064 /* ARM branches work from the pc of the instruction + 8. */
8067 put_arm_insn (globals, output_bfd,
8068 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
8069 s->contents + my_offset + 4);
8072 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
8074 /* Now go back and fix up the original BL insn to point to here. */
8076 /* Address of where the stub is located. */
8077 (s->output_section->vma + s->output_offset + my_offset)
8078 /* Address of where the BL is located. */
8079 - (input_section->output_section->vma + input_section->output_offset
8081 /* Addend in the relocation. */
8083 /* Biassing for PC-relative addressing. */
8086 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
8091 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
8093 static struct elf_link_hash_entry *
8094 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
8101 char ** error_message)
8104 long int ret_offset;
8105 struct elf_link_hash_entry * myh;
8106 struct elf32_arm_link_hash_table * globals;
8108 myh = find_arm_glue (info, name, error_message);
8112 globals = elf32_arm_hash_table (info);
8113 BFD_ASSERT (globals != NULL);
8114 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8116 my_offset = myh->root.u.def.value;
8118 if ((my_offset & 0x01) == 0x01)
8121 && sym_sec->owner != NULL
8122 && !INTERWORK_FLAG (sym_sec->owner))
8124 (*_bfd_error_handler)
8125 (_("%B(%s): warning: interworking not enabled.\n"
8126 " first occurrence: %B: arm call to thumb"),
8127 sym_sec->owner, input_bfd, name);
8131 myh->root.u.def.value = my_offset;
8133 if (bfd_link_pic (info)
8134 || globals->root.is_relocatable_executable
8135 || globals->pic_veneer)
8137 /* For relocatable objects we can't use absolute addresses,
8138 so construct the address from a relative offset. */
8139 /* TODO: If the offset is small it's probably worth
8140 constructing the address with adds. */
8141 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
8142 s->contents + my_offset);
8143 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
8144 s->contents + my_offset + 4);
8145 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
8146 s->contents + my_offset + 8);
8147 /* Adjust the offset by 4 for the position of the add,
8148 and 8 for the pipeline offset. */
8149 ret_offset = (val - (s->output_offset
8150 + s->output_section->vma
8153 bfd_put_32 (output_bfd, ret_offset,
8154 s->contents + my_offset + 12);
8156 else if (globals->use_blx)
8158 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
8159 s->contents + my_offset);
8161 /* It's a thumb address. Add the low order bit. */
8162 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
8163 s->contents + my_offset + 4);
8167 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
8168 s->contents + my_offset);
8170 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
8171 s->contents + my_offset + 4);
8173 /* It's a thumb address. Add the low order bit. */
8174 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
8175 s->contents + my_offset + 8);
8181 BFD_ASSERT (my_offset <= globals->arm_glue_size);
8186 /* Arm code calling a Thumb function. */
8189 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
8193 asection * input_section,
8194 bfd_byte * hit_data,
8197 bfd_signed_vma addend,
8199 char **error_message)
8201 unsigned long int tmp;
8204 long int ret_offset;
8205 struct elf_link_hash_entry * myh;
8206 struct elf32_arm_link_hash_table * globals;
8208 globals = elf32_arm_hash_table (info);
8209 BFD_ASSERT (globals != NULL);
8210 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8212 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8213 ARM2THUMB_GLUE_SECTION_NAME);
8214 BFD_ASSERT (s != NULL);
8215 BFD_ASSERT (s->contents != NULL);
8216 BFD_ASSERT (s->output_section != NULL);
8218 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
8219 sym_sec, val, s, error_message);
8223 my_offset = myh->root.u.def.value;
8224 tmp = bfd_get_32 (input_bfd, hit_data);
8225 tmp = tmp & 0xFF000000;
8227 /* Somehow these are both 4 too far, so subtract 8. */
8228 ret_offset = (s->output_offset
8230 + s->output_section->vma
8231 - (input_section->output_offset
8232 + input_section->output_section->vma
8236 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
8238 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
8243 /* Populate Arm stub for an exported Thumb function. */
8246 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
8248 struct bfd_link_info * info = (struct bfd_link_info *) inf;
8250 struct elf_link_hash_entry * myh;
8251 struct elf32_arm_link_hash_entry *eh;
8252 struct elf32_arm_link_hash_table * globals;
8255 char *error_message;
8257 eh = elf32_arm_hash_entry (h);
8258 /* Allocate stubs for exported Thumb functions on v4t. */
8259 if (eh->export_glue == NULL)
8262 globals = elf32_arm_hash_table (info);
8263 BFD_ASSERT (globals != NULL);
8264 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8266 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8267 ARM2THUMB_GLUE_SECTION_NAME);
8268 BFD_ASSERT (s != NULL);
8269 BFD_ASSERT (s->contents != NULL);
8270 BFD_ASSERT (s->output_section != NULL);
8272 sec = eh->export_glue->root.u.def.section;
8274 BFD_ASSERT (sec->output_section != NULL);
8276 val = eh->export_glue->root.u.def.value + sec->output_offset
8277 + sec->output_section->vma;
8279 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
8280 h->root.u.def.section->owner,
8281 globals->obfd, sec, val, s,
8287 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
8290 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
8295 struct elf32_arm_link_hash_table *globals;
8297 globals = elf32_arm_hash_table (info);
8298 BFD_ASSERT (globals != NULL);
8299 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8301 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8302 ARM_BX_GLUE_SECTION_NAME);
8303 BFD_ASSERT (s != NULL);
8304 BFD_ASSERT (s->contents != NULL);
8305 BFD_ASSERT (s->output_section != NULL);
8307 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
8309 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
8311 if ((globals->bx_glue_offset[reg] & 1) == 0)
8313 p = s->contents + glue_addr;
8314 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
8315 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
8316 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
8317 globals->bx_glue_offset[reg] |= 1;
8320 return glue_addr + s->output_section->vma + s->output_offset;
8323 /* Generate Arm stubs for exported Thumb symbols. */
8325 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
8326 struct bfd_link_info *link_info)
8328 struct elf32_arm_link_hash_table * globals;
8330 if (link_info == NULL)
8331 /* Ignore this if we are not called by the ELF backend linker. */
8334 globals = elf32_arm_hash_table (link_info);
8335 if (globals == NULL)
8338 /* If blx is available then exported Thumb symbols are OK and there is
8340 if (globals->use_blx)
8343 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
8347 /* Reserve space for COUNT dynamic relocations in relocation selection
8351 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
8352 bfd_size_type count)
8354 struct elf32_arm_link_hash_table *htab;
8356 htab = elf32_arm_hash_table (info);
8357 BFD_ASSERT (htab->root.dynamic_sections_created);
8360 sreloc->size += RELOC_SIZE (htab) * count;
8363 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
8364 dynamic, the relocations should go in SRELOC, otherwise they should
8365 go in the special .rel.iplt section. */
8368 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
8369 bfd_size_type count)
8371 struct elf32_arm_link_hash_table *htab;
8373 htab = elf32_arm_hash_table (info);
8374 if (!htab->root.dynamic_sections_created)
8375 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
8378 BFD_ASSERT (sreloc != NULL);
8379 sreloc->size += RELOC_SIZE (htab) * count;
8383 /* Add relocation REL to the end of relocation section SRELOC. */
8386 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
8387 asection *sreloc, Elf_Internal_Rela *rel)
8390 struct elf32_arm_link_hash_table *htab;
8392 htab = elf32_arm_hash_table (info);
8393 if (!htab->root.dynamic_sections_created
8394 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
8395 sreloc = htab->root.irelplt;
8398 loc = sreloc->contents;
8399 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
8400 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
8402 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
8405 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
8406 IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
8410 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
8411 bfd_boolean is_iplt_entry,
8412 union gotplt_union *root_plt,
8413 struct arm_plt_info *arm_plt)
8415 struct elf32_arm_link_hash_table *htab;
8419 htab = elf32_arm_hash_table (info);
8423 splt = htab->root.iplt;
8424 sgotplt = htab->root.igotplt;
8426 /* NaCl uses a special first entry in .iplt too. */
8427 if (htab->nacl_p && splt->size == 0)
8428 splt->size += htab->plt_header_size;
8430 /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
8431 elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
8435 splt = htab->root.splt;
8436 sgotplt = htab->root.sgotplt;
8438 /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
8439 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
8441 /* If this is the first .plt entry, make room for the special
8443 if (splt->size == 0)
8444 splt->size += htab->plt_header_size;
8446 htab->next_tls_desc_index++;
8449 /* Allocate the PLT entry itself, including any leading Thumb stub. */
8450 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
8451 splt->size += PLT_THUMB_STUB_SIZE;
8452 root_plt->offset = splt->size;
8453 splt->size += htab->plt_entry_size;
8455 if (!htab->symbian_p)
8457 /* We also need to make an entry in the .got.plt section, which
8458 will be placed in the .got section by the linker script. */
8460 arm_plt->got_offset = sgotplt->size;
8462 arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
8468 arm_movw_immediate (bfd_vma value)
8470 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
8474 arm_movt_immediate (bfd_vma value)
8476 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
8479 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
8480 the entry lives in .iplt and resolves to (*SYM_VALUE)().
8481 Otherwise, DYNINDX is the index of the symbol in the dynamic
8482 symbol table and SYM_VALUE is undefined.
8484 ROOT_PLT points to the offset of the PLT entry from the start of its
8485 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
8486 bookkeeping information.
8488 Returns FALSE if there was a problem. */
8491 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
8492 union gotplt_union *root_plt,
8493 struct arm_plt_info *arm_plt,
8494 int dynindx, bfd_vma sym_value)
8496 struct elf32_arm_link_hash_table *htab;
8502 Elf_Internal_Rela rel;
8503 bfd_vma plt_header_size;
8504 bfd_vma got_header_size;
8506 htab = elf32_arm_hash_table (info);
8508 /* Pick the appropriate sections and sizes. */
8511 splt = htab->root.iplt;
8512 sgot = htab->root.igotplt;
8513 srel = htab->root.irelplt;
8515 /* There are no reserved entries in .igot.plt, and no special
8516 first entry in .iplt. */
8517 got_header_size = 0;
8518 plt_header_size = 0;
8522 splt = htab->root.splt;
8523 sgot = htab->root.sgotplt;
8524 srel = htab->root.srelplt;
8526 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
8527 plt_header_size = htab->plt_header_size;
8529 BFD_ASSERT (splt != NULL && srel != NULL);
8531 /* Fill in the entry in the procedure linkage table. */
8532 if (htab->symbian_p)
8534 BFD_ASSERT (dynindx >= 0);
8535 put_arm_insn (htab, output_bfd,
8536 elf32_arm_symbian_plt_entry[0],
8537 splt->contents + root_plt->offset);
8538 bfd_put_32 (output_bfd,
8539 elf32_arm_symbian_plt_entry[1],
8540 splt->contents + root_plt->offset + 4);
8542 /* Fill in the entry in the .rel.plt section. */
8543 rel.r_offset = (splt->output_section->vma
8544 + splt->output_offset
8545 + root_plt->offset + 4);
8546 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
8548 /* Get the index in the procedure linkage table which
8549 corresponds to this symbol. This is the index of this symbol
8550 in all the symbols for which we are making plt entries. The
8551 first entry in the procedure linkage table is reserved. */
8552 plt_index = ((root_plt->offset - plt_header_size)
8553 / htab->plt_entry_size);
8557 bfd_vma got_offset, got_address, plt_address;
8558 bfd_vma got_displacement, initial_got_entry;
8561 BFD_ASSERT (sgot != NULL);
8563 /* Get the offset into the .(i)got.plt table of the entry that
8564 corresponds to this function. */
8565 got_offset = (arm_plt->got_offset & -2);
8567 /* Get the index in the procedure linkage table which
8568 corresponds to this symbol. This is the index of this symbol
8569 in all the symbols for which we are making plt entries.
8570 After the reserved .got.plt entries, all symbols appear in
8571 the same order as in .plt. */
8572 plt_index = (got_offset - got_header_size) / 4;
8574 /* Calculate the address of the GOT entry. */
8575 got_address = (sgot->output_section->vma
8576 + sgot->output_offset
8579 /* ...and the address of the PLT entry. */
8580 plt_address = (splt->output_section->vma
8581 + splt->output_offset
8582 + root_plt->offset);
8584 ptr = splt->contents + root_plt->offset;
8585 if (htab->vxworks_p && bfd_link_pic (info))
8590 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
8592 val = elf32_arm_vxworks_shared_plt_entry[i];
8594 val |= got_address - sgot->output_section->vma;
8596 val |= plt_index * RELOC_SIZE (htab);
8597 if (i == 2 || i == 5)
8598 bfd_put_32 (output_bfd, val, ptr);
8600 put_arm_insn (htab, output_bfd, val, ptr);
8603 else if (htab->vxworks_p)
8608 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
8610 val = elf32_arm_vxworks_exec_plt_entry[i];
8614 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
8616 val |= plt_index * RELOC_SIZE (htab);
8617 if (i == 2 || i == 5)
8618 bfd_put_32 (output_bfd, val, ptr);
8620 put_arm_insn (htab, output_bfd, val, ptr);
8623 loc = (htab->srelplt2->contents
8624 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
8626 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
8627 referencing the GOT for this PLT entry. */
8628 rel.r_offset = plt_address + 8;
8629 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
8630 rel.r_addend = got_offset;
8631 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
8632 loc += RELOC_SIZE (htab);
8634 /* Create the R_ARM_ABS32 relocation referencing the
8635 beginning of the PLT for this GOT entry. */
8636 rel.r_offset = got_address;
8637 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
8639 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
8641 else if (htab->nacl_p)
8643 /* Calculate the displacement between the PLT slot and the
8644 common tail that's part of the special initial PLT slot. */
8645 int32_t tail_displacement
8646 = ((splt->output_section->vma + splt->output_offset
8647 + ARM_NACL_PLT_TAIL_OFFSET)
8648 - (plt_address + htab->plt_entry_size + 4));
8649 BFD_ASSERT ((tail_displacement & 3) == 0);
8650 tail_displacement >>= 2;
8652 BFD_ASSERT ((tail_displacement & 0xff000000) == 0
8653 || (-tail_displacement & 0xff000000) == 0);
8655 /* Calculate the displacement between the PLT slot and the entry
8656 in the GOT. The offset accounts for the value produced by
8657 adding to pc in the penultimate instruction of the PLT stub. */
8658 got_displacement = (got_address
8659 - (plt_address + htab->plt_entry_size));
8661 /* NaCl does not support interworking at all. */
8662 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
8664 put_arm_insn (htab, output_bfd,
8665 elf32_arm_nacl_plt_entry[0]
8666 | arm_movw_immediate (got_displacement),
8668 put_arm_insn (htab, output_bfd,
8669 elf32_arm_nacl_plt_entry[1]
8670 | arm_movt_immediate (got_displacement),
8672 put_arm_insn (htab, output_bfd,
8673 elf32_arm_nacl_plt_entry[2],
8675 put_arm_insn (htab, output_bfd,
8676 elf32_arm_nacl_plt_entry[3]
8677 | (tail_displacement & 0x00ffffff),
8680 else if (using_thumb_only (htab))
8682 /* PR ld/16017: Generate thumb only PLT entries. */
8683 if (!using_thumb2 (htab))
8685 /* FIXME: We ought to be able to generate thumb-1 PLT
8687 _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
8692 /* Calculate the displacement between the PLT slot and the entry in
8693 the GOT. The 12-byte offset accounts for the value produced by
8694 adding to pc in the 3rd instruction of the PLT stub. */
8695 got_displacement = got_address - (plt_address + 12);
8697 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
8698 instead of 'put_thumb_insn'. */
8699 put_arm_insn (htab, output_bfd,
8700 elf32_thumb2_plt_entry[0]
8701 | ((got_displacement & 0x000000ff) << 16)
8702 | ((got_displacement & 0x00000700) << 20)
8703 | ((got_displacement & 0x00000800) >> 1)
8704 | ((got_displacement & 0x0000f000) >> 12),
8706 put_arm_insn (htab, output_bfd,
8707 elf32_thumb2_plt_entry[1]
8708 | ((got_displacement & 0x00ff0000) )
8709 | ((got_displacement & 0x07000000) << 4)
8710 | ((got_displacement & 0x08000000) >> 17)
8711 | ((got_displacement & 0xf0000000) >> 28),
8713 put_arm_insn (htab, output_bfd,
8714 elf32_thumb2_plt_entry[2],
8716 put_arm_insn (htab, output_bfd,
8717 elf32_thumb2_plt_entry[3],
8722 /* Calculate the displacement between the PLT slot and the
8723 entry in the GOT. The eight-byte offset accounts for the
8724 value produced by adding to pc in the first instruction
8726 got_displacement = got_address - (plt_address + 8);
8728 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
8730 put_thumb_insn (htab, output_bfd,
8731 elf32_arm_plt_thumb_stub[0], ptr - 4);
8732 put_thumb_insn (htab, output_bfd,
8733 elf32_arm_plt_thumb_stub[1], ptr - 2);
8736 if (!elf32_arm_use_long_plt_entry)
8738 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
8740 put_arm_insn (htab, output_bfd,
8741 elf32_arm_plt_entry_short[0]
8742 | ((got_displacement & 0x0ff00000) >> 20),
8744 put_arm_insn (htab, output_bfd,
8745 elf32_arm_plt_entry_short[1]
8746 | ((got_displacement & 0x000ff000) >> 12),
8748 put_arm_insn (htab, output_bfd,
8749 elf32_arm_plt_entry_short[2]
8750 | (got_displacement & 0x00000fff),
8752 #ifdef FOUR_WORD_PLT
8753 bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
8758 put_arm_insn (htab, output_bfd,
8759 elf32_arm_plt_entry_long[0]
8760 | ((got_displacement & 0xf0000000) >> 28),
8762 put_arm_insn (htab, output_bfd,
8763 elf32_arm_plt_entry_long[1]
8764 | ((got_displacement & 0x0ff00000) >> 20),
8766 put_arm_insn (htab, output_bfd,
8767 elf32_arm_plt_entry_long[2]
8768 | ((got_displacement & 0x000ff000) >> 12),
8770 put_arm_insn (htab, output_bfd,
8771 elf32_arm_plt_entry_long[3]
8772 | (got_displacement & 0x00000fff),
8777 /* Fill in the entry in the .rel(a).(i)plt section. */
8778 rel.r_offset = got_address;
8782 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
8783 The dynamic linker or static executable then calls SYM_VALUE
8784 to determine the correct run-time value of the .igot.plt entry. */
8785 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
8786 initial_got_entry = sym_value;
8790 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
8791 initial_got_entry = (splt->output_section->vma
8792 + splt->output_offset);
8795 /* Fill in the entry in the global offset table. */
8796 bfd_put_32 (output_bfd, initial_got_entry,
8797 sgot->contents + got_offset);
8801 elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
8804 loc = srel->contents + plt_index * RELOC_SIZE (htab);
8805 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
8811 /* Some relocations map to different relocations depending on the
8812 target. Return the real relocation. */
8815 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
8821 if (globals->target1_is_rel)
8827 return globals->target2_reloc;
8834 /* Return the base VMA address which should be subtracted from real addresses
8835 when resolving @dtpoff relocation.
8836 This is PT_TLS segment p_vaddr. */
8839 dtpoff_base (struct bfd_link_info *info)
8841 /* If tls_sec is NULL, we should have signalled an error already. */
8842 if (elf_hash_table (info)->tls_sec == NULL)
8844 return elf_hash_table (info)->tls_sec->vma;
8847 /* Return the relocation value for @tpoff relocation
8848 if STT_TLS virtual address is ADDRESS. */
8851 tpoff (struct bfd_link_info *info, bfd_vma address)
8853 struct elf_link_hash_table *htab = elf_hash_table (info);
8856 /* If tls_sec is NULL, we should have signalled an error already. */
8857 if (htab->tls_sec == NULL)
8859 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
8860 return address - htab->tls_sec->vma + base;
8863 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
8864 VALUE is the relocation value. */
8866 static bfd_reloc_status_type
8867 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
8870 return bfd_reloc_overflow;
8872 value |= bfd_get_32 (abfd, data) & 0xfffff000;
8873 bfd_put_32 (abfd, value, data);
8874 return bfd_reloc_ok;
8877 /* Handle TLS relaxations. Relaxing is possible for symbols that use
8878 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
8879 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
8881 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
8882 is to then call final_link_relocate. Return other values in the
8885 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
8886 the pre-relaxed code. It would be nice if the relocs were updated
8887 to match the optimization. */
8889 static bfd_reloc_status_type
8890 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
8891 bfd *input_bfd, asection *input_sec, bfd_byte *contents,
8892 Elf_Internal_Rela *rel, unsigned long is_local)
8896 switch (ELF32_R_TYPE (rel->r_info))
8899 return bfd_reloc_notsupported;
8901 case R_ARM_TLS_GOTDESC:
8906 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
8908 insn -= 5; /* THUMB */
8910 insn -= 8; /* ARM */
8912 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
8913 return bfd_reloc_continue;
8915 case R_ARM_THM_TLS_DESCSEQ:
8917 insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
8918 if ((insn & 0xff78) == 0x4478) /* add rx, pc */
8922 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
8924 else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
8928 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
8931 bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
8933 else if ((insn & 0xff87) == 0x4780) /* blx rx */
8937 bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
8940 bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
8941 contents + rel->r_offset);
8945 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
8946 /* It's a 32 bit instruction, fetch the rest of it for
8947 error generation. */
8949 | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
8950 (*_bfd_error_handler)
8951 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
8952 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
8953 return bfd_reloc_notsupported;
8957 case R_ARM_TLS_DESCSEQ:
8959 insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
8960 if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
8964 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
8965 contents + rel->r_offset);
8967 else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
8971 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
8974 bfd_put_32 (input_bfd, insn & 0xfffff000,
8975 contents + rel->r_offset);
8977 else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
8981 bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
8984 bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
8985 contents + rel->r_offset);
8989 (*_bfd_error_handler)
8990 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
8991 input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
8992 return bfd_reloc_notsupported;
8996 case R_ARM_TLS_CALL:
8997 /* GD->IE relaxation, turn the instruction into 'nop' or
8998 'ldr r0, [pc,r0]' */
8999 insn = is_local ? 0xe1a00000 : 0xe79f0000;
9000 bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
9003 case R_ARM_THM_TLS_CALL:
9004 /* GD->IE relaxation. */
9006 /* add r0,pc; ldr r0, [r0] */
9008 else if (using_thumb2 (globals))
9015 bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
9016 bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
9019 return bfd_reloc_ok;
9022 /* For a given value of n, calculate the value of G_n as required to
9023 deal with group relocations. We return it in the form of an
9024 encoded constant-and-rotation, together with the final residual. If n is
9025 specified as less than zero, then final_residual is filled with the
9026 input value and no further action is performed. */
9029 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
9033 bfd_vma encoded_g_n = 0;
9034 bfd_vma residual = value; /* Also known as Y_n. */
9036 for (current_n = 0; current_n <= n; current_n++)
9040 /* Calculate which part of the value to mask. */
9047 /* Determine the most significant bit in the residual and
9048 align the resulting value to a 2-bit boundary. */
9049 for (msb = 30; msb >= 0; msb -= 2)
9050 if (residual & (3 << msb))
9053 /* The desired shift is now (msb - 6), or zero, whichever
9060 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
9061 g_n = residual & (0xff << shift);
9062 encoded_g_n = (g_n >> shift)
9063 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
9065 /* Calculate the residual for the next time around. */
9069 *final_residual = residual;
9074 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
9075 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
9078 identify_add_or_sub (bfd_vma insn)
9080 int opcode = insn & 0x1e00000;
9082 if (opcode == 1 << 23) /* ADD */
9085 if (opcode == 1 << 22) /* SUB */
9091 /* Perform a relocation as part of a final link. */
9093 static bfd_reloc_status_type
9094 elf32_arm_final_link_relocate (reloc_howto_type * howto,
9097 asection * input_section,
9098 bfd_byte * contents,
9099 Elf_Internal_Rela * rel,
9101 struct bfd_link_info * info,
9103 const char * sym_name,
9104 unsigned char st_type,
9105 enum arm_st_branch_type branch_type,
9106 struct elf_link_hash_entry * h,
9107 bfd_boolean * unresolved_reloc_p,
9108 char ** error_message)
9110 unsigned long r_type = howto->type;
9111 unsigned long r_symndx;
9112 bfd_byte * hit_data = contents + rel->r_offset;
9113 bfd_vma * local_got_offsets;
9114 bfd_vma * local_tlsdesc_gotents;
9117 asection * sreloc = NULL;
9120 bfd_signed_vma signed_addend;
9121 unsigned char dynreloc_st_type;
9122 bfd_vma dynreloc_value;
9123 struct elf32_arm_link_hash_table * globals;
9124 struct elf32_arm_link_hash_entry *eh;
9125 union gotplt_union *root_plt;
9126 struct arm_plt_info *arm_plt;
9128 bfd_vma gotplt_offset;
9129 bfd_boolean has_iplt_entry;
9131 globals = elf32_arm_hash_table (info);
9132 if (globals == NULL)
9133 return bfd_reloc_notsupported;
9135 BFD_ASSERT (is_arm_elf (input_bfd));
9137 /* Some relocation types map to different relocations depending on the
9138 target. We pick the right one here. */
9139 r_type = arm_real_reloc_type (globals, r_type);
9141 /* It is possible to have linker relaxations on some TLS access
9142 models. Update our information here. */
9143 r_type = elf32_arm_tls_transition (info, r_type, h);
9145 if (r_type != howto->type)
9146 howto = elf32_arm_howto_from_type (r_type);
9148 eh = (struct elf32_arm_link_hash_entry *) h;
9149 sgot = globals->root.sgot;
9150 local_got_offsets = elf_local_got_offsets (input_bfd);
9151 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
9153 if (globals->root.dynamic_sections_created)
9154 srelgot = globals->root.srelgot;
9158 r_symndx = ELF32_R_SYM (rel->r_info);
9160 if (globals->use_rel)
9162 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
9164 if (addend & ((howto->src_mask + 1) >> 1))
9167 signed_addend &= ~ howto->src_mask;
9168 signed_addend |= addend;
9171 signed_addend = addend;
9174 addend = signed_addend = rel->r_addend;
9176 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
9177 are resolving a function call relocation. */
9178 if (using_thumb_only (globals)
9179 && (r_type == R_ARM_THM_CALL
9180 || r_type == R_ARM_THM_JUMP24)
9181 && branch_type == ST_BRANCH_TO_ARM)
9182 branch_type = ST_BRANCH_TO_THUMB;
9184 /* Record the symbol information that should be used in dynamic
9186 dynreloc_st_type = st_type;
9187 dynreloc_value = value;
9188 if (branch_type == ST_BRANCH_TO_THUMB)
9189 dynreloc_value |= 1;
9191 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
9192 VALUE appropriately for relocations that we resolve at link time. */
9193 has_iplt_entry = FALSE;
9194 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
9195 && root_plt->offset != (bfd_vma) -1)
9197 plt_offset = root_plt->offset;
9198 gotplt_offset = arm_plt->got_offset;
9200 if (h == NULL || eh->is_iplt)
9202 has_iplt_entry = TRUE;
9203 splt = globals->root.iplt;
9205 /* Populate .iplt entries here, because not all of them will
9206 be seen by finish_dynamic_symbol. The lower bit is set if
9207 we have already populated the entry. */
9212 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
9213 -1, dynreloc_value))
9214 root_plt->offset |= 1;
9216 return bfd_reloc_notsupported;
9219 /* Static relocations always resolve to the .iplt entry. */
9221 value = (splt->output_section->vma
9222 + splt->output_offset
9224 branch_type = ST_BRANCH_TO_ARM;
9226 /* If there are non-call relocations that resolve to the .iplt
9227 entry, then all dynamic ones must too. */
9228 if (arm_plt->noncall_refcount != 0)
9230 dynreloc_st_type = st_type;
9231 dynreloc_value = value;
9235 /* We populate the .plt entry in finish_dynamic_symbol. */
9236 splt = globals->root.splt;
9241 plt_offset = (bfd_vma) -1;
9242 gotplt_offset = (bfd_vma) -1;
9248 /* We don't need to find a value for this symbol. It's just a
9250 *unresolved_reloc_p = FALSE;
9251 return bfd_reloc_ok;
9254 if (!globals->vxworks_p)
9255 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9259 case R_ARM_ABS32_NOI:
9261 case R_ARM_REL32_NOI:
9267 /* Handle relocations which should use the PLT entry. ABS32/REL32
9268 will use the symbol's value, which may point to a PLT entry, but we
9269 don't need to handle that here. If we created a PLT entry, all
9270 branches in this object should go to it, except if the PLT is too
9271 far away, in which case a long branch stub should be inserted. */
9272 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
9273 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
9274 && r_type != R_ARM_CALL
9275 && r_type != R_ARM_JUMP24
9276 && r_type != R_ARM_PLT32)
9277 && plt_offset != (bfd_vma) -1)
9279 /* If we've created a .plt section, and assigned a PLT entry
9280 to this function, it must either be a STT_GNU_IFUNC reference
9281 or not be known to bind locally. In other cases, we should
9282 have cleared the PLT entry by now. */
9283 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
9285 value = (splt->output_section->vma
9286 + splt->output_offset
9288 *unresolved_reloc_p = FALSE;
9289 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9290 contents, rel->r_offset, value,
9294 /* When generating a shared object or relocatable executable, these
9295 relocations are copied into the output file to be resolved at
9297 if ((bfd_link_pic (info)
9298 || globals->root.is_relocatable_executable)
9299 && (input_section->flags & SEC_ALLOC)
9300 && !(globals->vxworks_p
9301 && strcmp (input_section->output_section->name,
9303 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
9304 || !SYMBOL_CALLS_LOCAL (info, h))
9305 && !(input_bfd == globals->stub_bfd
9306 && strstr (input_section->name, STUB_SUFFIX))
9308 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9309 || h->root.type != bfd_link_hash_undefweak)
9310 && r_type != R_ARM_PC24
9311 && r_type != R_ARM_CALL
9312 && r_type != R_ARM_JUMP24
9313 && r_type != R_ARM_PREL31
9314 && r_type != R_ARM_PLT32)
9316 Elf_Internal_Rela outrel;
9317 bfd_boolean skip, relocate;
9319 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
9322 char *v = _("shared object");
9324 if (bfd_link_executable (info))
9325 v = _("PIE executable");
9327 (*_bfd_error_handler)
9328 (_("%B: relocation %s against external or undefined symbol `%s'"
9329 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
9330 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
9331 return bfd_reloc_notsupported;
9334 *unresolved_reloc_p = FALSE;
9336 if (sreloc == NULL && globals->root.dynamic_sections_created)
9338 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
9339 ! globals->use_rel);
9342 return bfd_reloc_notsupported;
9348 outrel.r_addend = addend;
9350 _bfd_elf_section_offset (output_bfd, info, input_section,
9352 if (outrel.r_offset == (bfd_vma) -1)
9354 else if (outrel.r_offset == (bfd_vma) -2)
9355 skip = TRUE, relocate = TRUE;
9356 outrel.r_offset += (input_section->output_section->vma
9357 + input_section->output_offset);
9360 memset (&outrel, 0, sizeof outrel);
9363 && (!bfd_link_pic (info)
9364 || !SYMBOLIC_BIND (info, h)
9365 || !h->def_regular))
9366 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
9371 /* This symbol is local, or marked to become local. */
9372 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
9373 if (globals->symbian_p)
9377 /* On Symbian OS, the data segment and text segement
9378 can be relocated independently. Therefore, we
9379 must indicate the segment to which this
9380 relocation is relative. The BPABI allows us to
9381 use any symbol in the right segment; we just use
9382 the section symbol as it is convenient. (We
9383 cannot use the symbol given by "h" directly as it
9384 will not appear in the dynamic symbol table.)
9386 Note that the dynamic linker ignores the section
9387 symbol value, so we don't subtract osec->vma
9388 from the emitted reloc addend. */
9390 osec = sym_sec->output_section;
9392 osec = input_section->output_section;
9393 symbol = elf_section_data (osec)->dynindx;
9396 struct elf_link_hash_table *htab = elf_hash_table (info);
9398 if ((osec->flags & SEC_READONLY) == 0
9399 && htab->data_index_section != NULL)
9400 osec = htab->data_index_section;
9402 osec = htab->text_index_section;
9403 symbol = elf_section_data (osec)->dynindx;
9405 BFD_ASSERT (symbol != 0);
9408 /* On SVR4-ish systems, the dynamic loader cannot
9409 relocate the text and data segments independently,
9410 so the symbol does not matter. */
9412 if (dynreloc_st_type == STT_GNU_IFUNC)
9413 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
9414 to the .iplt entry. Instead, every non-call reference
9415 must use an R_ARM_IRELATIVE relocation to obtain the
9416 correct run-time address. */
9417 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
9419 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
9420 if (globals->use_rel)
9423 outrel.r_addend += dynreloc_value;
9426 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
9428 /* If this reloc is against an external symbol, we do not want to
9429 fiddle with the addend. Otherwise, we need to include the symbol
9430 value so that it becomes an addend for the dynamic reloc. */
9432 return bfd_reloc_ok;
9434 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9435 contents, rel->r_offset,
9436 dynreloc_value, (bfd_vma) 0);
9438 else switch (r_type)
9441 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9443 case R_ARM_XPC25: /* Arm BLX instruction. */
9446 case R_ARM_PC24: /* Arm B/BL instruction. */
9449 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
9451 if (r_type == R_ARM_XPC25)
9453 /* Check for Arm calling Arm function. */
9454 /* FIXME: Should we translate the instruction into a BL
9455 instruction instead ? */
9456 if (branch_type != ST_BRANCH_TO_THUMB)
9457 (*_bfd_error_handler)
9458 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
9460 h ? h->root.root.string : "(local)");
9462 else if (r_type == R_ARM_PC24)
9464 /* Check for Arm calling Thumb function. */
9465 if (branch_type == ST_BRANCH_TO_THUMB)
9467 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
9468 output_bfd, input_section,
9469 hit_data, sym_sec, rel->r_offset,
9470 signed_addend, value,
9472 return bfd_reloc_ok;
9474 return bfd_reloc_dangerous;
9478 /* Check if a stub has to be inserted because the
9479 destination is too far or we are changing mode. */
9480 if ( r_type == R_ARM_CALL
9481 || r_type == R_ARM_JUMP24
9482 || r_type == R_ARM_PLT32)
9484 enum elf32_arm_stub_type stub_type = arm_stub_none;
9485 struct elf32_arm_link_hash_entry *hash;
9487 hash = (struct elf32_arm_link_hash_entry *) h;
9488 stub_type = arm_type_of_stub (info, input_section, rel,
9489 st_type, &branch_type,
9490 hash, value, sym_sec,
9491 input_bfd, sym_name);
9493 if (stub_type != arm_stub_none)
9495 /* The target is out of reach, so redirect the
9496 branch to the local stub for this function. */
9497 stub_entry = elf32_arm_get_stub_entry (input_section,
9502 if (stub_entry != NULL)
9503 value = (stub_entry->stub_offset
9504 + stub_entry->stub_sec->output_offset
9505 + stub_entry->stub_sec->output_section->vma);
9507 if (plt_offset != (bfd_vma) -1)
9508 *unresolved_reloc_p = FALSE;
9513 /* If the call goes through a PLT entry, make sure to
9514 check distance to the right destination address. */
9515 if (plt_offset != (bfd_vma) -1)
9517 value = (splt->output_section->vma
9518 + splt->output_offset
9520 *unresolved_reloc_p = FALSE;
9521 /* The PLT entry is in ARM mode, regardless of the
9523 branch_type = ST_BRANCH_TO_ARM;
9528 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
9530 S is the address of the symbol in the relocation.
9531 P is address of the instruction being relocated.
9532 A is the addend (extracted from the instruction) in bytes.
9534 S is held in 'value'.
9535 P is the base address of the section containing the
9536 instruction plus the offset of the reloc into that
9538 (input_section->output_section->vma +
9539 input_section->output_offset +
9541 A is the addend, converted into bytes, ie:
9544 Note: None of these operations have knowledge of the pipeline
9545 size of the processor, thus it is up to the assembler to
9546 encode this information into the addend. */
9547 value -= (input_section->output_section->vma
9548 + input_section->output_offset);
9549 value -= rel->r_offset;
9550 if (globals->use_rel)
9551 value += (signed_addend << howto->size);
9553 /* RELA addends do not have to be adjusted by howto->size. */
9554 value += signed_addend;
9556 signed_addend = value;
9557 signed_addend >>= howto->rightshift;
9559 /* A branch to an undefined weak symbol is turned into a jump to
9560 the next instruction unless a PLT entry will be created.
9561 Do the same for local undefined symbols (but not for STN_UNDEF).
9562 The jump to the next instruction is optimized as a NOP depending
9563 on the architecture. */
9564 if (h ? (h->root.type == bfd_link_hash_undefweak
9565 && plt_offset == (bfd_vma) -1)
9566 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
9568 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
9570 if (arch_has_arm_nop (globals))
9571 value |= 0x0320f000;
9573 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
9577 /* Perform a signed range check. */
9578 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
9579 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
9580 return bfd_reloc_overflow;
9582 addend = (value & 2);
9584 value = (signed_addend & howto->dst_mask)
9585 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
9587 if (r_type == R_ARM_CALL)
9589 /* Set the H bit in the BLX instruction. */
9590 if (branch_type == ST_BRANCH_TO_THUMB)
9595 value &= ~(bfd_vma)(1 << 24);
9598 /* Select the correct instruction (BL or BLX). */
9599 /* Only if we are not handling a BL to a stub. In this
9600 case, mode switching is performed by the stub. */
9601 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
9603 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
9605 value &= ~(bfd_vma)(1 << 28);
9615 if (branch_type == ST_BRANCH_TO_THUMB)
9619 case R_ARM_ABS32_NOI:
9625 if (branch_type == ST_BRANCH_TO_THUMB)
9627 value -= (input_section->output_section->vma
9628 + input_section->output_offset + rel->r_offset);
9631 case R_ARM_REL32_NOI:
9633 value -= (input_section->output_section->vma
9634 + input_section->output_offset + rel->r_offset);
9638 value -= (input_section->output_section->vma
9639 + input_section->output_offset + rel->r_offset);
9640 value += signed_addend;
9641 if (! h || h->root.type != bfd_link_hash_undefweak)
9643 /* Check for overflow. */
9644 if ((value ^ (value >> 1)) & (1 << 30))
9645 return bfd_reloc_overflow;
9647 value &= 0x7fffffff;
9648 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
9649 if (branch_type == ST_BRANCH_TO_THUMB)
9654 bfd_put_32 (input_bfd, value, hit_data);
9655 return bfd_reloc_ok;
9658 /* PR 16202: Refectch the addend using the correct size. */
9659 if (globals->use_rel)
9660 addend = bfd_get_8 (input_bfd, hit_data);
9663 /* There is no way to tell whether the user intended to use a signed or
9664 unsigned addend. When checking for overflow we accept either,
9665 as specified by the AAELF. */
9666 if ((long) value > 0xff || (long) value < -0x80)
9667 return bfd_reloc_overflow;
9669 bfd_put_8 (input_bfd, value, hit_data);
9670 return bfd_reloc_ok;
9673 /* PR 16202: Refectch the addend using the correct size. */
9674 if (globals->use_rel)
9675 addend = bfd_get_16 (input_bfd, hit_data);
9678 /* See comment for R_ARM_ABS8. */
9679 if ((long) value > 0xffff || (long) value < -0x8000)
9680 return bfd_reloc_overflow;
9682 bfd_put_16 (input_bfd, value, hit_data);
9683 return bfd_reloc_ok;
9685 case R_ARM_THM_ABS5:
9686 /* Support ldr and str instructions for the thumb. */
9687 if (globals->use_rel)
9689 /* Need to refetch addend. */
9690 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9691 /* ??? Need to determine shift amount from operand size. */
9692 addend >>= howto->rightshift;
9696 /* ??? Isn't value unsigned? */
9697 if ((long) value > 0x1f || (long) value < -0x10)
9698 return bfd_reloc_overflow;
9700 /* ??? Value needs to be properly shifted into place first. */
9701 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
9702 bfd_put_16 (input_bfd, value, hit_data);
9703 return bfd_reloc_ok;
9705 case R_ARM_THM_ALU_PREL_11_0:
9706 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
9709 bfd_signed_vma relocation;
9711 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9712 | bfd_get_16 (input_bfd, hit_data + 2);
9714 if (globals->use_rel)
9716 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
9717 | ((insn & (1 << 26)) >> 15);
9718 if (insn & 0xf00000)
9719 signed_addend = -signed_addend;
9722 relocation = value + signed_addend;
9723 relocation -= Pa (input_section->output_section->vma
9724 + input_section->output_offset
9729 if (value >= 0x1000)
9730 return bfd_reloc_overflow;
9732 insn = (insn & 0xfb0f8f00) | (value & 0xff)
9733 | ((value & 0x700) << 4)
9734 | ((value & 0x800) << 15);
9738 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9739 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9741 return bfd_reloc_ok;
9745 /* PR 10073: This reloc is not generated by the GNU toolchain,
9746 but it is supported for compatibility with third party libraries
9747 generated by other compilers, specifically the ARM/IAR. */
9750 bfd_signed_vma relocation;
9752 insn = bfd_get_16 (input_bfd, hit_data);
9754 if (globals->use_rel)
9755 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
9757 relocation = value + addend;
9758 relocation -= Pa (input_section->output_section->vma
9759 + input_section->output_offset
9764 /* We do not check for overflow of this reloc. Although strictly
9765 speaking this is incorrect, it appears to be necessary in order
9766 to work with IAR generated relocs. Since GCC and GAS do not
9767 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
9768 a problem for them. */
9771 insn = (insn & 0xff00) | (value >> 2);
9773 bfd_put_16 (input_bfd, insn, hit_data);
9775 return bfd_reloc_ok;
9778 case R_ARM_THM_PC12:
9779 /* Corresponds to: ldr.w reg, [pc, #offset]. */
9782 bfd_signed_vma relocation;
9784 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9785 | bfd_get_16 (input_bfd, hit_data + 2);
9787 if (globals->use_rel)
9789 signed_addend = insn & 0xfff;
9790 if (!(insn & (1 << 23)))
9791 signed_addend = -signed_addend;
9794 relocation = value + signed_addend;
9795 relocation -= Pa (input_section->output_section->vma
9796 + input_section->output_offset
9801 if (value >= 0x1000)
9802 return bfd_reloc_overflow;
9804 insn = (insn & 0xff7ff000) | value;
9805 if (relocation >= 0)
9808 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9809 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9811 return bfd_reloc_ok;
9814 case R_ARM_THM_XPC22:
9815 case R_ARM_THM_CALL:
9816 case R_ARM_THM_JUMP24:
9817 /* Thumb BL (branch long instruction). */
9821 bfd_boolean overflow = FALSE;
9822 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9823 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9824 bfd_signed_vma reloc_signed_max;
9825 bfd_signed_vma reloc_signed_min;
9827 bfd_signed_vma signed_check;
9829 const int thumb2 = using_thumb2 (globals);
9831 /* A branch to an undefined weak symbol is turned into a jump to
9832 the next instruction unless a PLT entry will be created.
9833 The jump to the next instruction is optimized as a NOP.W for
9834 Thumb-2 enabled architectures. */
9835 if (h && h->root.type == bfd_link_hash_undefweak
9836 && plt_offset == (bfd_vma) -1)
9840 bfd_put_16 (input_bfd, 0xf3af, hit_data);
9841 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
9845 bfd_put_16 (input_bfd, 0xe000, hit_data);
9846 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
9848 return bfd_reloc_ok;
9851 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
9852 with Thumb-1) involving the J1 and J2 bits. */
9853 if (globals->use_rel)
9855 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
9856 bfd_vma upper = upper_insn & 0x3ff;
9857 bfd_vma lower = lower_insn & 0x7ff;
9858 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
9859 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
9860 bfd_vma i1 = j1 ^ s ? 0 : 1;
9861 bfd_vma i2 = j2 ^ s ? 0 : 1;
9863 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
9865 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
9867 signed_addend = addend;
9870 if (r_type == R_ARM_THM_XPC22)
9872 /* Check for Thumb to Thumb call. */
9873 /* FIXME: Should we translate the instruction into a BL
9874 instruction instead ? */
9875 if (branch_type == ST_BRANCH_TO_THUMB)
9876 (*_bfd_error_handler)
9877 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
9879 h ? h->root.root.string : "(local)");
9883 /* If it is not a call to Thumb, assume call to Arm.
9884 If it is a call relative to a section name, then it is not a
9885 function call at all, but rather a long jump. Calls through
9886 the PLT do not require stubs. */
9887 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
9889 if (globals->use_blx && r_type == R_ARM_THM_CALL)
9891 /* Convert BL to BLX. */
9892 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9894 else if (( r_type != R_ARM_THM_CALL)
9895 && (r_type != R_ARM_THM_JUMP24))
9897 if (elf32_thumb_to_arm_stub
9898 (info, sym_name, input_bfd, output_bfd, input_section,
9899 hit_data, sym_sec, rel->r_offset, signed_addend, value,
9901 return bfd_reloc_ok;
9903 return bfd_reloc_dangerous;
9906 else if (branch_type == ST_BRANCH_TO_THUMB
9908 && r_type == R_ARM_THM_CALL)
9910 /* Make sure this is a BL. */
9911 lower_insn |= 0x1800;
9915 enum elf32_arm_stub_type stub_type = arm_stub_none;
9916 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
9918 /* Check if a stub has to be inserted because the destination
9920 struct elf32_arm_stub_hash_entry *stub_entry;
9921 struct elf32_arm_link_hash_entry *hash;
9923 hash = (struct elf32_arm_link_hash_entry *) h;
9925 stub_type = arm_type_of_stub (info, input_section, rel,
9926 st_type, &branch_type,
9927 hash, value, sym_sec,
9928 input_bfd, sym_name);
9930 if (stub_type != arm_stub_none)
9932 /* The target is out of reach or we are changing modes, so
9933 redirect the branch to the local stub for this
9935 stub_entry = elf32_arm_get_stub_entry (input_section,
9939 if (stub_entry != NULL)
9941 value = (stub_entry->stub_offset
9942 + stub_entry->stub_sec->output_offset
9943 + stub_entry->stub_sec->output_section->vma);
9945 if (plt_offset != (bfd_vma) -1)
9946 *unresolved_reloc_p = FALSE;
9949 /* If this call becomes a call to Arm, force BLX. */
9950 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
9953 && !arm_stub_is_thumb (stub_entry->stub_type))
9954 || branch_type != ST_BRANCH_TO_THUMB)
9955 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9960 /* Handle calls via the PLT. */
9961 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
9963 value = (splt->output_section->vma
9964 + splt->output_offset
9967 if (globals->use_blx
9968 && r_type == R_ARM_THM_CALL
9969 && ! using_thumb_only (globals))
9971 /* If the Thumb BLX instruction is available, convert
9972 the BL to a BLX instruction to call the ARM-mode
9974 lower_insn = (lower_insn & ~0x1000) | 0x0800;
9975 branch_type = ST_BRANCH_TO_ARM;
9979 if (! using_thumb_only (globals))
9980 /* Target the Thumb stub before the ARM PLT entry. */
9981 value -= PLT_THUMB_STUB_SIZE;
9982 branch_type = ST_BRANCH_TO_THUMB;
9984 *unresolved_reloc_p = FALSE;
9987 relocation = value + signed_addend;
9989 relocation -= (input_section->output_section->vma
9990 + input_section->output_offset
9993 check = relocation >> howto->rightshift;
9995 /* If this is a signed value, the rightshift just dropped
9996 leading 1 bits (assuming twos complement). */
9997 if ((bfd_signed_vma) relocation >= 0)
9998 signed_check = check;
10000 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
10002 /* Calculate the permissable maximum and minimum values for
10003 this relocation according to whether we're relocating for
10005 bitsize = howto->bitsize;
10008 reloc_signed_max = (1 << (bitsize - 1)) - 1;
10009 reloc_signed_min = ~reloc_signed_max;
10011 /* Assumes two's complement. */
10012 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
10015 if ((lower_insn & 0x5000) == 0x4000)
10016 /* For a BLX instruction, make sure that the relocation is rounded up
10017 to a word boundary. This follows the semantics of the instruction
10018 which specifies that bit 1 of the target address will come from bit
10019 1 of the base address. */
10020 relocation = (relocation + 2) & ~ 3;
10022 /* Put RELOCATION back into the insn. Assumes two's complement.
10023 We use the Thumb-2 encoding, which is safe even if dealing with
10024 a Thumb-1 instruction by virtue of our overflow check above. */
10025 reloc_sign = (signed_check < 0) ? 1 : 0;
10026 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
10027 | ((relocation >> 12) & 0x3ff)
10028 | (reloc_sign << 10);
10029 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
10030 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
10031 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
10032 | ((relocation >> 1) & 0x7ff);
10034 /* Put the relocated value back in the object file: */
10035 bfd_put_16 (input_bfd, upper_insn, hit_data);
10036 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10038 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
10042 case R_ARM_THM_JUMP19:
10043 /* Thumb32 conditional branch instruction. */
10045 bfd_vma relocation;
10046 bfd_boolean overflow = FALSE;
10047 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
10048 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
10049 bfd_signed_vma reloc_signed_max = 0xffffe;
10050 bfd_signed_vma reloc_signed_min = -0x100000;
10051 bfd_signed_vma signed_check;
10052 enum elf32_arm_stub_type stub_type = arm_stub_none;
10053 struct elf32_arm_stub_hash_entry *stub_entry;
10054 struct elf32_arm_link_hash_entry *hash;
10056 /* Need to refetch the addend, reconstruct the top three bits,
10057 and squish the two 11 bit pieces together. */
10058 if (globals->use_rel)
10060 bfd_vma S = (upper_insn & 0x0400) >> 10;
10061 bfd_vma upper = (upper_insn & 0x003f);
10062 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
10063 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
10064 bfd_vma lower = (lower_insn & 0x07ff);
10068 upper |= (!S) << 8;
10069 upper -= 0x0100; /* Sign extend. */
10071 addend = (upper << 12) | (lower << 1);
10072 signed_addend = addend;
10075 /* Handle calls via the PLT. */
10076 if (plt_offset != (bfd_vma) -1)
10078 value = (splt->output_section->vma
10079 + splt->output_offset
10081 /* Target the Thumb stub before the ARM PLT entry. */
10082 value -= PLT_THUMB_STUB_SIZE;
10083 *unresolved_reloc_p = FALSE;
10086 hash = (struct elf32_arm_link_hash_entry *)h;
10088 stub_type = arm_type_of_stub (info, input_section, rel,
10089 st_type, &branch_type,
10090 hash, value, sym_sec,
10091 input_bfd, sym_name);
10092 if (stub_type != arm_stub_none)
10094 stub_entry = elf32_arm_get_stub_entry (input_section,
10098 if (stub_entry != NULL)
10100 value = (stub_entry->stub_offset
10101 + stub_entry->stub_sec->output_offset
10102 + stub_entry->stub_sec->output_section->vma);
10106 relocation = value + signed_addend;
10107 relocation -= (input_section->output_section->vma
10108 + input_section->output_offset
10110 signed_check = (bfd_signed_vma) relocation;
10112 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
10115 /* Put RELOCATION back into the insn. */
10117 bfd_vma S = (relocation & 0x00100000) >> 20;
10118 bfd_vma J2 = (relocation & 0x00080000) >> 19;
10119 bfd_vma J1 = (relocation & 0x00040000) >> 18;
10120 bfd_vma hi = (relocation & 0x0003f000) >> 12;
10121 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
10123 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
10124 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
10127 /* Put the relocated value back in the object file: */
10128 bfd_put_16 (input_bfd, upper_insn, hit_data);
10129 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10131 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
10134 case R_ARM_THM_JUMP11:
10135 case R_ARM_THM_JUMP8:
10136 case R_ARM_THM_JUMP6:
10137 /* Thumb B (branch) instruction). */
10139 bfd_signed_vma relocation;
10140 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
10141 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
10142 bfd_signed_vma signed_check;
10144 /* CZB cannot jump backward. */
10145 if (r_type == R_ARM_THM_JUMP6)
10146 reloc_signed_min = 0;
10148 if (globals->use_rel)
10150 /* Need to refetch addend. */
10151 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10152 if (addend & ((howto->src_mask + 1) >> 1))
10154 signed_addend = -1;
10155 signed_addend &= ~ howto->src_mask;
10156 signed_addend |= addend;
10159 signed_addend = addend;
10160 /* The value in the insn has been right shifted. We need to
10161 undo this, so that we can perform the address calculation
10162 in terms of bytes. */
10163 signed_addend <<= howto->rightshift;
10165 relocation = value + signed_addend;
10167 relocation -= (input_section->output_section->vma
10168 + input_section->output_offset
10171 relocation >>= howto->rightshift;
10172 signed_check = relocation;
10174 if (r_type == R_ARM_THM_JUMP6)
10175 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
10177 relocation &= howto->dst_mask;
10178 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
10180 bfd_put_16 (input_bfd, relocation, hit_data);
10182 /* Assumes two's complement. */
10183 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
10184 return bfd_reloc_overflow;
10186 return bfd_reloc_ok;
10189 case R_ARM_ALU_PCREL7_0:
10190 case R_ARM_ALU_PCREL15_8:
10191 case R_ARM_ALU_PCREL23_15:
10194 bfd_vma relocation;
10196 insn = bfd_get_32 (input_bfd, hit_data);
10197 if (globals->use_rel)
10199 /* Extract the addend. */
10200 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
10201 signed_addend = addend;
10203 relocation = value + signed_addend;
10205 relocation -= (input_section->output_section->vma
10206 + input_section->output_offset
10208 insn = (insn & ~0xfff)
10209 | ((howto->bitpos << 7) & 0xf00)
10210 | ((relocation >> howto->bitpos) & 0xff);
10211 bfd_put_32 (input_bfd, value, hit_data);
10213 return bfd_reloc_ok;
10215 case R_ARM_GNU_VTINHERIT:
10216 case R_ARM_GNU_VTENTRY:
10217 return bfd_reloc_ok;
10219 case R_ARM_GOTOFF32:
10220 /* Relocation is relative to the start of the
10221 global offset table. */
10223 BFD_ASSERT (sgot != NULL);
10225 return bfd_reloc_notsupported;
10227 /* If we are addressing a Thumb function, we need to adjust the
10228 address by one, so that attempts to call the function pointer will
10229 correctly interpret it as Thumb code. */
10230 if (branch_type == ST_BRANCH_TO_THUMB)
10233 /* Note that sgot->output_offset is not involved in this
10234 calculation. We always want the start of .got. If we
10235 define _GLOBAL_OFFSET_TABLE in a different way, as is
10236 permitted by the ABI, we might have to change this
10238 value -= sgot->output_section->vma;
10239 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10240 contents, rel->r_offset, value,
10244 /* Use global offset table as symbol value. */
10245 BFD_ASSERT (sgot != NULL);
10248 return bfd_reloc_notsupported;
10250 *unresolved_reloc_p = FALSE;
10251 value = sgot->output_section->vma;
10252 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10253 contents, rel->r_offset, value,
10257 case R_ARM_GOT_PREL:
10258 /* Relocation is to the entry for this symbol in the
10259 global offset table. */
10261 return bfd_reloc_notsupported;
10263 if (dynreloc_st_type == STT_GNU_IFUNC
10264 && plt_offset != (bfd_vma) -1
10265 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
10267 /* We have a relocation against a locally-binding STT_GNU_IFUNC
10268 symbol, and the relocation resolves directly to the runtime
10269 target rather than to the .iplt entry. This means that any
10270 .got entry would be the same value as the .igot.plt entry,
10271 so there's no point creating both. */
10272 sgot = globals->root.igotplt;
10273 value = sgot->output_offset + gotplt_offset;
10275 else if (h != NULL)
10279 off = h->got.offset;
10280 BFD_ASSERT (off != (bfd_vma) -1);
10281 if ((off & 1) != 0)
10283 /* We have already processsed one GOT relocation against
10286 if (globals->root.dynamic_sections_created
10287 && !SYMBOL_REFERENCES_LOCAL (info, h))
10288 *unresolved_reloc_p = FALSE;
10292 Elf_Internal_Rela outrel;
10294 if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
10296 /* If the symbol doesn't resolve locally in a static
10297 object, we have an undefined reference. If the
10298 symbol doesn't resolve locally in a dynamic object,
10299 it should be resolved by the dynamic linker. */
10300 if (globals->root.dynamic_sections_created)
10302 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
10303 *unresolved_reloc_p = FALSE;
10307 outrel.r_addend = 0;
10311 if (dynreloc_st_type == STT_GNU_IFUNC)
10312 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
10313 else if (bfd_link_pic (info) &&
10314 (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10315 || h->root.type != bfd_link_hash_undefweak))
10316 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
10319 outrel.r_addend = dynreloc_value;
10322 /* The GOT entry is initialized to zero by default.
10323 See if we should install a different value. */
10324 if (outrel.r_addend != 0
10325 && (outrel.r_info == 0 || globals->use_rel))
10327 bfd_put_32 (output_bfd, outrel.r_addend,
10328 sgot->contents + off);
10329 outrel.r_addend = 0;
10332 if (outrel.r_info != 0)
10334 outrel.r_offset = (sgot->output_section->vma
10335 + sgot->output_offset
10337 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10339 h->got.offset |= 1;
10341 value = sgot->output_offset + off;
10347 BFD_ASSERT (local_got_offsets != NULL &&
10348 local_got_offsets[r_symndx] != (bfd_vma) -1);
10350 off = local_got_offsets[r_symndx];
10352 /* The offset must always be a multiple of 4. We use the
10353 least significant bit to record whether we have already
10354 generated the necessary reloc. */
10355 if ((off & 1) != 0)
10359 if (globals->use_rel)
10360 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
10362 if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
10364 Elf_Internal_Rela outrel;
10366 outrel.r_addend = addend + dynreloc_value;
10367 outrel.r_offset = (sgot->output_section->vma
10368 + sgot->output_offset
10370 if (dynreloc_st_type == STT_GNU_IFUNC)
10371 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
10373 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
10374 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10377 local_got_offsets[r_symndx] |= 1;
10380 value = sgot->output_offset + off;
10382 if (r_type != R_ARM_GOT32)
10383 value += sgot->output_section->vma;
10385 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10386 contents, rel->r_offset, value,
10389 case R_ARM_TLS_LDO32:
10390 value = value - dtpoff_base (info);
10392 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10393 contents, rel->r_offset, value,
10396 case R_ARM_TLS_LDM32:
10403 off = globals->tls_ldm_got.offset;
10405 if ((off & 1) != 0)
10409 /* If we don't know the module number, create a relocation
10411 if (bfd_link_pic (info))
10413 Elf_Internal_Rela outrel;
10415 if (srelgot == NULL)
10418 outrel.r_addend = 0;
10419 outrel.r_offset = (sgot->output_section->vma
10420 + sgot->output_offset + off);
10421 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
10423 if (globals->use_rel)
10424 bfd_put_32 (output_bfd, outrel.r_addend,
10425 sgot->contents + off);
10427 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10430 bfd_put_32 (output_bfd, 1, sgot->contents + off);
10432 globals->tls_ldm_got.offset |= 1;
10435 value = sgot->output_section->vma + sgot->output_offset + off
10436 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
10438 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10439 contents, rel->r_offset, value,
10443 case R_ARM_TLS_CALL:
10444 case R_ARM_THM_TLS_CALL:
10445 case R_ARM_TLS_GD32:
10446 case R_ARM_TLS_IE32:
10447 case R_ARM_TLS_GOTDESC:
10448 case R_ARM_TLS_DESCSEQ:
10449 case R_ARM_THM_TLS_DESCSEQ:
10451 bfd_vma off, offplt;
10455 BFD_ASSERT (sgot != NULL);
10460 dyn = globals->root.dynamic_sections_created;
10461 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
10462 bfd_link_pic (info),
10464 && (!bfd_link_pic (info)
10465 || !SYMBOL_REFERENCES_LOCAL (info, h)))
10467 *unresolved_reloc_p = FALSE;
10470 off = h->got.offset;
10471 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
10472 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
10476 BFD_ASSERT (local_got_offsets != NULL);
10477 off = local_got_offsets[r_symndx];
10478 offplt = local_tlsdesc_gotents[r_symndx];
10479 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
10482 /* Linker relaxations happens from one of the
10483 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
10484 if (ELF32_R_TYPE(rel->r_info) != r_type)
10485 tls_type = GOT_TLS_IE;
10487 BFD_ASSERT (tls_type != GOT_UNKNOWN);
10489 if ((off & 1) != 0)
10493 bfd_boolean need_relocs = FALSE;
10494 Elf_Internal_Rela outrel;
10497 /* The GOT entries have not been initialized yet. Do it
10498 now, and emit any relocations. If both an IE GOT and a
10499 GD GOT are necessary, we emit the GD first. */
10501 if ((bfd_link_pic (info) || indx != 0)
10503 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10504 || h->root.type != bfd_link_hash_undefweak))
10506 need_relocs = TRUE;
10507 BFD_ASSERT (srelgot != NULL);
10510 if (tls_type & GOT_TLS_GDESC)
10514 /* We should have relaxed, unless this is an undefined
10516 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
10517 || bfd_link_pic (info));
10518 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
10519 <= globals->root.sgotplt->size);
10521 outrel.r_addend = 0;
10522 outrel.r_offset = (globals->root.sgotplt->output_section->vma
10523 + globals->root.sgotplt->output_offset
10525 + globals->sgotplt_jump_table_size);
10527 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
10528 sreloc = globals->root.srelplt;
10529 loc = sreloc->contents;
10530 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
10531 BFD_ASSERT (loc + RELOC_SIZE (globals)
10532 <= sreloc->contents + sreloc->size);
10534 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
10536 /* For globals, the first word in the relocation gets
10537 the relocation index and the top bit set, or zero,
10538 if we're binding now. For locals, it gets the
10539 symbol's offset in the tls section. */
10540 bfd_put_32 (output_bfd,
10541 !h ? value - elf_hash_table (info)->tls_sec->vma
10542 : info->flags & DF_BIND_NOW ? 0
10543 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
10544 globals->root.sgotplt->contents + offplt
10545 + globals->sgotplt_jump_table_size);
10547 /* Second word in the relocation is always zero. */
10548 bfd_put_32 (output_bfd, 0,
10549 globals->root.sgotplt->contents + offplt
10550 + globals->sgotplt_jump_table_size + 4);
10552 if (tls_type & GOT_TLS_GD)
10556 outrel.r_addend = 0;
10557 outrel.r_offset = (sgot->output_section->vma
10558 + sgot->output_offset
10560 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
10562 if (globals->use_rel)
10563 bfd_put_32 (output_bfd, outrel.r_addend,
10564 sgot->contents + cur_off);
10566 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10569 bfd_put_32 (output_bfd, value - dtpoff_base (info),
10570 sgot->contents + cur_off + 4);
10573 outrel.r_addend = 0;
10574 outrel.r_info = ELF32_R_INFO (indx,
10575 R_ARM_TLS_DTPOFF32);
10576 outrel.r_offset += 4;
10578 if (globals->use_rel)
10579 bfd_put_32 (output_bfd, outrel.r_addend,
10580 sgot->contents + cur_off + 4);
10582 elf32_arm_add_dynreloc (output_bfd, info,
10588 /* If we are not emitting relocations for a
10589 general dynamic reference, then we must be in a
10590 static link or an executable link with the
10591 symbol binding locally. Mark it as belonging
10592 to module 1, the executable. */
10593 bfd_put_32 (output_bfd, 1,
10594 sgot->contents + cur_off);
10595 bfd_put_32 (output_bfd, value - dtpoff_base (info),
10596 sgot->contents + cur_off + 4);
10602 if (tls_type & GOT_TLS_IE)
10607 outrel.r_addend = value - dtpoff_base (info);
10609 outrel.r_addend = 0;
10610 outrel.r_offset = (sgot->output_section->vma
10611 + sgot->output_offset
10613 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
10615 if (globals->use_rel)
10616 bfd_put_32 (output_bfd, outrel.r_addend,
10617 sgot->contents + cur_off);
10619 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10622 bfd_put_32 (output_bfd, tpoff (info, value),
10623 sgot->contents + cur_off);
10628 h->got.offset |= 1;
10630 local_got_offsets[r_symndx] |= 1;
10633 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
10635 else if (tls_type & GOT_TLS_GDESC)
10638 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
10639 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
10641 bfd_signed_vma offset;
10642 /* TLS stubs are arm mode. The original symbol is a
10643 data object, so branch_type is bogus. */
10644 branch_type = ST_BRANCH_TO_ARM;
10645 enum elf32_arm_stub_type stub_type
10646 = arm_type_of_stub (info, input_section, rel,
10647 st_type, &branch_type,
10648 (struct elf32_arm_link_hash_entry *)h,
10649 globals->tls_trampoline, globals->root.splt,
10650 input_bfd, sym_name);
10652 if (stub_type != arm_stub_none)
10654 struct elf32_arm_stub_hash_entry *stub_entry
10655 = elf32_arm_get_stub_entry
10656 (input_section, globals->root.splt, 0, rel,
10657 globals, stub_type);
10658 offset = (stub_entry->stub_offset
10659 + stub_entry->stub_sec->output_offset
10660 + stub_entry->stub_sec->output_section->vma);
10663 offset = (globals->root.splt->output_section->vma
10664 + globals->root.splt->output_offset
10665 + globals->tls_trampoline);
10667 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
10669 unsigned long inst;
10671 offset -= (input_section->output_section->vma
10672 + input_section->output_offset
10673 + rel->r_offset + 8);
10675 inst = offset >> 2;
10676 inst &= 0x00ffffff;
10677 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
10681 /* Thumb blx encodes the offset in a complicated
10683 unsigned upper_insn, lower_insn;
10686 offset -= (input_section->output_section->vma
10687 + input_section->output_offset
10688 + rel->r_offset + 4);
10690 if (stub_type != arm_stub_none
10691 && arm_stub_is_thumb (stub_type))
10693 lower_insn = 0xd000;
10697 lower_insn = 0xc000;
10698 /* Round up the offset to a word boundary. */
10699 offset = (offset + 2) & ~2;
10703 upper_insn = (0xf000
10704 | ((offset >> 12) & 0x3ff)
10706 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
10707 | (((!((offset >> 22) & 1)) ^ neg) << 11)
10708 | ((offset >> 1) & 0x7ff);
10709 bfd_put_16 (input_bfd, upper_insn, hit_data);
10710 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10711 return bfd_reloc_ok;
10714 /* These relocations needs special care, as besides the fact
10715 they point somewhere in .gotplt, the addend must be
10716 adjusted accordingly depending on the type of instruction
10718 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
10720 unsigned long data, insn;
10723 data = bfd_get_32 (input_bfd, hit_data);
10729 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
10730 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10731 insn = (insn << 16)
10732 | bfd_get_16 (input_bfd,
10733 contents + rel->r_offset - data + 2);
10734 if ((insn & 0xf800c000) == 0xf000c000)
10737 else if ((insn & 0xffffff00) == 0x4400)
10742 (*_bfd_error_handler)
10743 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
10744 input_bfd, input_section,
10745 (unsigned long)rel->r_offset, insn);
10746 return bfd_reloc_notsupported;
10751 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
10753 switch (insn >> 24)
10755 case 0xeb: /* bl */
10756 case 0xfa: /* blx */
10760 case 0xe0: /* add */
10765 (*_bfd_error_handler)
10766 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
10767 input_bfd, input_section,
10768 (unsigned long)rel->r_offset, insn);
10769 return bfd_reloc_notsupported;
10773 value += ((globals->root.sgotplt->output_section->vma
10774 + globals->root.sgotplt->output_offset + off)
10775 - (input_section->output_section->vma
10776 + input_section->output_offset
10778 + globals->sgotplt_jump_table_size);
10781 value = ((globals->root.sgot->output_section->vma
10782 + globals->root.sgot->output_offset + off)
10783 - (input_section->output_section->vma
10784 + input_section->output_offset + rel->r_offset));
10786 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10787 contents, rel->r_offset, value,
10791 case R_ARM_TLS_LE32:
10792 if (bfd_link_dll (info))
10794 (*_bfd_error_handler)
10795 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
10796 input_bfd, input_section,
10797 (long) rel->r_offset, howto->name);
10798 return bfd_reloc_notsupported;
10801 value = tpoff (info, value);
10803 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10804 contents, rel->r_offset, value,
10808 if (globals->fix_v4bx)
10810 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10812 /* Ensure that we have a BX instruction. */
10813 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
10815 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
10817 /* Branch to veneer. */
10819 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
10820 glue_addr -= input_section->output_section->vma
10821 + input_section->output_offset
10822 + rel->r_offset + 8;
10823 insn = (insn & 0xf0000000) | 0x0a000000
10824 | ((glue_addr >> 2) & 0x00ffffff);
10828 /* Preserve Rm (lowest four bits) and the condition code
10829 (highest four bits). Other bits encode MOV PC,Rm. */
10830 insn = (insn & 0xf000000f) | 0x01a0f000;
10833 bfd_put_32 (input_bfd, insn, hit_data);
10835 return bfd_reloc_ok;
10837 case R_ARM_MOVW_ABS_NC:
10838 case R_ARM_MOVT_ABS:
10839 case R_ARM_MOVW_PREL_NC:
10840 case R_ARM_MOVT_PREL:
10841 /* Until we properly support segment-base-relative addressing then
10842 we assume the segment base to be zero, as for the group relocations.
10843 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
10844 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
10845 case R_ARM_MOVW_BREL_NC:
10846 case R_ARM_MOVW_BREL:
10847 case R_ARM_MOVT_BREL:
10849 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10851 if (globals->use_rel)
10853 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
10854 signed_addend = (addend ^ 0x8000) - 0x8000;
10857 value += signed_addend;
10859 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
10860 value -= (input_section->output_section->vma
10861 + input_section->output_offset + rel->r_offset);
10863 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
10864 return bfd_reloc_overflow;
10866 if (branch_type == ST_BRANCH_TO_THUMB)
10869 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
10870 || r_type == R_ARM_MOVT_BREL)
10873 insn &= 0xfff0f000;
10874 insn |= value & 0xfff;
10875 insn |= (value & 0xf000) << 4;
10876 bfd_put_32 (input_bfd, insn, hit_data);
10878 return bfd_reloc_ok;
10880 case R_ARM_THM_MOVW_ABS_NC:
10881 case R_ARM_THM_MOVT_ABS:
10882 case R_ARM_THM_MOVW_PREL_NC:
10883 case R_ARM_THM_MOVT_PREL:
10884 /* Until we properly support segment-base-relative addressing then
10885 we assume the segment base to be zero, as for the above relocations.
10886 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
10887 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
10888 as R_ARM_THM_MOVT_ABS. */
10889 case R_ARM_THM_MOVW_BREL_NC:
10890 case R_ARM_THM_MOVW_BREL:
10891 case R_ARM_THM_MOVT_BREL:
10895 insn = bfd_get_16 (input_bfd, hit_data) << 16;
10896 insn |= bfd_get_16 (input_bfd, hit_data + 2);
10898 if (globals->use_rel)
10900 addend = ((insn >> 4) & 0xf000)
10901 | ((insn >> 15) & 0x0800)
10902 | ((insn >> 4) & 0x0700)
10904 signed_addend = (addend ^ 0x8000) - 0x8000;
10907 value += signed_addend;
10909 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
10910 value -= (input_section->output_section->vma
10911 + input_section->output_offset + rel->r_offset);
10913 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
10914 return bfd_reloc_overflow;
10916 if (branch_type == ST_BRANCH_TO_THUMB)
10919 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
10920 || r_type == R_ARM_THM_MOVT_BREL)
10923 insn &= 0xfbf08f00;
10924 insn |= (value & 0xf000) << 4;
10925 insn |= (value & 0x0800) << 15;
10926 insn |= (value & 0x0700) << 4;
10927 insn |= (value & 0x00ff);
10929 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10930 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10932 return bfd_reloc_ok;
10934 case R_ARM_ALU_PC_G0_NC:
10935 case R_ARM_ALU_PC_G1_NC:
10936 case R_ARM_ALU_PC_G0:
10937 case R_ARM_ALU_PC_G1:
10938 case R_ARM_ALU_PC_G2:
10939 case R_ARM_ALU_SB_G0_NC:
10940 case R_ARM_ALU_SB_G1_NC:
10941 case R_ARM_ALU_SB_G0:
10942 case R_ARM_ALU_SB_G1:
10943 case R_ARM_ALU_SB_G2:
10945 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10946 bfd_vma pc = input_section->output_section->vma
10947 + input_section->output_offset + rel->r_offset;
10948 /* sb is the origin of the *segment* containing the symbol. */
10949 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10952 bfd_signed_vma signed_value;
10955 /* Determine which group of bits to select. */
10958 case R_ARM_ALU_PC_G0_NC:
10959 case R_ARM_ALU_PC_G0:
10960 case R_ARM_ALU_SB_G0_NC:
10961 case R_ARM_ALU_SB_G0:
10965 case R_ARM_ALU_PC_G1_NC:
10966 case R_ARM_ALU_PC_G1:
10967 case R_ARM_ALU_SB_G1_NC:
10968 case R_ARM_ALU_SB_G1:
10972 case R_ARM_ALU_PC_G2:
10973 case R_ARM_ALU_SB_G2:
10981 /* If REL, extract the addend from the insn. If RELA, it will
10982 have already been fetched for us. */
10983 if (globals->use_rel)
10986 bfd_vma constant = insn & 0xff;
10987 bfd_vma rotation = (insn & 0xf00) >> 8;
10990 signed_addend = constant;
10993 /* Compensate for the fact that in the instruction, the
10994 rotation is stored in multiples of 2 bits. */
10997 /* Rotate "constant" right by "rotation" bits. */
10998 signed_addend = (constant >> rotation) |
10999 (constant << (8 * sizeof (bfd_vma) - rotation));
11002 /* Determine if the instruction is an ADD or a SUB.
11003 (For REL, this determines the sign of the addend.) */
11004 negative = identify_add_or_sub (insn);
11007 (*_bfd_error_handler)
11008 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
11009 input_bfd, input_section,
11010 (long) rel->r_offset, howto->name);
11011 return bfd_reloc_overflow;
11014 signed_addend *= negative;
11017 /* Compute the value (X) to go in the place. */
11018 if (r_type == R_ARM_ALU_PC_G0_NC
11019 || r_type == R_ARM_ALU_PC_G1_NC
11020 || r_type == R_ARM_ALU_PC_G0
11021 || r_type == R_ARM_ALU_PC_G1
11022 || r_type == R_ARM_ALU_PC_G2)
11024 signed_value = value - pc + signed_addend;
11026 /* Section base relative. */
11027 signed_value = value - sb + signed_addend;
11029 /* If the target symbol is a Thumb function, then set the
11030 Thumb bit in the address. */
11031 if (branch_type == ST_BRANCH_TO_THUMB)
11034 /* Calculate the value of the relevant G_n, in encoded
11035 constant-with-rotation format. */
11036 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11039 /* Check for overflow if required. */
11040 if ((r_type == R_ARM_ALU_PC_G0
11041 || r_type == R_ARM_ALU_PC_G1
11042 || r_type == R_ARM_ALU_PC_G2
11043 || r_type == R_ARM_ALU_SB_G0
11044 || r_type == R_ARM_ALU_SB_G1
11045 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
11047 (*_bfd_error_handler)
11048 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11049 input_bfd, input_section,
11050 (long) rel->r_offset, signed_value < 0 ? - signed_value : signed_value,
11052 return bfd_reloc_overflow;
11055 /* Mask out the value and the ADD/SUB part of the opcode; take care
11056 not to destroy the S bit. */
11057 insn &= 0xff1ff000;
11059 /* Set the opcode according to whether the value to go in the
11060 place is negative. */
11061 if (signed_value < 0)
11066 /* Encode the offset. */
11069 bfd_put_32 (input_bfd, insn, hit_data);
11071 return bfd_reloc_ok;
11073 case R_ARM_LDR_PC_G0:
11074 case R_ARM_LDR_PC_G1:
11075 case R_ARM_LDR_PC_G2:
11076 case R_ARM_LDR_SB_G0:
11077 case R_ARM_LDR_SB_G1:
11078 case R_ARM_LDR_SB_G2:
11080 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11081 bfd_vma pc = input_section->output_section->vma
11082 + input_section->output_offset + rel->r_offset;
11083 /* sb is the origin of the *segment* containing the symbol. */
11084 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11086 bfd_signed_vma signed_value;
11089 /* Determine which groups of bits to calculate. */
11092 case R_ARM_LDR_PC_G0:
11093 case R_ARM_LDR_SB_G0:
11097 case R_ARM_LDR_PC_G1:
11098 case R_ARM_LDR_SB_G1:
11102 case R_ARM_LDR_PC_G2:
11103 case R_ARM_LDR_SB_G2:
11111 /* If REL, extract the addend from the insn. If RELA, it will
11112 have already been fetched for us. */
11113 if (globals->use_rel)
11115 int negative = (insn & (1 << 23)) ? 1 : -1;
11116 signed_addend = negative * (insn & 0xfff);
11119 /* Compute the value (X) to go in the place. */
11120 if (r_type == R_ARM_LDR_PC_G0
11121 || r_type == R_ARM_LDR_PC_G1
11122 || r_type == R_ARM_LDR_PC_G2)
11124 signed_value = value - pc + signed_addend;
11126 /* Section base relative. */
11127 signed_value = value - sb + signed_addend;
11129 /* Calculate the value of the relevant G_{n-1} to obtain
11130 the residual at that stage. */
11131 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11132 group - 1, &residual);
11134 /* Check for overflow. */
11135 if (residual >= 0x1000)
11137 (*_bfd_error_handler)
11138 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11139 input_bfd, input_section,
11140 (long) rel->r_offset, labs (signed_value), howto->name);
11141 return bfd_reloc_overflow;
11144 /* Mask out the value and U bit. */
11145 insn &= 0xff7ff000;
11147 /* Set the U bit if the value to go in the place is non-negative. */
11148 if (signed_value >= 0)
11151 /* Encode the offset. */
11154 bfd_put_32 (input_bfd, insn, hit_data);
11156 return bfd_reloc_ok;
11158 case R_ARM_LDRS_PC_G0:
11159 case R_ARM_LDRS_PC_G1:
11160 case R_ARM_LDRS_PC_G2:
11161 case R_ARM_LDRS_SB_G0:
11162 case R_ARM_LDRS_SB_G1:
11163 case R_ARM_LDRS_SB_G2:
11165 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11166 bfd_vma pc = input_section->output_section->vma
11167 + input_section->output_offset + rel->r_offset;
11168 /* sb is the origin of the *segment* containing the symbol. */
11169 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11171 bfd_signed_vma signed_value;
11174 /* Determine which groups of bits to calculate. */
11177 case R_ARM_LDRS_PC_G0:
11178 case R_ARM_LDRS_SB_G0:
11182 case R_ARM_LDRS_PC_G1:
11183 case R_ARM_LDRS_SB_G1:
11187 case R_ARM_LDRS_PC_G2:
11188 case R_ARM_LDRS_SB_G2:
11196 /* If REL, extract the addend from the insn. If RELA, it will
11197 have already been fetched for us. */
11198 if (globals->use_rel)
11200 int negative = (insn & (1 << 23)) ? 1 : -1;
11201 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
11204 /* Compute the value (X) to go in the place. */
11205 if (r_type == R_ARM_LDRS_PC_G0
11206 || r_type == R_ARM_LDRS_PC_G1
11207 || r_type == R_ARM_LDRS_PC_G2)
11209 signed_value = value - pc + signed_addend;
11211 /* Section base relative. */
11212 signed_value = value - sb + signed_addend;
11214 /* Calculate the value of the relevant G_{n-1} to obtain
11215 the residual at that stage. */
11216 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11217 group - 1, &residual);
11219 /* Check for overflow. */
11220 if (residual >= 0x100)
11222 (*_bfd_error_handler)
11223 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11224 input_bfd, input_section,
11225 (long) rel->r_offset, labs (signed_value), howto->name);
11226 return bfd_reloc_overflow;
11229 /* Mask out the value and U bit. */
11230 insn &= 0xff7ff0f0;
11232 /* Set the U bit if the value to go in the place is non-negative. */
11233 if (signed_value >= 0)
11236 /* Encode the offset. */
11237 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
11239 bfd_put_32 (input_bfd, insn, hit_data);
11241 return bfd_reloc_ok;
11243 case R_ARM_LDC_PC_G0:
11244 case R_ARM_LDC_PC_G1:
11245 case R_ARM_LDC_PC_G2:
11246 case R_ARM_LDC_SB_G0:
11247 case R_ARM_LDC_SB_G1:
11248 case R_ARM_LDC_SB_G2:
11250 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11251 bfd_vma pc = input_section->output_section->vma
11252 + input_section->output_offset + rel->r_offset;
11253 /* sb is the origin of the *segment* containing the symbol. */
11254 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11256 bfd_signed_vma signed_value;
11259 /* Determine which groups of bits to calculate. */
11262 case R_ARM_LDC_PC_G0:
11263 case R_ARM_LDC_SB_G0:
11267 case R_ARM_LDC_PC_G1:
11268 case R_ARM_LDC_SB_G1:
11272 case R_ARM_LDC_PC_G2:
11273 case R_ARM_LDC_SB_G2:
11281 /* If REL, extract the addend from the insn. If RELA, it will
11282 have already been fetched for us. */
11283 if (globals->use_rel)
11285 int negative = (insn & (1 << 23)) ? 1 : -1;
11286 signed_addend = negative * ((insn & 0xff) << 2);
11289 /* Compute the value (X) to go in the place. */
11290 if (r_type == R_ARM_LDC_PC_G0
11291 || r_type == R_ARM_LDC_PC_G1
11292 || r_type == R_ARM_LDC_PC_G2)
11294 signed_value = value - pc + signed_addend;
11296 /* Section base relative. */
11297 signed_value = value - sb + signed_addend;
11299 /* Calculate the value of the relevant G_{n-1} to obtain
11300 the residual at that stage. */
11301 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11302 group - 1, &residual);
11304 /* Check for overflow. (The absolute value to go in the place must be
11305 divisible by four and, after having been divided by four, must
11306 fit in eight bits.) */
11307 if ((residual & 0x3) != 0 || residual >= 0x400)
11309 (*_bfd_error_handler)
11310 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11311 input_bfd, input_section,
11312 (long) rel->r_offset, labs (signed_value), howto->name);
11313 return bfd_reloc_overflow;
11316 /* Mask out the value and U bit. */
11317 insn &= 0xff7fff00;
11319 /* Set the U bit if the value to go in the place is non-negative. */
11320 if (signed_value >= 0)
11323 /* Encode the offset. */
11324 insn |= residual >> 2;
11326 bfd_put_32 (input_bfd, insn, hit_data);
11328 return bfd_reloc_ok;
11330 case R_ARM_THM_ALU_ABS_G0_NC:
11331 case R_ARM_THM_ALU_ABS_G1_NC:
11332 case R_ARM_THM_ALU_ABS_G2_NC:
11333 case R_ARM_THM_ALU_ABS_G3_NC:
11335 const int shift_array[4] = {0, 8, 16, 24};
11336 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
11337 bfd_vma addr = value;
11338 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
11340 /* Compute address. */
11341 if (globals->use_rel)
11342 signed_addend = insn & 0xff;
11343 addr += signed_addend;
11344 if (branch_type == ST_BRANCH_TO_THUMB)
11346 /* Clean imm8 insn. */
11348 /* And update with correct part of address. */
11349 insn |= (addr >> shift) & 0xff;
11351 bfd_put_16 (input_bfd, insn, hit_data);
11354 *unresolved_reloc_p = FALSE;
11355 return bfd_reloc_ok;
11358 return bfd_reloc_notsupported;
11362 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
11364 arm_add_to_rel (bfd * abfd,
11365 bfd_byte * address,
11366 reloc_howto_type * howto,
11367 bfd_signed_vma increment)
11369 bfd_signed_vma addend;
11371 if (howto->type == R_ARM_THM_CALL
11372 || howto->type == R_ARM_THM_JUMP24)
11374 int upper_insn, lower_insn;
11377 upper_insn = bfd_get_16 (abfd, address);
11378 lower_insn = bfd_get_16 (abfd, address + 2);
11379 upper = upper_insn & 0x7ff;
11380 lower = lower_insn & 0x7ff;
11382 addend = (upper << 12) | (lower << 1);
11383 addend += increment;
11386 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
11387 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
11389 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
11390 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
11396 contents = bfd_get_32 (abfd, address);
11398 /* Get the (signed) value from the instruction. */
11399 addend = contents & howto->src_mask;
11400 if (addend & ((howto->src_mask + 1) >> 1))
11402 bfd_signed_vma mask;
11405 mask &= ~ howto->src_mask;
11409 /* Add in the increment, (which is a byte value). */
11410 switch (howto->type)
11413 addend += increment;
11420 addend <<= howto->size;
11421 addend += increment;
11423 /* Should we check for overflow here ? */
11425 /* Drop any undesired bits. */
11426 addend >>= howto->rightshift;
11430 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
11432 bfd_put_32 (abfd, contents, address);
11436 #define IS_ARM_TLS_RELOC(R_TYPE) \
11437 ((R_TYPE) == R_ARM_TLS_GD32 \
11438 || (R_TYPE) == R_ARM_TLS_LDO32 \
11439 || (R_TYPE) == R_ARM_TLS_LDM32 \
11440 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
11441 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
11442 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
11443 || (R_TYPE) == R_ARM_TLS_LE32 \
11444 || (R_TYPE) == R_ARM_TLS_IE32 \
11445 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
11447 /* Specific set of relocations for the gnu tls dialect. */
11448 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
11449 ((R_TYPE) == R_ARM_TLS_GOTDESC \
11450 || (R_TYPE) == R_ARM_TLS_CALL \
11451 || (R_TYPE) == R_ARM_THM_TLS_CALL \
11452 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
11453 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
11455 /* Relocate an ARM ELF section. */
11458 elf32_arm_relocate_section (bfd * output_bfd,
11459 struct bfd_link_info * info,
11461 asection * input_section,
11462 bfd_byte * contents,
11463 Elf_Internal_Rela * relocs,
11464 Elf_Internal_Sym * local_syms,
11465 asection ** local_sections)
11467 Elf_Internal_Shdr *symtab_hdr;
11468 struct elf_link_hash_entry **sym_hashes;
11469 Elf_Internal_Rela *rel;
11470 Elf_Internal_Rela *relend;
11472 struct elf32_arm_link_hash_table * globals;
11474 globals = elf32_arm_hash_table (info);
11475 if (globals == NULL)
11478 symtab_hdr = & elf_symtab_hdr (input_bfd);
11479 sym_hashes = elf_sym_hashes (input_bfd);
11482 relend = relocs + input_section->reloc_count;
11483 for (; rel < relend; rel++)
11486 reloc_howto_type * howto;
11487 unsigned long r_symndx;
11488 Elf_Internal_Sym * sym;
11490 struct elf_link_hash_entry * h;
11491 bfd_vma relocation;
11492 bfd_reloc_status_type r;
11495 bfd_boolean unresolved_reloc = FALSE;
11496 char *error_message = NULL;
11498 r_symndx = ELF32_R_SYM (rel->r_info);
11499 r_type = ELF32_R_TYPE (rel->r_info);
11500 r_type = arm_real_reloc_type (globals, r_type);
11502 if ( r_type == R_ARM_GNU_VTENTRY
11503 || r_type == R_ARM_GNU_VTINHERIT)
11506 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
11507 howto = bfd_reloc.howto;
11513 if (r_symndx < symtab_hdr->sh_info)
11515 sym = local_syms + r_symndx;
11516 sym_type = ELF32_ST_TYPE (sym->st_info);
11517 sec = local_sections[r_symndx];
11519 /* An object file might have a reference to a local
11520 undefined symbol. This is a daft object file, but we
11521 should at least do something about it. V4BX & NONE
11522 relocations do not use the symbol and are explicitly
11523 allowed to use the undefined symbol, so allow those.
11524 Likewise for relocations against STN_UNDEF. */
11525 if (r_type != R_ARM_V4BX
11526 && r_type != R_ARM_NONE
11527 && r_symndx != STN_UNDEF
11528 && bfd_is_und_section (sec)
11529 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
11530 (*info->callbacks->undefined_symbol)
11531 (info, bfd_elf_string_from_elf_section
11532 (input_bfd, symtab_hdr->sh_link, sym->st_name),
11533 input_bfd, input_section,
11534 rel->r_offset, TRUE);
11536 if (globals->use_rel)
11538 relocation = (sec->output_section->vma
11539 + sec->output_offset
11541 if (!bfd_link_relocatable (info)
11542 && (sec->flags & SEC_MERGE)
11543 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
11546 bfd_vma addend, value;
11550 case R_ARM_MOVW_ABS_NC:
11551 case R_ARM_MOVT_ABS:
11552 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
11553 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
11554 addend = (addend ^ 0x8000) - 0x8000;
11557 case R_ARM_THM_MOVW_ABS_NC:
11558 case R_ARM_THM_MOVT_ABS:
11559 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
11561 value |= bfd_get_16 (input_bfd,
11562 contents + rel->r_offset + 2);
11563 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
11564 | ((value & 0x04000000) >> 15);
11565 addend = (addend ^ 0x8000) - 0x8000;
11569 if (howto->rightshift
11570 || (howto->src_mask & (howto->src_mask + 1)))
11572 (*_bfd_error_handler)
11573 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
11574 input_bfd, input_section,
11575 (long) rel->r_offset, howto->name);
11579 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
11581 /* Get the (signed) value from the instruction. */
11582 addend = value & howto->src_mask;
11583 if (addend & ((howto->src_mask + 1) >> 1))
11585 bfd_signed_vma mask;
11588 mask &= ~ howto->src_mask;
11596 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
11598 addend += msec->output_section->vma + msec->output_offset;
11600 /* Cases here must match those in the preceding
11601 switch statement. */
11604 case R_ARM_MOVW_ABS_NC:
11605 case R_ARM_MOVT_ABS:
11606 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
11607 | (addend & 0xfff);
11608 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
11611 case R_ARM_THM_MOVW_ABS_NC:
11612 case R_ARM_THM_MOVT_ABS:
11613 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
11614 | (addend & 0xff) | ((addend & 0x0800) << 15);
11615 bfd_put_16 (input_bfd, value >> 16,
11616 contents + rel->r_offset);
11617 bfd_put_16 (input_bfd, value,
11618 contents + rel->r_offset + 2);
11622 value = (value & ~ howto->dst_mask)
11623 | (addend & howto->dst_mask);
11624 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
11630 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
11634 bfd_boolean warned, ignored;
11636 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
11637 r_symndx, symtab_hdr, sym_hashes,
11638 h, sec, relocation,
11639 unresolved_reloc, warned, ignored);
11641 sym_type = h->type;
11644 if (sec != NULL && discarded_section (sec))
11645 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
11646 rel, 1, relend, howto, 0, contents);
11648 if (bfd_link_relocatable (info))
11650 /* This is a relocatable link. We don't have to change
11651 anything, unless the reloc is against a section symbol,
11652 in which case we have to adjust according to where the
11653 section symbol winds up in the output section. */
11654 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
11656 if (globals->use_rel)
11657 arm_add_to_rel (input_bfd, contents + rel->r_offset,
11658 howto, (bfd_signed_vma) sec->output_offset);
11660 rel->r_addend += sec->output_offset;
11666 name = h->root.root.string;
11669 name = (bfd_elf_string_from_elf_section
11670 (input_bfd, symtab_hdr->sh_link, sym->st_name));
11671 if (name == NULL || *name == '\0')
11672 name = bfd_section_name (input_bfd, sec);
11675 if (r_symndx != STN_UNDEF
11676 && r_type != R_ARM_NONE
11678 || h->root.type == bfd_link_hash_defined
11679 || h->root.type == bfd_link_hash_defweak)
11680 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
11682 (*_bfd_error_handler)
11683 ((sym_type == STT_TLS
11684 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
11685 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
11688 (long) rel->r_offset,
11693 /* We call elf32_arm_final_link_relocate unless we're completely
11694 done, i.e., the relaxation produced the final output we want,
11695 and we won't let anybody mess with it. Also, we have to do
11696 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
11697 both in relaxed and non-relaxed cases. */
11698 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
11699 || (IS_ARM_TLS_GNU_RELOC (r_type)
11700 && !((h ? elf32_arm_hash_entry (h)->tls_type :
11701 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
11704 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
11705 contents, rel, h == NULL);
11706 /* This may have been marked unresolved because it came from
11707 a shared library. But we've just dealt with that. */
11708 unresolved_reloc = 0;
11711 r = bfd_reloc_continue;
11713 if (r == bfd_reloc_continue)
11715 unsigned char branch_type =
11716 h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
11717 : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
11719 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
11720 input_section, contents, rel,
11721 relocation, info, sec, name,
11722 sym_type, branch_type, h,
11727 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
11728 because such sections are not SEC_ALLOC and thus ld.so will
11729 not process them. */
11730 if (unresolved_reloc
11731 && !((input_section->flags & SEC_DEBUGGING) != 0
11733 && _bfd_elf_section_offset (output_bfd, info, input_section,
11734 rel->r_offset) != (bfd_vma) -1)
11736 (*_bfd_error_handler)
11737 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
11740 (long) rel->r_offset,
11742 h->root.root.string);
11746 if (r != bfd_reloc_ok)
11750 case bfd_reloc_overflow:
11751 /* If the overflowing reloc was to an undefined symbol,
11752 we have already printed one error message and there
11753 is no point complaining again. */
11754 if (!h || h->root.type != bfd_link_hash_undefined)
11755 (*info->callbacks->reloc_overflow)
11756 (info, (h ? &h->root : NULL), name, howto->name,
11757 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
11760 case bfd_reloc_undefined:
11761 (*info->callbacks->undefined_symbol)
11762 (info, name, input_bfd, input_section, rel->r_offset, TRUE);
11765 case bfd_reloc_outofrange:
11766 error_message = _("out of range");
11769 case bfd_reloc_notsupported:
11770 error_message = _("unsupported relocation");
11773 case bfd_reloc_dangerous:
11774 /* error_message should already be set. */
11778 error_message = _("unknown error");
11779 /* Fall through. */
11782 BFD_ASSERT (error_message != NULL);
11783 (*info->callbacks->reloc_dangerous)
11784 (info, error_message, input_bfd, input_section, rel->r_offset);
11793 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
11794 adds the edit to the start of the list. (The list must be built in order of
11795 ascending TINDEX: the function's callers are primarily responsible for
11796 maintaining that condition). */
11799 add_unwind_table_edit (arm_unwind_table_edit **head,
11800 arm_unwind_table_edit **tail,
11801 arm_unwind_edit_type type,
11802 asection *linked_section,
11803 unsigned int tindex)
11805 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
11806 xmalloc (sizeof (arm_unwind_table_edit));
11808 new_edit->type = type;
11809 new_edit->linked_section = linked_section;
11810 new_edit->index = tindex;
11814 new_edit->next = NULL;
11817 (*tail)->next = new_edit;
11819 (*tail) = new_edit;
11822 (*head) = new_edit;
11826 new_edit->next = *head;
11835 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
11837 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
11839 adjust_exidx_size(asection *exidx_sec, int adjust)
11843 if (!exidx_sec->rawsize)
11844 exidx_sec->rawsize = exidx_sec->size;
11846 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
11847 out_sec = exidx_sec->output_section;
11848 /* Adjust size of output section. */
11849 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
11852 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
11854 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
11856 struct _arm_elf_section_data *exidx_arm_data;
11858 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
11859 add_unwind_table_edit (
11860 &exidx_arm_data->u.exidx.unwind_edit_list,
11861 &exidx_arm_data->u.exidx.unwind_edit_tail,
11862 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
11864 exidx_arm_data->additional_reloc_count++;
11866 adjust_exidx_size(exidx_sec, 8);
11869 /* Scan .ARM.exidx tables, and create a list describing edits which should be
11870 made to those tables, such that:
11872 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
11873 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
11874 codes which have been inlined into the index).
11876 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
11878 The edits are applied when the tables are written
11879 (in elf32_arm_write_section). */
11882 elf32_arm_fix_exidx_coverage (asection **text_section_order,
11883 unsigned int num_text_sections,
11884 struct bfd_link_info *info,
11885 bfd_boolean merge_exidx_entries)
11888 unsigned int last_second_word = 0, i;
11889 asection *last_exidx_sec = NULL;
11890 asection *last_text_sec = NULL;
11891 int last_unwind_type = -1;
11893 /* Walk over all EXIDX sections, and create backlinks from the corrsponding
11895 for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
11899 for (sec = inp->sections; sec != NULL; sec = sec->next)
11901 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
11902 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
11904 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
11907 if (elf_sec->linked_to)
11909 Elf_Internal_Shdr *linked_hdr
11910 = &elf_section_data (elf_sec->linked_to)->this_hdr;
11911 struct _arm_elf_section_data *linked_sec_arm_data
11912 = get_arm_elf_section_data (linked_hdr->bfd_section);
11914 if (linked_sec_arm_data == NULL)
11917 /* Link this .ARM.exidx section back from the text section it
11919 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
11924 /* Walk all text sections in order of increasing VMA. Eilminate duplicate
11925 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
11926 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
11928 for (i = 0; i < num_text_sections; i++)
11930 asection *sec = text_section_order[i];
11931 asection *exidx_sec;
11932 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
11933 struct _arm_elf_section_data *exidx_arm_data;
11934 bfd_byte *contents = NULL;
11935 int deleted_exidx_bytes = 0;
11937 arm_unwind_table_edit *unwind_edit_head = NULL;
11938 arm_unwind_table_edit *unwind_edit_tail = NULL;
11939 Elf_Internal_Shdr *hdr;
11942 if (arm_data == NULL)
11945 exidx_sec = arm_data->u.text.arm_exidx_sec;
11946 if (exidx_sec == NULL)
11948 /* Section has no unwind data. */
11949 if (last_unwind_type == 0 || !last_exidx_sec)
11952 /* Ignore zero sized sections. */
11953 if (sec->size == 0)
11956 insert_cantunwind_after(last_text_sec, last_exidx_sec);
11957 last_unwind_type = 0;
11961 /* Skip /DISCARD/ sections. */
11962 if (bfd_is_abs_section (exidx_sec->output_section))
11965 hdr = &elf_section_data (exidx_sec)->this_hdr;
11966 if (hdr->sh_type != SHT_ARM_EXIDX)
11969 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
11970 if (exidx_arm_data == NULL)
11973 ibfd = exidx_sec->owner;
11975 if (hdr->contents != NULL)
11976 contents = hdr->contents;
11977 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
11981 if (last_unwind_type > 0)
11983 unsigned int first_word = bfd_get_32 (ibfd, contents);
11984 /* Add cantunwind if first unwind item does not match section
11986 if (first_word != sec->vma)
11988 insert_cantunwind_after (last_text_sec, last_exidx_sec);
11989 last_unwind_type = 0;
11993 for (j = 0; j < hdr->sh_size; j += 8)
11995 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
11999 /* An EXIDX_CANTUNWIND entry. */
12000 if (second_word == 1)
12002 if (last_unwind_type == 0)
12006 /* Inlined unwinding data. Merge if equal to previous. */
12007 else if ((second_word & 0x80000000) != 0)
12009 if (merge_exidx_entries
12010 && last_second_word == second_word && last_unwind_type == 1)
12013 last_second_word = second_word;
12015 /* Normal table entry. In theory we could merge these too,
12016 but duplicate entries are likely to be much less common. */
12020 if (elide && !bfd_link_relocatable (info))
12022 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
12023 DELETE_EXIDX_ENTRY, NULL, j / 8);
12025 deleted_exidx_bytes += 8;
12028 last_unwind_type = unwind_type;
12031 /* Free contents if we allocated it ourselves. */
12032 if (contents != hdr->contents)
12035 /* Record edits to be applied later (in elf32_arm_write_section). */
12036 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
12037 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
12039 if (deleted_exidx_bytes > 0)
12040 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
12042 last_exidx_sec = exidx_sec;
12043 last_text_sec = sec;
12046 /* Add terminating CANTUNWIND entry. */
12047 if (!bfd_link_relocatable (info) && last_exidx_sec
12048 && last_unwind_type != 0)
12049 insert_cantunwind_after(last_text_sec, last_exidx_sec);
12055 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
12056 bfd *ibfd, const char *name)
12058 asection *sec, *osec;
12060 sec = bfd_get_linker_section (ibfd, name);
12061 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
12064 osec = sec->output_section;
12065 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
12068 if (! bfd_set_section_contents (obfd, osec, sec->contents,
12069 sec->output_offset, sec->size))
12076 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
12078 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
12079 asection *sec, *osec;
12081 if (globals == NULL)
12084 /* Invoke the regular ELF backend linker to do all the work. */
12085 if (!bfd_elf_final_link (abfd, info))
12088 /* Process stub sections (eg BE8 encoding, ...). */
12089 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
12091 for (i=0; i<htab->top_id; i++)
12093 sec = htab->stub_group[i].stub_sec;
12094 /* Only process it once, in its link_sec slot. */
12095 if (sec && i == htab->stub_group[i].link_sec->id)
12097 osec = sec->output_section;
12098 elf32_arm_write_section (abfd, info, sec, sec->contents);
12099 if (! bfd_set_section_contents (abfd, osec, sec->contents,
12100 sec->output_offset, sec->size))
12105 /* Write out any glue sections now that we have created all the
12107 if (globals->bfd_of_glue_owner != NULL)
12109 if (! elf32_arm_output_glue_section (info, abfd,
12110 globals->bfd_of_glue_owner,
12111 ARM2THUMB_GLUE_SECTION_NAME))
12114 if (! elf32_arm_output_glue_section (info, abfd,
12115 globals->bfd_of_glue_owner,
12116 THUMB2ARM_GLUE_SECTION_NAME))
12119 if (! elf32_arm_output_glue_section (info, abfd,
12120 globals->bfd_of_glue_owner,
12121 VFP11_ERRATUM_VENEER_SECTION_NAME))
12124 if (! elf32_arm_output_glue_section (info, abfd,
12125 globals->bfd_of_glue_owner,
12126 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
12129 if (! elf32_arm_output_glue_section (info, abfd,
12130 globals->bfd_of_glue_owner,
12131 ARM_BX_GLUE_SECTION_NAME))
12138 /* Return a best guess for the machine number based on the attributes. */
12140 static unsigned int
12141 bfd_arm_get_mach_from_attributes (bfd * abfd)
12143 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
12147 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
12148 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
12149 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
12151 case TAG_CPU_ARCH_V5TE:
12155 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
12156 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
12160 if (strcmp (name, "IWMMXT2") == 0)
12161 return bfd_mach_arm_iWMMXt2;
12163 if (strcmp (name, "IWMMXT") == 0)
12164 return bfd_mach_arm_iWMMXt;
12166 if (strcmp (name, "XSCALE") == 0)
12170 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
12171 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
12174 case 1: return bfd_mach_arm_iWMMXt;
12175 case 2: return bfd_mach_arm_iWMMXt2;
12176 default: return bfd_mach_arm_XScale;
12181 return bfd_mach_arm_5TE;
12185 return bfd_mach_arm_unknown;
12189 /* Set the right machine number. */
12192 elf32_arm_object_p (bfd *abfd)
12196 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
12198 if (mach == bfd_mach_arm_unknown)
12200 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
12201 mach = bfd_mach_arm_ep9312;
12203 mach = bfd_arm_get_mach_from_attributes (abfd);
12206 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
12210 /* Function to keep ARM specific flags in the ELF header. */
12213 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
12215 if (elf_flags_init (abfd)
12216 && elf_elfheader (abfd)->e_flags != flags)
12218 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
12220 if (flags & EF_ARM_INTERWORK)
12221 (*_bfd_error_handler)
12222 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
12226 (_("Warning: Clearing the interworking flag of %B due to outside request"),
12232 elf_elfheader (abfd)->e_flags = flags;
12233 elf_flags_init (abfd) = TRUE;
12239 /* Copy backend specific data from one object module to another. */
12242 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
12245 flagword out_flags;
12247 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
12250 in_flags = elf_elfheader (ibfd)->e_flags;
12251 out_flags = elf_elfheader (obfd)->e_flags;
12253 if (elf_flags_init (obfd)
12254 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
12255 && in_flags != out_flags)
12257 /* Cannot mix APCS26 and APCS32 code. */
12258 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
12261 /* Cannot mix float APCS and non-float APCS code. */
12262 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
12265 /* If the src and dest have different interworking flags
12266 then turn off the interworking bit. */
12267 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
12269 if (out_flags & EF_ARM_INTERWORK)
12271 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
12274 in_flags &= ~EF_ARM_INTERWORK;
12277 /* Likewise for PIC, though don't warn for this case. */
12278 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
12279 in_flags &= ~EF_ARM_PIC;
12282 elf_elfheader (obfd)->e_flags = in_flags;
12283 elf_flags_init (obfd) = TRUE;
12285 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
12288 /* Values for Tag_ABI_PCS_R9_use. */
12297 /* Values for Tag_ABI_PCS_RW_data. */
12300 AEABI_PCS_RW_data_absolute,
12301 AEABI_PCS_RW_data_PCrel,
12302 AEABI_PCS_RW_data_SBrel,
12303 AEABI_PCS_RW_data_unused
12306 /* Values for Tag_ABI_enum_size. */
12312 AEABI_enum_forced_wide
12315 /* Determine whether an object attribute tag takes an integer, a
12319 elf32_arm_obj_attrs_arg_type (int tag)
12321 if (tag == Tag_compatibility)
12322 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
12323 else if (tag == Tag_nodefaults)
12324 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
12325 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
12326 return ATTR_TYPE_FLAG_STR_VAL;
12328 return ATTR_TYPE_FLAG_INT_VAL;
12330 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
12333 /* The ABI defines that Tag_conformance should be emitted first, and that
12334 Tag_nodefaults should be second (if either is defined). This sets those
12335 two positions, and bumps up the position of all the remaining tags to
12338 elf32_arm_obj_attrs_order (int num)
12340 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
12341 return Tag_conformance;
12342 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
12343 return Tag_nodefaults;
12344 if ((num - 2) < Tag_nodefaults)
12346 if ((num - 1) < Tag_conformance)
12351 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
12353 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
12355 if ((tag & 127) < 64)
12358 (_("%B: Unknown mandatory EABI object attribute %d"),
12360 bfd_set_error (bfd_error_bad_value);
12366 (_("Warning: %B: Unknown EABI object attribute %d"),
12372 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
12373 Returns -1 if no architecture could be read. */
12376 get_secondary_compatible_arch (bfd *abfd)
12378 obj_attribute *attr =
12379 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12381 /* Note: the tag and its argument below are uleb128 values, though
12382 currently-defined values fit in one byte for each. */
12384 && attr->s[0] == Tag_CPU_arch
12385 && (attr->s[1] & 128) != 128
12386 && attr->s[2] == 0)
12389 /* This tag is "safely ignorable", so don't complain if it looks funny. */
12393 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
12394 The tag is removed if ARCH is -1. */
12397 set_secondary_compatible_arch (bfd *abfd, int arch)
12399 obj_attribute *attr =
12400 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12408 /* Note: the tag and its argument below are uleb128 values, though
12409 currently-defined values fit in one byte for each. */
12411 attr->s = (char *) bfd_alloc (abfd, 3);
12412 attr->s[0] = Tag_CPU_arch;
12417 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
12421 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
12422 int newtag, int secondary_compat)
12424 #define T(X) TAG_CPU_ARCH_##X
12425 int tagl, tagh, result;
12428 T(V6T2), /* PRE_V4. */
12430 T(V6T2), /* V4T. */
12431 T(V6T2), /* V5T. */
12432 T(V6T2), /* V5TE. */
12433 T(V6T2), /* V5TEJ. */
12436 T(V6T2) /* V6T2. */
12440 T(V6K), /* PRE_V4. */
12444 T(V6K), /* V5TE. */
12445 T(V6K), /* V5TEJ. */
12447 T(V6KZ), /* V6KZ. */
12453 T(V7), /* PRE_V4. */
12458 T(V7), /* V5TEJ. */
12471 T(V6K), /* V5TE. */
12472 T(V6K), /* V5TEJ. */
12474 T(V6KZ), /* V6KZ. */
12478 T(V6_M) /* V6_M. */
12480 const int v6s_m[] =
12486 T(V6K), /* V5TE. */
12487 T(V6K), /* V5TEJ. */
12489 T(V6KZ), /* V6KZ. */
12493 T(V6S_M), /* V6_M. */
12494 T(V6S_M) /* V6S_M. */
12496 const int v7e_m[] =
12500 T(V7E_M), /* V4T. */
12501 T(V7E_M), /* V5T. */
12502 T(V7E_M), /* V5TE. */
12503 T(V7E_M), /* V5TEJ. */
12504 T(V7E_M), /* V6. */
12505 T(V7E_M), /* V6KZ. */
12506 T(V7E_M), /* V6T2. */
12507 T(V7E_M), /* V6K. */
12508 T(V7E_M), /* V7. */
12509 T(V7E_M), /* V6_M. */
12510 T(V7E_M), /* V6S_M. */
12511 T(V7E_M) /* V7E_M. */
12515 T(V8), /* PRE_V4. */
12520 T(V8), /* V5TEJ. */
12527 T(V8), /* V6S_M. */
12528 T(V8), /* V7E_M. */
12531 const int v8m_baseline[] =
12544 T(V8M_BASE), /* V6_M. */
12545 T(V8M_BASE), /* V6S_M. */
12549 T(V8M_BASE) /* V8-M BASELINE. */
12551 const int v8m_mainline[] =
12563 T(V8M_MAIN), /* V7. */
12564 T(V8M_MAIN), /* V6_M. */
12565 T(V8M_MAIN), /* V6S_M. */
12566 T(V8M_MAIN), /* V7E_M. */
12569 T(V8M_MAIN), /* V8-M BASELINE. */
12570 T(V8M_MAIN) /* V8-M MAINLINE. */
12572 const int v4t_plus_v6_m[] =
12578 T(V5TE), /* V5TE. */
12579 T(V5TEJ), /* V5TEJ. */
12581 T(V6KZ), /* V6KZ. */
12582 T(V6T2), /* V6T2. */
12585 T(V6_M), /* V6_M. */
12586 T(V6S_M), /* V6S_M. */
12587 T(V7E_M), /* V7E_M. */
12590 T(V8M_BASE), /* V8-M BASELINE. */
12591 T(V8M_MAIN), /* V8-M MAINLINE. */
12592 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
12594 const int *comb[] =
12606 /* Pseudo-architecture. */
12610 /* Check we've not got a higher architecture than we know about. */
12612 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
12614 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
12618 /* Override old tag if we have a Tag_also_compatible_with on the output. */
12620 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
12621 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
12622 oldtag = T(V4T_PLUS_V6_M);
12624 /* And override the new tag if we have a Tag_also_compatible_with on the
12627 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
12628 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
12629 newtag = T(V4T_PLUS_V6_M);
12631 tagl = (oldtag < newtag) ? oldtag : newtag;
12632 result = tagh = (oldtag > newtag) ? oldtag : newtag;
12634 /* Architectures before V6KZ add features monotonically. */
12635 if (tagh <= TAG_CPU_ARCH_V6KZ)
12638 result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
12640 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
12641 as the canonical version. */
12642 if (result == T(V4T_PLUS_V6_M))
12645 *secondary_compat_out = T(V6_M);
12648 *secondary_compat_out = -1;
12652 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
12653 ibfd, oldtag, newtag);
12661 /* Query attributes object to see if integer divide instructions may be
12662 present in an object. */
12664 elf32_arm_attributes_accept_div (const obj_attribute *attr)
12666 int arch = attr[Tag_CPU_arch].i;
12667 int profile = attr[Tag_CPU_arch_profile].i;
12669 switch (attr[Tag_DIV_use].i)
12672 /* Integer divide allowed if instruction contained in archetecture. */
12673 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
12675 else if (arch >= TAG_CPU_ARCH_V7E_M)
12681 /* Integer divide explicitly prohibited. */
12685 /* Unrecognised case - treat as allowing divide everywhere. */
12687 /* Integer divide allowed in ARM state. */
12692 /* Query attributes object to see if integer divide instructions are
12693 forbidden to be in the object. This is not the inverse of
12694 elf32_arm_attributes_accept_div. */
12696 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
12698 return attr[Tag_DIV_use].i == 1;
12701 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
12702 are conflicting attributes. */
12705 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
12707 obj_attribute *in_attr;
12708 obj_attribute *out_attr;
12709 /* Some tags have 0 = don't care, 1 = strong requirement,
12710 2 = weak requirement. */
12711 static const int order_021[3] = {0, 2, 1};
12713 bfd_boolean result = TRUE;
12714 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
12716 /* Skip the linker stubs file. This preserves previous behavior
12717 of accepting unknown attributes in the first input file - but
12719 if (ibfd->flags & BFD_LINKER_CREATED)
12722 /* Skip any input that hasn't attribute section.
12723 This enables to link object files without attribute section with
12725 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
12728 if (!elf_known_obj_attributes_proc (obfd)[0].i)
12730 /* This is the first object. Copy the attributes. */
12731 _bfd_elf_copy_obj_attributes (ibfd, obfd);
12733 out_attr = elf_known_obj_attributes_proc (obfd);
12735 /* Use the Tag_null value to indicate the attributes have been
12739 /* We do not output objects with Tag_MPextension_use_legacy - we move
12740 the attribute's value to Tag_MPextension_use. */
12741 if (out_attr[Tag_MPextension_use_legacy].i != 0)
12743 if (out_attr[Tag_MPextension_use].i != 0
12744 && out_attr[Tag_MPextension_use_legacy].i
12745 != out_attr[Tag_MPextension_use].i)
12748 (_("Error: %B has both the current and legacy "
12749 "Tag_MPextension_use attributes"), ibfd);
12753 out_attr[Tag_MPextension_use] =
12754 out_attr[Tag_MPextension_use_legacy];
12755 out_attr[Tag_MPextension_use_legacy].type = 0;
12756 out_attr[Tag_MPextension_use_legacy].i = 0;
12762 in_attr = elf_known_obj_attributes_proc (ibfd);
12763 out_attr = elf_known_obj_attributes_proc (obfd);
12764 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
12765 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
12767 /* Ignore mismatches if the object doesn't use floating point or is
12768 floating point ABI independent. */
12769 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
12770 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12771 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
12772 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
12773 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12774 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
12777 (_("error: %B uses VFP register arguments, %B does not"),
12778 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
12779 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
12784 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
12786 /* Merge this attribute with existing attributes. */
12789 case Tag_CPU_raw_name:
12791 /* These are merged after Tag_CPU_arch. */
12794 case Tag_ABI_optimization_goals:
12795 case Tag_ABI_FP_optimization_goals:
12796 /* Use the first value seen. */
12801 int secondary_compat = -1, secondary_compat_out = -1;
12802 unsigned int saved_out_attr = out_attr[i].i;
12804 static const char *name_table[] =
12806 /* These aren't real CPU names, but we can't guess
12807 that from the architecture version alone. */
12823 "ARM v8-M.baseline",
12824 "ARM v8-M.mainline",
12827 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
12828 secondary_compat = get_secondary_compatible_arch (ibfd);
12829 secondary_compat_out = get_secondary_compatible_arch (obfd);
12830 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
12831 &secondary_compat_out,
12835 /* Return with error if failed to merge. */
12836 if (arch_attr == -1)
12839 out_attr[i].i = arch_attr;
12841 set_secondary_compatible_arch (obfd, secondary_compat_out);
12843 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
12844 if (out_attr[i].i == saved_out_attr)
12845 ; /* Leave the names alone. */
12846 else if (out_attr[i].i == in_attr[i].i)
12848 /* The output architecture has been changed to match the
12849 input architecture. Use the input names. */
12850 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
12851 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
12853 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
12854 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
12859 out_attr[Tag_CPU_name].s = NULL;
12860 out_attr[Tag_CPU_raw_name].s = NULL;
12863 /* If we still don't have a value for Tag_CPU_name,
12864 make one up now. Tag_CPU_raw_name remains blank. */
12865 if (out_attr[Tag_CPU_name].s == NULL
12866 && out_attr[i].i < ARRAY_SIZE (name_table))
12867 out_attr[Tag_CPU_name].s =
12868 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
12872 case Tag_ARM_ISA_use:
12873 case Tag_THUMB_ISA_use:
12874 case Tag_WMMX_arch:
12875 case Tag_Advanced_SIMD_arch:
12876 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
12877 case Tag_ABI_FP_rounding:
12878 case Tag_ABI_FP_exceptions:
12879 case Tag_ABI_FP_user_exceptions:
12880 case Tag_ABI_FP_number_model:
12881 case Tag_FP_HP_extension:
12882 case Tag_CPU_unaligned_access:
12884 case Tag_MPextension_use:
12885 /* Use the largest value specified. */
12886 if (in_attr[i].i > out_attr[i].i)
12887 out_attr[i].i = in_attr[i].i;
12890 case Tag_ABI_align_preserved:
12891 case Tag_ABI_PCS_RO_data:
12892 /* Use the smallest value specified. */
12893 if (in_attr[i].i < out_attr[i].i)
12894 out_attr[i].i = in_attr[i].i;
12897 case Tag_ABI_align_needed:
12898 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
12899 && (in_attr[Tag_ABI_align_preserved].i == 0
12900 || out_attr[Tag_ABI_align_preserved].i == 0))
12902 /* This error message should be enabled once all non-conformant
12903 binaries in the toolchain have had the attributes set
12906 (_("error: %B: 8-byte data alignment conflicts with %B"),
12910 /* Fall through. */
12911 case Tag_ABI_FP_denormal:
12912 case Tag_ABI_PCS_GOT_use:
12913 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
12914 value if greater than 2 (for future-proofing). */
12915 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
12916 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
12917 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
12918 out_attr[i].i = in_attr[i].i;
12921 case Tag_Virtualization_use:
12922 /* The virtualization tag effectively stores two bits of
12923 information: the intended use of TrustZone (in bit 0), and the
12924 intended use of Virtualization (in bit 1). */
12925 if (out_attr[i].i == 0)
12926 out_attr[i].i = in_attr[i].i;
12927 else if (in_attr[i].i != 0
12928 && in_attr[i].i != out_attr[i].i)
12930 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
12935 (_("error: %B: unable to merge virtualization attributes "
12943 case Tag_CPU_arch_profile:
12944 if (out_attr[i].i != in_attr[i].i)
12946 /* 0 will merge with anything.
12947 'A' and 'S' merge to 'A'.
12948 'R' and 'S' merge to 'R'.
12949 'M' and 'A|R|S' is an error. */
12950 if (out_attr[i].i == 0
12951 || (out_attr[i].i == 'S'
12952 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
12953 out_attr[i].i = in_attr[i].i;
12954 else if (in_attr[i].i == 0
12955 || (in_attr[i].i == 'S'
12956 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
12957 ; /* Do nothing. */
12961 (_("error: %B: Conflicting architecture profiles %c/%c"),
12963 in_attr[i].i ? in_attr[i].i : '0',
12964 out_attr[i].i ? out_attr[i].i : '0');
12970 case Tag_DSP_extension:
12971 /* No need to change output value if any of:
12972 - pre (<=) ARMv5T input architecture (do not have DSP)
12973 - M input profile not ARMv7E-M and do not have DSP. */
12974 if (in_attr[Tag_CPU_arch].i <= 3
12975 || (in_attr[Tag_CPU_arch_profile].i == 'M'
12976 && in_attr[Tag_CPU_arch].i != 13
12977 && in_attr[i].i == 0))
12978 ; /* Do nothing. */
12979 /* Output value should be 0 if DSP part of architecture, ie.
12980 - post (>=) ARMv5te architecture output
12981 - A, R or S profile output or ARMv7E-M output architecture. */
12982 else if (out_attr[Tag_CPU_arch].i >= 4
12983 && (out_attr[Tag_CPU_arch_profile].i == 'A'
12984 || out_attr[Tag_CPU_arch_profile].i == 'R'
12985 || out_attr[Tag_CPU_arch_profile].i == 'S'
12986 || out_attr[Tag_CPU_arch].i == 13))
12988 /* Otherwise, DSP instructions are added and not part of output
12996 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
12997 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
12998 when it's 0. It might mean absence of FP hardware if
12999 Tag_FP_arch is zero. */
13001 #define VFP_VERSION_COUNT 9
13002 static const struct
13006 } vfp_versions[VFP_VERSION_COUNT] =
13022 /* If the output has no requirement about FP hardware,
13023 follow the requirement of the input. */
13024 if (out_attr[i].i == 0)
13026 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
13027 out_attr[i].i = in_attr[i].i;
13028 out_attr[Tag_ABI_HardFP_use].i
13029 = in_attr[Tag_ABI_HardFP_use].i;
13032 /* If the input has no requirement about FP hardware, do
13034 else if (in_attr[i].i == 0)
13036 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
13040 /* Both the input and the output have nonzero Tag_FP_arch.
13041 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
13043 /* If both the input and the output have zero Tag_ABI_HardFP_use,
13045 if (in_attr[Tag_ABI_HardFP_use].i == 0
13046 && out_attr[Tag_ABI_HardFP_use].i == 0)
13048 /* If the input and the output have different Tag_ABI_HardFP_use,
13049 the combination of them is 0 (implied by Tag_FP_arch). */
13050 else if (in_attr[Tag_ABI_HardFP_use].i
13051 != out_attr[Tag_ABI_HardFP_use].i)
13052 out_attr[Tag_ABI_HardFP_use].i = 0;
13054 /* Now we can handle Tag_FP_arch. */
13056 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
13057 pick the biggest. */
13058 if (in_attr[i].i >= VFP_VERSION_COUNT
13059 && in_attr[i].i > out_attr[i].i)
13061 out_attr[i] = in_attr[i];
13064 /* The output uses the superset of input features
13065 (ISA version) and registers. */
13066 ver = vfp_versions[in_attr[i].i].ver;
13067 if (ver < vfp_versions[out_attr[i].i].ver)
13068 ver = vfp_versions[out_attr[i].i].ver;
13069 regs = vfp_versions[in_attr[i].i].regs;
13070 if (regs < vfp_versions[out_attr[i].i].regs)
13071 regs = vfp_versions[out_attr[i].i].regs;
13072 /* This assumes all possible supersets are also a valid
13074 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
13076 if (regs == vfp_versions[newval].regs
13077 && ver == vfp_versions[newval].ver)
13080 out_attr[i].i = newval;
13083 case Tag_PCS_config:
13084 if (out_attr[i].i == 0)
13085 out_attr[i].i = in_attr[i].i;
13086 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
13088 /* It's sometimes ok to mix different configs, so this is only
13091 (_("Warning: %B: Conflicting platform configuration"), ibfd);
13094 case Tag_ABI_PCS_R9_use:
13095 if (in_attr[i].i != out_attr[i].i
13096 && out_attr[i].i != AEABI_R9_unused
13097 && in_attr[i].i != AEABI_R9_unused)
13100 (_("error: %B: Conflicting use of R9"), ibfd);
13103 if (out_attr[i].i == AEABI_R9_unused)
13104 out_attr[i].i = in_attr[i].i;
13106 case Tag_ABI_PCS_RW_data:
13107 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
13108 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
13109 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
13112 (_("error: %B: SB relative addressing conflicts with use of R9"),
13116 /* Use the smallest value specified. */
13117 if (in_attr[i].i < out_attr[i].i)
13118 out_attr[i].i = in_attr[i].i;
13120 case Tag_ABI_PCS_wchar_t:
13121 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
13122 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
13125 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
13126 ibfd, in_attr[i].i, out_attr[i].i);
13128 else if (in_attr[i].i && !out_attr[i].i)
13129 out_attr[i].i = in_attr[i].i;
13131 case Tag_ABI_enum_size:
13132 if (in_attr[i].i != AEABI_enum_unused)
13134 if (out_attr[i].i == AEABI_enum_unused
13135 || out_attr[i].i == AEABI_enum_forced_wide)
13137 /* The existing object is compatible with anything.
13138 Use whatever requirements the new object has. */
13139 out_attr[i].i = in_attr[i].i;
13141 else if (in_attr[i].i != AEABI_enum_forced_wide
13142 && out_attr[i].i != in_attr[i].i
13143 && !elf_arm_tdata (obfd)->no_enum_size_warning)
13145 static const char *aeabi_enum_names[] =
13146 { "", "variable-size", "32-bit", "" };
13147 const char *in_name =
13148 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
13149 ? aeabi_enum_names[in_attr[i].i]
13151 const char *out_name =
13152 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
13153 ? aeabi_enum_names[out_attr[i].i]
13156 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
13157 ibfd, in_name, out_name);
13161 case Tag_ABI_VFP_args:
13164 case Tag_ABI_WMMX_args:
13165 if (in_attr[i].i != out_attr[i].i)
13168 (_("error: %B uses iWMMXt register arguments, %B does not"),
13173 case Tag_compatibility:
13174 /* Merged in target-independent code. */
13176 case Tag_ABI_HardFP_use:
13177 /* This is handled along with Tag_FP_arch. */
13179 case Tag_ABI_FP_16bit_format:
13180 if (in_attr[i].i != 0 && out_attr[i].i != 0)
13182 if (in_attr[i].i != out_attr[i].i)
13185 (_("error: fp16 format mismatch between %B and %B"),
13190 if (in_attr[i].i != 0)
13191 out_attr[i].i = in_attr[i].i;
13195 /* A value of zero on input means that the divide instruction may
13196 be used if available in the base architecture as specified via
13197 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
13198 the user did not want divide instructions. A value of 2
13199 explicitly means that divide instructions were allowed in ARM
13200 and Thumb state. */
13201 if (in_attr[i].i == out_attr[i].i)
13202 /* Do nothing. */ ;
13203 else if (elf32_arm_attributes_forbid_div (in_attr)
13204 && !elf32_arm_attributes_accept_div (out_attr))
13206 else if (elf32_arm_attributes_forbid_div (out_attr)
13207 && elf32_arm_attributes_accept_div (in_attr))
13208 out_attr[i].i = in_attr[i].i;
13209 else if (in_attr[i].i == 2)
13210 out_attr[i].i = in_attr[i].i;
13213 case Tag_MPextension_use_legacy:
13214 /* We don't output objects with Tag_MPextension_use_legacy - we
13215 move the value to Tag_MPextension_use. */
13216 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
13218 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
13221 (_("%B has has both the current and legacy "
13222 "Tag_MPextension_use attributes"),
13228 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
13229 out_attr[Tag_MPextension_use] = in_attr[i];
13233 case Tag_nodefaults:
13234 /* This tag is set if it exists, but the value is unused (and is
13235 typically zero). We don't actually need to do anything here -
13236 the merge happens automatically when the type flags are merged
13239 case Tag_also_compatible_with:
13240 /* Already done in Tag_CPU_arch. */
13242 case Tag_conformance:
13243 /* Keep the attribute if it matches. Throw it away otherwise.
13244 No attribute means no claim to conform. */
13245 if (!in_attr[i].s || !out_attr[i].s
13246 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
13247 out_attr[i].s = NULL;
13252 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
13255 /* If out_attr was copied from in_attr then it won't have a type yet. */
13256 if (in_attr[i].type && !out_attr[i].type)
13257 out_attr[i].type = in_attr[i].type;
13260 /* Merge Tag_compatibility attributes and any common GNU ones. */
13261 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
13264 /* Check for any attributes not known on ARM. */
13265 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
13271 /* Return TRUE if the two EABI versions are incompatible. */
13274 elf32_arm_versions_compatible (unsigned iver, unsigned over)
13276 /* v4 and v5 are the same spec before and after it was released,
13277 so allow mixing them. */
13278 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
13279 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
13282 return (iver == over);
13285 /* Merge backend specific data from an object file to the output
13286 object file when linking. */
13289 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
13291 /* Display the flags field. */
13294 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
13296 FILE * file = (FILE *) ptr;
13297 unsigned long flags;
13299 BFD_ASSERT (abfd != NULL && ptr != NULL);
13301 /* Print normal ELF private data. */
13302 _bfd_elf_print_private_bfd_data (abfd, ptr);
13304 flags = elf_elfheader (abfd)->e_flags;
13305 /* Ignore init flag - it may not be set, despite the flags field
13306 containing valid data. */
13308 /* xgettext:c-format */
13309 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
13311 switch (EF_ARM_EABI_VERSION (flags))
13313 case EF_ARM_EABI_UNKNOWN:
13314 /* The following flag bits are GNU extensions and not part of the
13315 official ARM ELF extended ABI. Hence they are only decoded if
13316 the EABI version is not set. */
13317 if (flags & EF_ARM_INTERWORK)
13318 fprintf (file, _(" [interworking enabled]"));
13320 if (flags & EF_ARM_APCS_26)
13321 fprintf (file, " [APCS-26]");
13323 fprintf (file, " [APCS-32]");
13325 if (flags & EF_ARM_VFP_FLOAT)
13326 fprintf (file, _(" [VFP float format]"));
13327 else if (flags & EF_ARM_MAVERICK_FLOAT)
13328 fprintf (file, _(" [Maverick float format]"));
13330 fprintf (file, _(" [FPA float format]"));
13332 if (flags & EF_ARM_APCS_FLOAT)
13333 fprintf (file, _(" [floats passed in float registers]"));
13335 if (flags & EF_ARM_PIC)
13336 fprintf (file, _(" [position independent]"));
13338 if (flags & EF_ARM_NEW_ABI)
13339 fprintf (file, _(" [new ABI]"));
13341 if (flags & EF_ARM_OLD_ABI)
13342 fprintf (file, _(" [old ABI]"));
13344 if (flags & EF_ARM_SOFT_FLOAT)
13345 fprintf (file, _(" [software FP]"));
13347 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
13348 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
13349 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
13350 | EF_ARM_MAVERICK_FLOAT);
13353 case EF_ARM_EABI_VER1:
13354 fprintf (file, _(" [Version1 EABI]"));
13356 if (flags & EF_ARM_SYMSARESORTED)
13357 fprintf (file, _(" [sorted symbol table]"));
13359 fprintf (file, _(" [unsorted symbol table]"));
13361 flags &= ~ EF_ARM_SYMSARESORTED;
13364 case EF_ARM_EABI_VER2:
13365 fprintf (file, _(" [Version2 EABI]"));
13367 if (flags & EF_ARM_SYMSARESORTED)
13368 fprintf (file, _(" [sorted symbol table]"));
13370 fprintf (file, _(" [unsorted symbol table]"));
13372 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
13373 fprintf (file, _(" [dynamic symbols use segment index]"));
13375 if (flags & EF_ARM_MAPSYMSFIRST)
13376 fprintf (file, _(" [mapping symbols precede others]"));
13378 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
13379 | EF_ARM_MAPSYMSFIRST);
13382 case EF_ARM_EABI_VER3:
13383 fprintf (file, _(" [Version3 EABI]"));
13386 case EF_ARM_EABI_VER4:
13387 fprintf (file, _(" [Version4 EABI]"));
13390 case EF_ARM_EABI_VER5:
13391 fprintf (file, _(" [Version5 EABI]"));
13393 if (flags & EF_ARM_ABI_FLOAT_SOFT)
13394 fprintf (file, _(" [soft-float ABI]"));
13396 if (flags & EF_ARM_ABI_FLOAT_HARD)
13397 fprintf (file, _(" [hard-float ABI]"));
13399 flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
13402 if (flags & EF_ARM_BE8)
13403 fprintf (file, _(" [BE8]"));
13405 if (flags & EF_ARM_LE8)
13406 fprintf (file, _(" [LE8]"));
13408 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
13412 fprintf (file, _(" <EABI version unrecognised>"));
13416 flags &= ~ EF_ARM_EABIMASK;
13418 if (flags & EF_ARM_RELEXEC)
13419 fprintf (file, _(" [relocatable executable]"));
13421 flags &= ~EF_ARM_RELEXEC;
13424 fprintf (file, _("<Unrecognised flag bits set>"));
13426 fputc ('\n', file);
13432 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
13434 switch (ELF_ST_TYPE (elf_sym->st_info))
13436 case STT_ARM_TFUNC:
13437 return ELF_ST_TYPE (elf_sym->st_info);
13439 case STT_ARM_16BIT:
13440 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
13441 This allows us to distinguish between data used by Thumb instructions
13442 and non-data (which is probably code) inside Thumb regions of an
13444 if (type != STT_OBJECT && type != STT_TLS)
13445 return ELF_ST_TYPE (elf_sym->st_info);
13456 elf32_arm_gc_mark_hook (asection *sec,
13457 struct bfd_link_info *info,
13458 Elf_Internal_Rela *rel,
13459 struct elf_link_hash_entry *h,
13460 Elf_Internal_Sym *sym)
13463 switch (ELF32_R_TYPE (rel->r_info))
13465 case R_ARM_GNU_VTINHERIT:
13466 case R_ARM_GNU_VTENTRY:
13470 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
13473 /* Update the got entry reference counts for the section being removed. */
13476 elf32_arm_gc_sweep_hook (bfd * abfd,
13477 struct bfd_link_info * info,
13479 const Elf_Internal_Rela * relocs)
13481 Elf_Internal_Shdr *symtab_hdr;
13482 struct elf_link_hash_entry **sym_hashes;
13483 bfd_signed_vma *local_got_refcounts;
13484 const Elf_Internal_Rela *rel, *relend;
13485 struct elf32_arm_link_hash_table * globals;
13487 if (bfd_link_relocatable (info))
13490 globals = elf32_arm_hash_table (info);
13491 if (globals == NULL)
13494 elf_section_data (sec)->local_dynrel = NULL;
13496 symtab_hdr = & elf_symtab_hdr (abfd);
13497 sym_hashes = elf_sym_hashes (abfd);
13498 local_got_refcounts = elf_local_got_refcounts (abfd);
13500 check_use_blx (globals);
13502 relend = relocs + sec->reloc_count;
13503 for (rel = relocs; rel < relend; rel++)
13505 unsigned long r_symndx;
13506 struct elf_link_hash_entry *h = NULL;
13507 struct elf32_arm_link_hash_entry *eh;
13509 bfd_boolean call_reloc_p;
13510 bfd_boolean may_become_dynamic_p;
13511 bfd_boolean may_need_local_target_p;
13512 union gotplt_union *root_plt;
13513 struct arm_plt_info *arm_plt;
13515 r_symndx = ELF32_R_SYM (rel->r_info);
13516 if (r_symndx >= symtab_hdr->sh_info)
13518 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
13519 while (h->root.type == bfd_link_hash_indirect
13520 || h->root.type == bfd_link_hash_warning)
13521 h = (struct elf_link_hash_entry *) h->root.u.i.link;
13523 eh = (struct elf32_arm_link_hash_entry *) h;
13525 call_reloc_p = FALSE;
13526 may_become_dynamic_p = FALSE;
13527 may_need_local_target_p = FALSE;
13529 r_type = ELF32_R_TYPE (rel->r_info);
13530 r_type = arm_real_reloc_type (globals, r_type);
13534 case R_ARM_GOT_PREL:
13535 case R_ARM_TLS_GD32:
13536 case R_ARM_TLS_IE32:
13539 if (h->got.refcount > 0)
13540 h->got.refcount -= 1;
13542 else if (local_got_refcounts != NULL)
13544 if (local_got_refcounts[r_symndx] > 0)
13545 local_got_refcounts[r_symndx] -= 1;
13549 case R_ARM_TLS_LDM32:
13550 globals->tls_ldm_got.refcount -= 1;
13558 case R_ARM_THM_CALL:
13559 case R_ARM_THM_JUMP24:
13560 case R_ARM_THM_JUMP19:
13561 call_reloc_p = TRUE;
13562 may_need_local_target_p = TRUE;
13566 if (!globals->vxworks_p)
13568 may_need_local_target_p = TRUE;
13571 /* Fall through. */
13573 case R_ARM_ABS32_NOI:
13575 case R_ARM_REL32_NOI:
13576 case R_ARM_MOVW_ABS_NC:
13577 case R_ARM_MOVT_ABS:
13578 case R_ARM_MOVW_PREL_NC:
13579 case R_ARM_MOVT_PREL:
13580 case R_ARM_THM_MOVW_ABS_NC:
13581 case R_ARM_THM_MOVT_ABS:
13582 case R_ARM_THM_MOVW_PREL_NC:
13583 case R_ARM_THM_MOVT_PREL:
13584 /* Should the interworking branches be here also? */
13585 if ((bfd_link_pic (info) || globals->root.is_relocatable_executable)
13586 && (sec->flags & SEC_ALLOC) != 0)
13589 && elf32_arm_howto_from_type (r_type)->pc_relative)
13591 call_reloc_p = TRUE;
13592 may_need_local_target_p = TRUE;
13595 may_become_dynamic_p = TRUE;
13598 may_need_local_target_p = TRUE;
13605 if (may_need_local_target_p
13606 && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
13608 /* If PLT refcount book-keeping is wrong and too low, we'll
13609 see a zero value (going to -1) for the root PLT reference
13611 if (root_plt->refcount >= 0)
13613 BFD_ASSERT (root_plt->refcount != 0);
13614 root_plt->refcount -= 1;
13617 /* A value of -1 means the symbol has become local, forced
13618 or seeing a hidden definition. Any other negative value
13620 BFD_ASSERT (root_plt->refcount == -1);
13623 arm_plt->noncall_refcount--;
13625 if (r_type == R_ARM_THM_CALL)
13626 arm_plt->maybe_thumb_refcount--;
13628 if (r_type == R_ARM_THM_JUMP24
13629 || r_type == R_ARM_THM_JUMP19)
13630 arm_plt->thumb_refcount--;
13633 if (may_become_dynamic_p)
13635 struct elf_dyn_relocs **pp;
13636 struct elf_dyn_relocs *p;
13639 pp = &(eh->dyn_relocs);
13642 Elf_Internal_Sym *isym;
13644 isym = bfd_sym_from_r_symndx (&globals->sym_cache,
13648 pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
13652 for (; (p = *pp) != NULL; pp = &p->next)
13655 /* Everything must go for SEC. */
13665 /* Look through the relocs for a section during the first phase. */
13668 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
13669 asection *sec, const Elf_Internal_Rela *relocs)
13671 Elf_Internal_Shdr *symtab_hdr;
13672 struct elf_link_hash_entry **sym_hashes;
13673 const Elf_Internal_Rela *rel;
13674 const Elf_Internal_Rela *rel_end;
13677 struct elf32_arm_link_hash_table *htab;
13678 bfd_boolean call_reloc_p;
13679 bfd_boolean may_become_dynamic_p;
13680 bfd_boolean may_need_local_target_p;
13681 unsigned long nsyms;
13683 if (bfd_link_relocatable (info))
13686 BFD_ASSERT (is_arm_elf (abfd));
13688 htab = elf32_arm_hash_table (info);
13694 /* Create dynamic sections for relocatable executables so that we can
13695 copy relocations. */
13696 if (htab->root.is_relocatable_executable
13697 && ! htab->root.dynamic_sections_created)
13699 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
13703 if (htab->root.dynobj == NULL)
13704 htab->root.dynobj = abfd;
13705 if (!create_ifunc_sections (info))
13708 dynobj = htab->root.dynobj;
13710 symtab_hdr = & elf_symtab_hdr (abfd);
13711 sym_hashes = elf_sym_hashes (abfd);
13712 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
13714 rel_end = relocs + sec->reloc_count;
13715 for (rel = relocs; rel < rel_end; rel++)
13717 Elf_Internal_Sym *isym;
13718 struct elf_link_hash_entry *h;
13719 struct elf32_arm_link_hash_entry *eh;
13720 unsigned long r_symndx;
13723 r_symndx = ELF32_R_SYM (rel->r_info);
13724 r_type = ELF32_R_TYPE (rel->r_info);
13725 r_type = arm_real_reloc_type (htab, r_type);
13727 if (r_symndx >= nsyms
13728 /* PR 9934: It is possible to have relocations that do not
13729 refer to symbols, thus it is also possible to have an
13730 object file containing relocations but no symbol table. */
13731 && (r_symndx > STN_UNDEF || nsyms > 0))
13733 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
13742 if (r_symndx < symtab_hdr->sh_info)
13744 /* A local symbol. */
13745 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
13752 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
13753 while (h->root.type == bfd_link_hash_indirect
13754 || h->root.type == bfd_link_hash_warning)
13755 h = (struct elf_link_hash_entry *) h->root.u.i.link;
13757 /* PR15323, ref flags aren't set for references in the
13759 h->root.non_ir_ref = 1;
13763 eh = (struct elf32_arm_link_hash_entry *) h;
13765 call_reloc_p = FALSE;
13766 may_become_dynamic_p = FALSE;
13767 may_need_local_target_p = FALSE;
13769 /* Could be done earlier, if h were already available. */
13770 r_type = elf32_arm_tls_transition (info, r_type, h);
13774 case R_ARM_GOT_PREL:
13775 case R_ARM_TLS_GD32:
13776 case R_ARM_TLS_IE32:
13777 case R_ARM_TLS_GOTDESC:
13778 case R_ARM_TLS_DESCSEQ:
13779 case R_ARM_THM_TLS_DESCSEQ:
13780 case R_ARM_TLS_CALL:
13781 case R_ARM_THM_TLS_CALL:
13782 /* This symbol requires a global offset table entry. */
13784 int tls_type, old_tls_type;
13788 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
13790 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
13792 case R_ARM_TLS_GOTDESC:
13793 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
13794 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
13795 tls_type = GOT_TLS_GDESC; break;
13797 default: tls_type = GOT_NORMAL; break;
13800 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
13801 info->flags |= DF_STATIC_TLS;
13806 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
13810 /* This is a global offset table entry for a local symbol. */
13811 if (!elf32_arm_allocate_local_sym_info (abfd))
13813 elf_local_got_refcounts (abfd)[r_symndx] += 1;
13814 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
13817 /* If a variable is accessed with both tls methods, two
13818 slots may be created. */
13819 if (GOT_TLS_GD_ANY_P (old_tls_type)
13820 && GOT_TLS_GD_ANY_P (tls_type))
13821 tls_type |= old_tls_type;
13823 /* We will already have issued an error message if there
13824 is a TLS/non-TLS mismatch, based on the symbol
13825 type. So just combine any TLS types needed. */
13826 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
13827 && tls_type != GOT_NORMAL)
13828 tls_type |= old_tls_type;
13830 /* If the symbol is accessed in both IE and GDESC
13831 method, we're able to relax. Turn off the GDESC flag,
13832 without messing up with any other kind of tls types
13833 that may be involved. */
13834 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
13835 tls_type &= ~GOT_TLS_GDESC;
13837 if (old_tls_type != tls_type)
13840 elf32_arm_hash_entry (h)->tls_type = tls_type;
13842 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
13845 /* Fall through. */
13847 case R_ARM_TLS_LDM32:
13848 if (r_type == R_ARM_TLS_LDM32)
13849 htab->tls_ldm_got.refcount++;
13850 /* Fall through. */
13852 case R_ARM_GOTOFF32:
13854 if (htab->root.sgot == NULL
13855 && !create_got_section (htab->root.dynobj, info))
13864 case R_ARM_THM_CALL:
13865 case R_ARM_THM_JUMP24:
13866 case R_ARM_THM_JUMP19:
13867 call_reloc_p = TRUE;
13868 may_need_local_target_p = TRUE;
13872 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
13873 ldr __GOTT_INDEX__ offsets. */
13874 if (!htab->vxworks_p)
13876 may_need_local_target_p = TRUE;
13879 else goto jump_over;
13881 /* Fall through. */
13883 case R_ARM_MOVW_ABS_NC:
13884 case R_ARM_MOVT_ABS:
13885 case R_ARM_THM_MOVW_ABS_NC:
13886 case R_ARM_THM_MOVT_ABS:
13887 if (bfd_link_pic (info))
13889 (*_bfd_error_handler)
13890 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
13891 abfd, elf32_arm_howto_table_1[r_type].name,
13892 (h) ? h->root.root.string : "a local symbol");
13893 bfd_set_error (bfd_error_bad_value);
13897 /* Fall through. */
13899 case R_ARM_ABS32_NOI:
13901 if (h != NULL && bfd_link_executable (info))
13903 h->pointer_equality_needed = 1;
13905 /* Fall through. */
13907 case R_ARM_REL32_NOI:
13908 case R_ARM_MOVW_PREL_NC:
13909 case R_ARM_MOVT_PREL:
13910 case R_ARM_THM_MOVW_PREL_NC:
13911 case R_ARM_THM_MOVT_PREL:
13913 /* Should the interworking branches be listed here? */
13914 if ((bfd_link_pic (info) || htab->root.is_relocatable_executable)
13915 && (sec->flags & SEC_ALLOC) != 0)
13918 && elf32_arm_howto_from_type (r_type)->pc_relative)
13920 /* In shared libraries and relocatable executables,
13921 we treat local relative references as calls;
13922 see the related SYMBOL_CALLS_LOCAL code in
13923 allocate_dynrelocs. */
13924 call_reloc_p = TRUE;
13925 may_need_local_target_p = TRUE;
13928 /* We are creating a shared library or relocatable
13929 executable, and this is a reloc against a global symbol,
13930 or a non-PC-relative reloc against a local symbol.
13931 We may need to copy the reloc into the output. */
13932 may_become_dynamic_p = TRUE;
13935 may_need_local_target_p = TRUE;
13938 /* This relocation describes the C++ object vtable hierarchy.
13939 Reconstruct it for later use during GC. */
13940 case R_ARM_GNU_VTINHERIT:
13941 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
13945 /* This relocation describes which C++ vtable entries are actually
13946 used. Record for later use during GC. */
13947 case R_ARM_GNU_VTENTRY:
13948 BFD_ASSERT (h != NULL);
13950 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
13958 /* We may need a .plt entry if the function this reloc
13959 refers to is in a different object, regardless of the
13960 symbol's type. We can't tell for sure yet, because
13961 something later might force the symbol local. */
13963 else if (may_need_local_target_p)
13964 /* If this reloc is in a read-only section, we might
13965 need a copy reloc. We can't check reliably at this
13966 stage whether the section is read-only, as input
13967 sections have not yet been mapped to output sections.
13968 Tentatively set the flag for now, and correct in
13969 adjust_dynamic_symbol. */
13970 h->non_got_ref = 1;
13973 if (may_need_local_target_p
13974 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
13976 union gotplt_union *root_plt;
13977 struct arm_plt_info *arm_plt;
13978 struct arm_local_iplt_info *local_iplt;
13982 root_plt = &h->plt;
13983 arm_plt = &eh->plt;
13987 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
13988 if (local_iplt == NULL)
13990 root_plt = &local_iplt->root;
13991 arm_plt = &local_iplt->arm;
13994 /* If the symbol is a function that doesn't bind locally,
13995 this relocation will need a PLT entry. */
13996 if (root_plt->refcount != -1)
13997 root_plt->refcount += 1;
14000 arm_plt->noncall_refcount++;
14002 /* It's too early to use htab->use_blx here, so we have to
14003 record possible blx references separately from
14004 relocs that definitely need a thumb stub. */
14006 if (r_type == R_ARM_THM_CALL)
14007 arm_plt->maybe_thumb_refcount += 1;
14009 if (r_type == R_ARM_THM_JUMP24
14010 || r_type == R_ARM_THM_JUMP19)
14011 arm_plt->thumb_refcount += 1;
14014 if (may_become_dynamic_p)
14016 struct elf_dyn_relocs *p, **head;
14018 /* Create a reloc section in dynobj. */
14019 if (sreloc == NULL)
14021 sreloc = _bfd_elf_make_dynamic_reloc_section
14022 (sec, dynobj, 2, abfd, ! htab->use_rel);
14024 if (sreloc == NULL)
14027 /* BPABI objects never have dynamic relocations mapped. */
14028 if (htab->symbian_p)
14032 flags = bfd_get_section_flags (dynobj, sreloc);
14033 flags &= ~(SEC_LOAD | SEC_ALLOC);
14034 bfd_set_section_flags (dynobj, sreloc, flags);
14038 /* If this is a global symbol, count the number of
14039 relocations we need for this symbol. */
14041 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
14044 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
14050 if (p == NULL || p->sec != sec)
14052 bfd_size_type amt = sizeof *p;
14054 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
14064 if (elf32_arm_howto_from_type (r_type)->pc_relative)
14073 /* Unwinding tables are not referenced directly. This pass marks them as
14074 required if the corresponding code section is marked. */
14077 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
14078 elf_gc_mark_hook_fn gc_mark_hook)
14081 Elf_Internal_Shdr **elf_shdrp;
14084 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
14086 /* Marking EH data may cause additional code sections to be marked,
14087 requiring multiple passes. */
14092 for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
14096 if (! is_arm_elf (sub))
14099 elf_shdrp = elf_elfsections (sub);
14100 for (o = sub->sections; o != NULL; o = o->next)
14102 Elf_Internal_Shdr *hdr;
14104 hdr = &elf_section_data (o)->this_hdr;
14105 if (hdr->sh_type == SHT_ARM_EXIDX
14107 && hdr->sh_link < elf_numsections (sub)
14109 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
14112 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
14122 /* Treat mapping symbols as special target symbols. */
14125 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
14127 return bfd_is_arm_special_symbol_name (sym->name,
14128 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
14131 /* This is a copy of elf_find_function() from elf.c except that
14132 ARM mapping symbols are ignored when looking for function names
14133 and STT_ARM_TFUNC is considered to a function type. */
14136 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
14137 asymbol ** symbols,
14138 asection * section,
14140 const char ** filename_ptr,
14141 const char ** functionname_ptr)
14143 const char * filename = NULL;
14144 asymbol * func = NULL;
14145 bfd_vma low_func = 0;
14148 for (p = symbols; *p != NULL; p++)
14150 elf_symbol_type *q;
14152 q = (elf_symbol_type *) *p;
14154 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
14159 filename = bfd_asymbol_name (&q->symbol);
14162 case STT_ARM_TFUNC:
14164 /* Skip mapping symbols. */
14165 if ((q->symbol.flags & BSF_LOCAL)
14166 && bfd_is_arm_special_symbol_name (q->symbol.name,
14167 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
14169 /* Fall through. */
14170 if (bfd_get_section (&q->symbol) == section
14171 && q->symbol.value >= low_func
14172 && q->symbol.value <= offset)
14174 func = (asymbol *) q;
14175 low_func = q->symbol.value;
14185 *filename_ptr = filename;
14186 if (functionname_ptr)
14187 *functionname_ptr = bfd_asymbol_name (func);
14193 /* Find the nearest line to a particular section and offset, for error
14194 reporting. This code is a duplicate of the code in elf.c, except
14195 that it uses arm_elf_find_function. */
14198 elf32_arm_find_nearest_line (bfd * abfd,
14199 asymbol ** symbols,
14200 asection * section,
14202 const char ** filename_ptr,
14203 const char ** functionname_ptr,
14204 unsigned int * line_ptr,
14205 unsigned int * discriminator_ptr)
14207 bfd_boolean found = FALSE;
14209 if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
14210 filename_ptr, functionname_ptr,
14211 line_ptr, discriminator_ptr,
14212 dwarf_debug_sections, 0,
14213 & elf_tdata (abfd)->dwarf2_find_line_info))
14215 if (!*functionname_ptr)
14216 arm_elf_find_function (abfd, symbols, section, offset,
14217 *filename_ptr ? NULL : filename_ptr,
14223 /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
14226 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
14227 & found, filename_ptr,
14228 functionname_ptr, line_ptr,
14229 & elf_tdata (abfd)->line_info))
14232 if (found && (*functionname_ptr || *line_ptr))
14235 if (symbols == NULL)
14238 if (! arm_elf_find_function (abfd, symbols, section, offset,
14239 filename_ptr, functionname_ptr))
14247 elf32_arm_find_inliner_info (bfd * abfd,
14248 const char ** filename_ptr,
14249 const char ** functionname_ptr,
14250 unsigned int * line_ptr)
14253 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
14254 functionname_ptr, line_ptr,
14255 & elf_tdata (abfd)->dwarf2_find_line_info);
14259 /* Adjust a symbol defined by a dynamic object and referenced by a
14260 regular object. The current definition is in some section of the
14261 dynamic object, but we're not including those sections. We have to
14262 change the definition to something the rest of the link can
14266 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
14267 struct elf_link_hash_entry * h)
14271 struct elf32_arm_link_hash_entry * eh;
14272 struct elf32_arm_link_hash_table *globals;
14274 globals = elf32_arm_hash_table (info);
14275 if (globals == NULL)
14278 dynobj = elf_hash_table (info)->dynobj;
14280 /* Make sure we know what is going on here. */
14281 BFD_ASSERT (dynobj != NULL
14283 || h->type == STT_GNU_IFUNC
14284 || h->u.weakdef != NULL
14287 && !h->def_regular)));
14289 eh = (struct elf32_arm_link_hash_entry *) h;
14291 /* If this is a function, put it in the procedure linkage table. We
14292 will fill in the contents of the procedure linkage table later,
14293 when we know the address of the .got section. */
14294 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
14296 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
14297 symbol binds locally. */
14298 if (h->plt.refcount <= 0
14299 || (h->type != STT_GNU_IFUNC
14300 && (SYMBOL_CALLS_LOCAL (info, h)
14301 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
14302 && h->root.type == bfd_link_hash_undefweak))))
14304 /* This case can occur if we saw a PLT32 reloc in an input
14305 file, but the symbol was never referred to by a dynamic
14306 object, or if all references were garbage collected. In
14307 such a case, we don't actually need to build a procedure
14308 linkage table, and we can just do a PC24 reloc instead. */
14309 h->plt.offset = (bfd_vma) -1;
14310 eh->plt.thumb_refcount = 0;
14311 eh->plt.maybe_thumb_refcount = 0;
14312 eh->plt.noncall_refcount = 0;
14320 /* It's possible that we incorrectly decided a .plt reloc was
14321 needed for an R_ARM_PC24 or similar reloc to a non-function sym
14322 in check_relocs. We can't decide accurately between function
14323 and non-function syms in check-relocs; Objects loaded later in
14324 the link may change h->type. So fix it now. */
14325 h->plt.offset = (bfd_vma) -1;
14326 eh->plt.thumb_refcount = 0;
14327 eh->plt.maybe_thumb_refcount = 0;
14328 eh->plt.noncall_refcount = 0;
14331 /* If this is a weak symbol, and there is a real definition, the
14332 processor independent code will have arranged for us to see the
14333 real definition first, and we can just use the same value. */
14334 if (h->u.weakdef != NULL)
14336 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
14337 || h->u.weakdef->root.type == bfd_link_hash_defweak);
14338 h->root.u.def.section = h->u.weakdef->root.u.def.section;
14339 h->root.u.def.value = h->u.weakdef->root.u.def.value;
14343 /* If there are no non-GOT references, we do not need a copy
14345 if (!h->non_got_ref)
14348 /* This is a reference to a symbol defined by a dynamic object which
14349 is not a function. */
14351 /* If we are creating a shared library, we must presume that the
14352 only references to the symbol are via the global offset table.
14353 For such cases we need not do anything here; the relocations will
14354 be handled correctly by relocate_section. Relocatable executables
14355 can reference data in shared objects directly, so we don't need to
14356 do anything here. */
14357 if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
14360 /* We must allocate the symbol in our .dynbss section, which will
14361 become part of the .bss section of the executable. There will be
14362 an entry for this symbol in the .dynsym section. The dynamic
14363 object will contain position independent code, so all references
14364 from the dynamic object to this symbol will go through the global
14365 offset table. The dynamic linker will use the .dynsym entry to
14366 determine the address it must put in the global offset table, so
14367 both the dynamic object and the regular object will refer to the
14368 same memory location for the variable. */
14369 s = bfd_get_linker_section (dynobj, ".dynbss");
14370 BFD_ASSERT (s != NULL);
14372 /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
14373 linker to copy the initial value out of the dynamic object and into
14374 the runtime process image. We need to remember the offset into the
14375 .rel(a).bss section we are going to use. */
14376 if (info->nocopyreloc == 0
14377 && (h->root.u.def.section->flags & SEC_ALLOC) != 0
14382 srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
14383 elf32_arm_allocate_dynrelocs (info, srel, 1);
14387 return _bfd_elf_adjust_dynamic_copy (info, h, s);
14390 /* Allocate space in .plt, .got and associated reloc sections for
14394 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
14396 struct bfd_link_info *info;
14397 struct elf32_arm_link_hash_table *htab;
14398 struct elf32_arm_link_hash_entry *eh;
14399 struct elf_dyn_relocs *p;
14401 if (h->root.type == bfd_link_hash_indirect)
14404 eh = (struct elf32_arm_link_hash_entry *) h;
14406 info = (struct bfd_link_info *) inf;
14407 htab = elf32_arm_hash_table (info);
14411 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
14412 && h->plt.refcount > 0)
14414 /* Make sure this symbol is output as a dynamic symbol.
14415 Undefined weak syms won't yet be marked as dynamic. */
14416 if (h->dynindx == -1
14417 && !h->forced_local)
14419 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14423 /* If the call in the PLT entry binds locally, the associated
14424 GOT entry should use an R_ARM_IRELATIVE relocation instead of
14425 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
14426 than the .plt section. */
14427 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
14430 if (eh->plt.noncall_refcount == 0
14431 && SYMBOL_REFERENCES_LOCAL (info, h))
14432 /* All non-call references can be resolved directly.
14433 This means that they can (and in some cases, must)
14434 resolve directly to the run-time target, rather than
14435 to the PLT. That in turns means that any .got entry
14436 would be equal to the .igot.plt entry, so there's
14437 no point having both. */
14438 h->got.refcount = 0;
14441 if (bfd_link_pic (info)
14443 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
14445 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
14447 /* If this symbol is not defined in a regular file, and we are
14448 not generating a shared library, then set the symbol to this
14449 location in the .plt. This is required to make function
14450 pointers compare as equal between the normal executable and
14451 the shared library. */
14452 if (! bfd_link_pic (info)
14453 && !h->def_regular)
14455 h->root.u.def.section = htab->root.splt;
14456 h->root.u.def.value = h->plt.offset;
14458 /* Make sure the function is not marked as Thumb, in case
14459 it is the target of an ABS32 relocation, which will
14460 point to the PLT entry. */
14461 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
14464 /* VxWorks executables have a second set of relocations for
14465 each PLT entry. They go in a separate relocation section,
14466 which is processed by the kernel loader. */
14467 if (htab->vxworks_p && !bfd_link_pic (info))
14469 /* There is a relocation for the initial PLT entry:
14470 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
14471 if (h->plt.offset == htab->plt_header_size)
14472 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
14474 /* There are two extra relocations for each subsequent
14475 PLT entry: an R_ARM_32 relocation for the GOT entry,
14476 and an R_ARM_32 relocation for the PLT entry. */
14477 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
14482 h->plt.offset = (bfd_vma) -1;
14488 h->plt.offset = (bfd_vma) -1;
14492 eh = (struct elf32_arm_link_hash_entry *) h;
14493 eh->tlsdesc_got = (bfd_vma) -1;
14495 if (h->got.refcount > 0)
14499 int tls_type = elf32_arm_hash_entry (h)->tls_type;
14502 /* Make sure this symbol is output as a dynamic symbol.
14503 Undefined weak syms won't yet be marked as dynamic. */
14504 if (h->dynindx == -1
14505 && !h->forced_local)
14507 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14511 if (!htab->symbian_p)
14513 s = htab->root.sgot;
14514 h->got.offset = s->size;
14516 if (tls_type == GOT_UNKNOWN)
14519 if (tls_type == GOT_NORMAL)
14520 /* Non-TLS symbols need one GOT slot. */
14524 if (tls_type & GOT_TLS_GDESC)
14526 /* R_ARM_TLS_DESC needs 2 GOT slots. */
14528 = (htab->root.sgotplt->size
14529 - elf32_arm_compute_jump_table_size (htab));
14530 htab->root.sgotplt->size += 8;
14531 h->got.offset = (bfd_vma) -2;
14532 /* plt.got_offset needs to know there's a TLS_DESC
14533 reloc in the middle of .got.plt. */
14534 htab->num_tls_desc++;
14537 if (tls_type & GOT_TLS_GD)
14539 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
14540 the symbol is both GD and GDESC, got.offset may
14541 have been overwritten. */
14542 h->got.offset = s->size;
14546 if (tls_type & GOT_TLS_IE)
14547 /* R_ARM_TLS_IE32 needs one GOT slot. */
14551 dyn = htab->root.dynamic_sections_created;
14554 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
14555 bfd_link_pic (info),
14557 && (!bfd_link_pic (info)
14558 || !SYMBOL_REFERENCES_LOCAL (info, h)))
14561 if (tls_type != GOT_NORMAL
14562 && (bfd_link_pic (info) || indx != 0)
14563 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
14564 || h->root.type != bfd_link_hash_undefweak))
14566 if (tls_type & GOT_TLS_IE)
14567 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14569 if (tls_type & GOT_TLS_GD)
14570 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14572 if (tls_type & GOT_TLS_GDESC)
14574 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
14575 /* GDESC needs a trampoline to jump to. */
14576 htab->tls_trampoline = -1;
14579 /* Only GD needs it. GDESC just emits one relocation per
14581 if ((tls_type & GOT_TLS_GD) && indx != 0)
14582 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14584 else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
14586 if (htab->root.dynamic_sections_created)
14587 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
14588 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14590 else if (h->type == STT_GNU_IFUNC
14591 && eh->plt.noncall_refcount == 0)
14592 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
14593 they all resolve dynamically instead. Reserve room for the
14594 GOT entry's R_ARM_IRELATIVE relocation. */
14595 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
14596 else if (bfd_link_pic (info)
14597 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
14598 || h->root.type != bfd_link_hash_undefweak))
14599 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
14600 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14604 h->got.offset = (bfd_vma) -1;
14606 /* Allocate stubs for exported Thumb functions on v4t. */
14607 if (!htab->use_blx && h->dynindx != -1
14609 && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
14610 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
14612 struct elf_link_hash_entry * th;
14613 struct bfd_link_hash_entry * bh;
14614 struct elf_link_hash_entry * myh;
14618 /* Create a new symbol to regist the real location of the function. */
14619 s = h->root.u.def.section;
14620 sprintf (name, "__real_%s", h->root.root.string);
14621 _bfd_generic_link_add_one_symbol (info, s->owner,
14622 name, BSF_GLOBAL, s,
14623 h->root.u.def.value,
14624 NULL, TRUE, FALSE, &bh);
14626 myh = (struct elf_link_hash_entry *) bh;
14627 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14628 myh->forced_local = 1;
14629 ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
14630 eh->export_glue = myh;
14631 th = record_arm_to_thumb_glue (info, h);
14632 /* Point the symbol at the stub. */
14633 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
14634 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
14635 h->root.u.def.section = th->root.u.def.section;
14636 h->root.u.def.value = th->root.u.def.value & ~1;
14639 if (eh->dyn_relocs == NULL)
14642 /* In the shared -Bsymbolic case, discard space allocated for
14643 dynamic pc-relative relocs against symbols which turn out to be
14644 defined in regular objects. For the normal shared case, discard
14645 space for pc-relative relocs that have become local due to symbol
14646 visibility changes. */
14648 if (bfd_link_pic (info) || htab->root.is_relocatable_executable)
14650 /* Relocs that use pc_count are PC-relative forms, which will appear
14651 on something like ".long foo - ." or "movw REG, foo - .". We want
14652 calls to protected symbols to resolve directly to the function
14653 rather than going via the plt. If people want function pointer
14654 comparisons to work as expected then they should avoid writing
14655 assembly like ".long foo - .". */
14656 if (SYMBOL_CALLS_LOCAL (info, h))
14658 struct elf_dyn_relocs **pp;
14660 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
14662 p->count -= p->pc_count;
14671 if (htab->vxworks_p)
14673 struct elf_dyn_relocs **pp;
14675 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
14677 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
14684 /* Also discard relocs on undefined weak syms with non-default
14686 if (eh->dyn_relocs != NULL
14687 && h->root.type == bfd_link_hash_undefweak)
14689 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
14690 eh->dyn_relocs = NULL;
14692 /* Make sure undefined weak symbols are output as a dynamic
14694 else if (h->dynindx == -1
14695 && !h->forced_local)
14697 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14702 else if (htab->root.is_relocatable_executable && h->dynindx == -1
14703 && h->root.type == bfd_link_hash_new)
14705 /* Output absolute symbols so that we can create relocations
14706 against them. For normal symbols we output a relocation
14707 against the section that contains them. */
14708 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14715 /* For the non-shared case, discard space for relocs against
14716 symbols which turn out to need copy relocs or are not
14719 if (!h->non_got_ref
14720 && ((h->def_dynamic
14721 && !h->def_regular)
14722 || (htab->root.dynamic_sections_created
14723 && (h->root.type == bfd_link_hash_undefweak
14724 || h->root.type == bfd_link_hash_undefined))))
14726 /* Make sure this symbol is output as a dynamic symbol.
14727 Undefined weak syms won't yet be marked as dynamic. */
14728 if (h->dynindx == -1
14729 && !h->forced_local)
14731 if (! bfd_elf_link_record_dynamic_symbol (info, h))
14735 /* If that succeeded, we know we'll be keeping all the
14737 if (h->dynindx != -1)
14741 eh->dyn_relocs = NULL;
14746 /* Finally, allocate space. */
14747 for (p = eh->dyn_relocs; p != NULL; p = p->next)
14749 asection *sreloc = elf_section_data (p->sec)->sreloc;
14750 if (h->type == STT_GNU_IFUNC
14751 && eh->plt.noncall_refcount == 0
14752 && SYMBOL_REFERENCES_LOCAL (info, h))
14753 elf32_arm_allocate_irelocs (info, sreloc, p->count);
14755 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
14761 /* Find any dynamic relocs that apply to read-only sections. */
14764 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
14766 struct elf32_arm_link_hash_entry * eh;
14767 struct elf_dyn_relocs * p;
14769 eh = (struct elf32_arm_link_hash_entry *) h;
14770 for (p = eh->dyn_relocs; p != NULL; p = p->next)
14772 asection *s = p->sec;
14774 if (s != NULL && (s->flags & SEC_READONLY) != 0)
14776 struct bfd_link_info *info = (struct bfd_link_info *) inf;
14778 info->flags |= DF_TEXTREL;
14780 /* Not an error, just cut short the traversal. */
14788 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
14791 struct elf32_arm_link_hash_table *globals;
14793 globals = elf32_arm_hash_table (info);
14794 if (globals == NULL)
14797 globals->byteswap_code = byteswap_code;
14800 /* Set the sizes of the dynamic sections. */
14803 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
14804 struct bfd_link_info * info)
14809 bfd_boolean relocs;
14811 struct elf32_arm_link_hash_table *htab;
14813 htab = elf32_arm_hash_table (info);
14817 dynobj = elf_hash_table (info)->dynobj;
14818 BFD_ASSERT (dynobj != NULL);
14819 check_use_blx (htab);
14821 if (elf_hash_table (info)->dynamic_sections_created)
14823 /* Set the contents of the .interp section to the interpreter. */
14824 if (bfd_link_executable (info) && !info->nointerp)
14826 s = bfd_get_linker_section (dynobj, ".interp");
14827 BFD_ASSERT (s != NULL);
14828 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
14829 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
14833 /* Set up .got offsets for local syms, and space for local dynamic
14835 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
14837 bfd_signed_vma *local_got;
14838 bfd_signed_vma *end_local_got;
14839 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
14840 char *local_tls_type;
14841 bfd_vma *local_tlsdesc_gotent;
14842 bfd_size_type locsymcount;
14843 Elf_Internal_Shdr *symtab_hdr;
14845 bfd_boolean is_vxworks = htab->vxworks_p;
14846 unsigned int symndx;
14848 if (! is_arm_elf (ibfd))
14851 for (s = ibfd->sections; s != NULL; s = s->next)
14853 struct elf_dyn_relocs *p;
14855 for (p = (struct elf_dyn_relocs *)
14856 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
14858 if (!bfd_is_abs_section (p->sec)
14859 && bfd_is_abs_section (p->sec->output_section))
14861 /* Input section has been discarded, either because
14862 it is a copy of a linkonce section or due to
14863 linker script /DISCARD/, so we'll be discarding
14866 else if (is_vxworks
14867 && strcmp (p->sec->output_section->name,
14870 /* Relocations in vxworks .tls_vars sections are
14871 handled specially by the loader. */
14873 else if (p->count != 0)
14875 srel = elf_section_data (p->sec)->sreloc;
14876 elf32_arm_allocate_dynrelocs (info, srel, p->count);
14877 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
14878 info->flags |= DF_TEXTREL;
14883 local_got = elf_local_got_refcounts (ibfd);
14887 symtab_hdr = & elf_symtab_hdr (ibfd);
14888 locsymcount = symtab_hdr->sh_info;
14889 end_local_got = local_got + locsymcount;
14890 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
14891 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
14892 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
14894 s = htab->root.sgot;
14895 srel = htab->root.srelgot;
14896 for (; local_got < end_local_got;
14897 ++local_got, ++local_iplt_ptr, ++local_tls_type,
14898 ++local_tlsdesc_gotent, ++symndx)
14900 *local_tlsdesc_gotent = (bfd_vma) -1;
14901 local_iplt = *local_iplt_ptr;
14902 if (local_iplt != NULL)
14904 struct elf_dyn_relocs *p;
14906 if (local_iplt->root.refcount > 0)
14908 elf32_arm_allocate_plt_entry (info, TRUE,
14911 if (local_iplt->arm.noncall_refcount == 0)
14912 /* All references to the PLT are calls, so all
14913 non-call references can resolve directly to the
14914 run-time target. This means that the .got entry
14915 would be the same as the .igot.plt entry, so there's
14916 no point creating both. */
14921 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
14922 local_iplt->root.offset = (bfd_vma) -1;
14925 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
14929 psrel = elf_section_data (p->sec)->sreloc;
14930 if (local_iplt->arm.noncall_refcount == 0)
14931 elf32_arm_allocate_irelocs (info, psrel, p->count);
14933 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
14936 if (*local_got > 0)
14938 Elf_Internal_Sym *isym;
14940 *local_got = s->size;
14941 if (*local_tls_type & GOT_TLS_GD)
14942 /* TLS_GD relocs need an 8-byte structure in the GOT. */
14944 if (*local_tls_type & GOT_TLS_GDESC)
14946 *local_tlsdesc_gotent = htab->root.sgotplt->size
14947 - elf32_arm_compute_jump_table_size (htab);
14948 htab->root.sgotplt->size += 8;
14949 *local_got = (bfd_vma) -2;
14950 /* plt.got_offset needs to know there's a TLS_DESC
14951 reloc in the middle of .got.plt. */
14952 htab->num_tls_desc++;
14954 if (*local_tls_type & GOT_TLS_IE)
14957 if (*local_tls_type & GOT_NORMAL)
14959 /* If the symbol is both GD and GDESC, *local_got
14960 may have been overwritten. */
14961 *local_got = s->size;
14965 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
14969 /* If all references to an STT_GNU_IFUNC PLT are calls,
14970 then all non-call references, including this GOT entry,
14971 resolve directly to the run-time target. */
14972 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
14973 && (local_iplt == NULL
14974 || local_iplt->arm.noncall_refcount == 0))
14975 elf32_arm_allocate_irelocs (info, srel, 1);
14976 else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC)
14978 if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
14979 || *local_tls_type & GOT_TLS_GD)
14980 elf32_arm_allocate_dynrelocs (info, srel, 1);
14982 if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC)
14984 elf32_arm_allocate_dynrelocs (info,
14985 htab->root.srelplt, 1);
14986 htab->tls_trampoline = -1;
14991 *local_got = (bfd_vma) -1;
14995 if (htab->tls_ldm_got.refcount > 0)
14997 /* Allocate two GOT entries and one dynamic relocation (if necessary)
14998 for R_ARM_TLS_LDM32 relocations. */
14999 htab->tls_ldm_got.offset = htab->root.sgot->size;
15000 htab->root.sgot->size += 8;
15001 if (bfd_link_pic (info))
15002 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
15005 htab->tls_ldm_got.offset = -1;
15007 /* Allocate global sym .plt and .got entries, and space for global
15008 sym dynamic relocs. */
15009 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
15011 /* Here we rummage through the found bfds to collect glue information. */
15012 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
15014 if (! is_arm_elf (ibfd))
15017 /* Initialise mapping tables for code/data. */
15018 bfd_elf32_arm_init_maps (ibfd);
15020 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
15021 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
15022 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
15023 /* xgettext:c-format */
15024 _bfd_error_handler (_("Errors encountered processing file %s"),
15028 /* Allocate space for the glue sections now that we've sized them. */
15029 bfd_elf32_arm_allocate_interworking_sections (info);
15031 /* For every jump slot reserved in the sgotplt, reloc_count is
15032 incremented. However, when we reserve space for TLS descriptors,
15033 it's not incremented, so in order to compute the space reserved
15034 for them, it suffices to multiply the reloc count by the jump
15036 if (htab->root.srelplt)
15037 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);
15039 if (htab->tls_trampoline)
15041 if (htab->root.splt->size == 0)
15042 htab->root.splt->size += htab->plt_header_size;
15044 htab->tls_trampoline = htab->root.splt->size;
15045 htab->root.splt->size += htab->plt_entry_size;
15047 /* If we're not using lazy TLS relocations, don't generate the
15048 PLT and GOT entries they require. */
15049 if (!(info->flags & DF_BIND_NOW))
15051 htab->dt_tlsdesc_got = htab->root.sgot->size;
15052 htab->root.sgot->size += 4;
15054 htab->dt_tlsdesc_plt = htab->root.splt->size;
15055 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
15059 /* The check_relocs and adjust_dynamic_symbol entry points have
15060 determined the sizes of the various dynamic sections. Allocate
15061 memory for them. */
15064 for (s = dynobj->sections; s != NULL; s = s->next)
15068 if ((s->flags & SEC_LINKER_CREATED) == 0)
15071 /* It's OK to base decisions on the section name, because none
15072 of the dynobj section names depend upon the input files. */
15073 name = bfd_get_section_name (dynobj, s);
15075 if (s == htab->root.splt)
15077 /* Remember whether there is a PLT. */
15078 plt = s->size != 0;
15080 else if (CONST_STRNEQ (name, ".rel"))
15084 /* Remember whether there are any reloc sections other
15085 than .rel(a).plt and .rela.plt.unloaded. */
15086 if (s != htab->root.srelplt && s != htab->srelplt2)
15089 /* We use the reloc_count field as a counter if we need
15090 to copy relocs into the output file. */
15091 s->reloc_count = 0;
15094 else if (s != htab->root.sgot
15095 && s != htab->root.sgotplt
15096 && s != htab->root.iplt
15097 && s != htab->root.igotplt
15098 && s != htab->sdynbss)
15100 /* It's not one of our sections, so don't allocate space. */
15106 /* If we don't need this section, strip it from the
15107 output file. This is mostly to handle .rel(a).bss and
15108 .rel(a).plt. We must create both sections in
15109 create_dynamic_sections, because they must be created
15110 before the linker maps input sections to output
15111 sections. The linker does that before
15112 adjust_dynamic_symbol is called, and it is that
15113 function which decides whether anything needs to go
15114 into these sections. */
15115 s->flags |= SEC_EXCLUDE;
15119 if ((s->flags & SEC_HAS_CONTENTS) == 0)
15122 /* Allocate memory for the section contents. */
15123 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
15124 if (s->contents == NULL)
15128 if (elf_hash_table (info)->dynamic_sections_created)
15130 /* Add some entries to the .dynamic section. We fill in the
15131 values later, in elf32_arm_finish_dynamic_sections, but we
15132 must add the entries now so that we get the correct size for
15133 the .dynamic section. The DT_DEBUG entry is filled in by the
15134 dynamic linker and used by the debugger. */
15135 #define add_dynamic_entry(TAG, VAL) \
15136 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
15138 if (bfd_link_executable (info))
15140 if (!add_dynamic_entry (DT_DEBUG, 0))
15146 if ( !add_dynamic_entry (DT_PLTGOT, 0)
15147 || !add_dynamic_entry (DT_PLTRELSZ, 0)
15148 || !add_dynamic_entry (DT_PLTREL,
15149 htab->use_rel ? DT_REL : DT_RELA)
15150 || !add_dynamic_entry (DT_JMPREL, 0))
15153 if (htab->dt_tlsdesc_plt &&
15154 (!add_dynamic_entry (DT_TLSDESC_PLT,0)
15155 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
15163 if (!add_dynamic_entry (DT_REL, 0)
15164 || !add_dynamic_entry (DT_RELSZ, 0)
15165 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
15170 if (!add_dynamic_entry (DT_RELA, 0)
15171 || !add_dynamic_entry (DT_RELASZ, 0)
15172 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
15177 /* If any dynamic relocs apply to a read-only section,
15178 then we need a DT_TEXTREL entry. */
15179 if ((info->flags & DF_TEXTREL) == 0)
15180 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
15183 if ((info->flags & DF_TEXTREL) != 0)
15185 if (!add_dynamic_entry (DT_TEXTREL, 0))
15188 if (htab->vxworks_p
15189 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
15192 #undef add_dynamic_entry
15197 /* Size sections even though they're not dynamic. We use it to setup
15198 _TLS_MODULE_BASE_, if needed. */
15201 elf32_arm_always_size_sections (bfd *output_bfd,
15202 struct bfd_link_info *info)
15206 if (bfd_link_relocatable (info))
15209 tls_sec = elf_hash_table (info)->tls_sec;
15213 struct elf_link_hash_entry *tlsbase;
15215 tlsbase = elf_link_hash_lookup
15216 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
15220 struct bfd_link_hash_entry *bh = NULL;
15221 const struct elf_backend_data *bed
15222 = get_elf_backend_data (output_bfd);
15224 if (!(_bfd_generic_link_add_one_symbol
15225 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
15226 tls_sec, 0, NULL, FALSE,
15227 bed->collect, &bh)))
15230 tlsbase->type = STT_TLS;
15231 tlsbase = (struct elf_link_hash_entry *)bh;
15232 tlsbase->def_regular = 1;
15233 tlsbase->other = STV_HIDDEN;
15234 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
15240 /* Finish up dynamic symbol handling. We set the contents of various
15241 dynamic sections here. */
15244 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
15245 struct bfd_link_info * info,
15246 struct elf_link_hash_entry * h,
15247 Elf_Internal_Sym * sym)
15249 struct elf32_arm_link_hash_table *htab;
15250 struct elf32_arm_link_hash_entry *eh;
15252 htab = elf32_arm_hash_table (info);
15256 eh = (struct elf32_arm_link_hash_entry *) h;
15258 if (h->plt.offset != (bfd_vma) -1)
15262 BFD_ASSERT (h->dynindx != -1);
15263 if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
15268 if (!h->def_regular)
15270 /* Mark the symbol as undefined, rather than as defined in
15271 the .plt section. */
15272 sym->st_shndx = SHN_UNDEF;
15273 /* If the symbol is weak we need to clear the value.
15274 Otherwise, the PLT entry would provide a definition for
15275 the symbol even if the symbol wasn't defined anywhere,
15276 and so the symbol would never be NULL. Leave the value if
15277 there were any relocations where pointer equality matters
15278 (this is a clue for the dynamic linker, to make function
15279 pointer comparisons work between an application and shared
15281 if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
15284 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
15286 /* At least one non-call relocation references this .iplt entry,
15287 so the .iplt entry is the function's canonical address. */
15288 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
15289 ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
15290 sym->st_shndx = (_bfd_elf_section_from_bfd_section
15291 (output_bfd, htab->root.iplt->output_section));
15292 sym->st_value = (h->plt.offset
15293 + htab->root.iplt->output_section->vma
15294 + htab->root.iplt->output_offset);
15301 Elf_Internal_Rela rel;
15303 /* This symbol needs a copy reloc. Set it up. */
15304 BFD_ASSERT (h->dynindx != -1
15305 && (h->root.type == bfd_link_hash_defined
15306 || h->root.type == bfd_link_hash_defweak));
15309 BFD_ASSERT (s != NULL);
15312 rel.r_offset = (h->root.u.def.value
15313 + h->root.u.def.section->output_section->vma
15314 + h->root.u.def.section->output_offset);
15315 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
15316 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
15319 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
15320 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
15321 to the ".got" section. */
15322 if (h == htab->root.hdynamic
15323 || (!htab->vxworks_p && h == htab->root.hgot))
15324 sym->st_shndx = SHN_ABS;
15330 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15332 const unsigned long *template, unsigned count)
15336 for (ix = 0; ix != count; ix++)
15338 unsigned long insn = template[ix];
15340 /* Emit mov pc,rx if bx is not permitted. */
15341 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
15342 insn = (insn & 0xf000000f) | 0x01a0f000;
15343 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
15347 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
15348 other variants, NaCl needs this entry in a static executable's
15349 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
15350 zero. For .iplt really only the last bundle is useful, and .iplt
15351 could have a shorter first entry, with each individual PLT entry's
15352 relative branch calculated differently so it targets the last
15353 bundle instead of the instruction before it (labelled .Lplt_tail
15354 above). But it's simpler to keep the size and layout of PLT0
15355 consistent with the dynamic case, at the cost of some dead code at
15356 the start of .iplt and the one dead store to the stack at the start
15359 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15360 asection *plt, bfd_vma got_displacement)
15364 put_arm_insn (htab, output_bfd,
15365 elf32_arm_nacl_plt0_entry[0]
15366 | arm_movw_immediate (got_displacement),
15367 plt->contents + 0);
15368 put_arm_insn (htab, output_bfd,
15369 elf32_arm_nacl_plt0_entry[1]
15370 | arm_movt_immediate (got_displacement),
15371 plt->contents + 4);
15373 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
15374 put_arm_insn (htab, output_bfd,
15375 elf32_arm_nacl_plt0_entry[i],
15376 plt->contents + (i * 4));
15379 /* Finish up the dynamic sections. */
15382 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
15387 struct elf32_arm_link_hash_table *htab;
15389 htab = elf32_arm_hash_table (info);
15393 dynobj = elf_hash_table (info)->dynobj;
15395 sgot = htab->root.sgotplt;
15396 /* A broken linker script might have discarded the dynamic sections.
15397 Catch this here so that we do not seg-fault later on. */
15398 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
15400 sdyn = bfd_get_linker_section (dynobj, ".dynamic");
15402 if (elf_hash_table (info)->dynamic_sections_created)
15405 Elf32_External_Dyn *dyncon, *dynconend;
15407 splt = htab->root.splt;
15408 BFD_ASSERT (splt != NULL && sdyn != NULL);
15409 BFD_ASSERT (htab->symbian_p || sgot != NULL);
15411 dyncon = (Elf32_External_Dyn *) sdyn->contents;
15412 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
15414 for (; dyncon < dynconend; dyncon++)
15416 Elf_Internal_Dyn dyn;
15420 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
15427 if (htab->vxworks_p
15428 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
15429 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15434 goto get_vma_if_bpabi;
15437 goto get_vma_if_bpabi;
15440 goto get_vma_if_bpabi;
15442 name = ".gnu.version";
15443 goto get_vma_if_bpabi;
15445 name = ".gnu.version_d";
15446 goto get_vma_if_bpabi;
15448 name = ".gnu.version_r";
15449 goto get_vma_if_bpabi;
15452 name = htab->symbian_p ? ".got" : ".got.plt";
15455 name = RELOC_SECTION (htab, ".plt");
15457 s = bfd_get_linker_section (dynobj, name);
15460 (*_bfd_error_handler)
15461 (_("could not find section %s"), name);
15462 bfd_set_error (bfd_error_invalid_operation);
15465 if (!htab->symbian_p)
15466 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
15468 /* In the BPABI, tags in the PT_DYNAMIC section point
15469 at the file offset, not the memory address, for the
15470 convenience of the post linker. */
15471 dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
15472 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15476 if (htab->symbian_p)
15481 s = htab->root.srelplt;
15482 BFD_ASSERT (s != NULL);
15483 dyn.d_un.d_val = s->size;
15484 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15489 if (!htab->symbian_p)
15491 /* My reading of the SVR4 ABI indicates that the
15492 procedure linkage table relocs (DT_JMPREL) should be
15493 included in the overall relocs (DT_REL). This is
15494 what Solaris does. However, UnixWare can not handle
15495 that case. Therefore, we override the DT_RELSZ entry
15496 here to make it not include the JMPREL relocs. Since
15497 the linker script arranges for .rel(a).plt to follow all
15498 other relocation sections, we don't have to worry
15499 about changing the DT_REL entry. */
15500 s = htab->root.srelplt;
15502 dyn.d_un.d_val -= s->size;
15503 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15506 /* Fall through. */
15510 /* In the BPABI, the DT_REL tag must point at the file
15511 offset, not the VMA, of the first relocation
15512 section. So, we use code similar to that in
15513 elflink.c, but do not check for SHF_ALLOC on the
15514 relcoation section, since relocations sections are
15515 never allocated under the BPABI. The comments above
15516 about Unixware notwithstanding, we include all of the
15517 relocations here. */
15518 if (htab->symbian_p)
15521 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
15522 ? SHT_REL : SHT_RELA);
15523 dyn.d_un.d_val = 0;
15524 for (i = 1; i < elf_numsections (output_bfd); i++)
15526 Elf_Internal_Shdr *hdr
15527 = elf_elfsections (output_bfd)[i];
15528 if (hdr->sh_type == type)
15530 if (dyn.d_tag == DT_RELSZ
15531 || dyn.d_tag == DT_RELASZ)
15532 dyn.d_un.d_val += hdr->sh_size;
15533 else if ((ufile_ptr) hdr->sh_offset
15534 <= dyn.d_un.d_val - 1)
15535 dyn.d_un.d_val = hdr->sh_offset;
15538 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15542 case DT_TLSDESC_PLT:
15543 s = htab->root.splt;
15544 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
15545 + htab->dt_tlsdesc_plt);
15546 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15549 case DT_TLSDESC_GOT:
15550 s = htab->root.sgot;
15551 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
15552 + htab->dt_tlsdesc_got);
15553 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15556 /* Set the bottom bit of DT_INIT/FINI if the
15557 corresponding function is Thumb. */
15559 name = info->init_function;
15562 name = info->fini_function;
15564 /* If it wasn't set by elf_bfd_final_link
15565 then there is nothing to adjust. */
15566 if (dyn.d_un.d_val != 0)
15568 struct elf_link_hash_entry * eh;
15570 eh = elf_link_hash_lookup (elf_hash_table (info), name,
15571 FALSE, FALSE, TRUE);
15573 && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
15574 == ST_BRANCH_TO_THUMB)
15576 dyn.d_un.d_val |= 1;
15577 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15584 /* Fill in the first entry in the procedure linkage table. */
15585 if (splt->size > 0 && htab->plt_header_size)
15587 const bfd_vma *plt0_entry;
15588 bfd_vma got_address, plt_address, got_displacement;
15590 /* Calculate the addresses of the GOT and PLT. */
15591 got_address = sgot->output_section->vma + sgot->output_offset;
15592 plt_address = splt->output_section->vma + splt->output_offset;
15594 if (htab->vxworks_p)
15596 /* The VxWorks GOT is relocated by the dynamic linker.
15597 Therefore, we must emit relocations rather than simply
15598 computing the values now. */
15599 Elf_Internal_Rela rel;
15601 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
15602 put_arm_insn (htab, output_bfd, plt0_entry[0],
15603 splt->contents + 0);
15604 put_arm_insn (htab, output_bfd, plt0_entry[1],
15605 splt->contents + 4);
15606 put_arm_insn (htab, output_bfd, plt0_entry[2],
15607 splt->contents + 8);
15608 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
15610 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
15611 rel.r_offset = plt_address + 12;
15612 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
15614 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
15615 htab->srelplt2->contents);
15617 else if (htab->nacl_p)
15618 arm_nacl_put_plt0 (htab, output_bfd, splt,
15619 got_address + 8 - (plt_address + 16));
15620 else if (using_thumb_only (htab))
15622 got_displacement = got_address - (plt_address + 12);
15624 plt0_entry = elf32_thumb2_plt0_entry;
15625 put_arm_insn (htab, output_bfd, plt0_entry[0],
15626 splt->contents + 0);
15627 put_arm_insn (htab, output_bfd, plt0_entry[1],
15628 splt->contents + 4);
15629 put_arm_insn (htab, output_bfd, plt0_entry[2],
15630 splt->contents + 8);
15632 bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
15636 got_displacement = got_address - (plt_address + 16);
15638 plt0_entry = elf32_arm_plt0_entry;
15639 put_arm_insn (htab, output_bfd, plt0_entry[0],
15640 splt->contents + 0);
15641 put_arm_insn (htab, output_bfd, plt0_entry[1],
15642 splt->contents + 4);
15643 put_arm_insn (htab, output_bfd, plt0_entry[2],
15644 splt->contents + 8);
15645 put_arm_insn (htab, output_bfd, plt0_entry[3],
15646 splt->contents + 12);
15648 #ifdef FOUR_WORD_PLT
15649 /* The displacement value goes in the otherwise-unused
15650 last word of the second entry. */
15651 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
15653 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
15658 /* UnixWare sets the entsize of .plt to 4, although that doesn't
15659 really seem like the right value. */
15660 if (splt->output_section->owner == output_bfd)
15661 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
15663 if (htab->dt_tlsdesc_plt)
15665 bfd_vma got_address
15666 = sgot->output_section->vma + sgot->output_offset;
15667 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
15668 + htab->root.sgot->output_offset);
15669 bfd_vma plt_address
15670 = splt->output_section->vma + splt->output_offset;
15672 arm_put_trampoline (htab, output_bfd,
15673 splt->contents + htab->dt_tlsdesc_plt,
15674 dl_tlsdesc_lazy_trampoline, 6);
15676 bfd_put_32 (output_bfd,
15677 gotplt_address + htab->dt_tlsdesc_got
15678 - (plt_address + htab->dt_tlsdesc_plt)
15679 - dl_tlsdesc_lazy_trampoline[6],
15680 splt->contents + htab->dt_tlsdesc_plt + 24);
15681 bfd_put_32 (output_bfd,
15682 got_address - (plt_address + htab->dt_tlsdesc_plt)
15683 - dl_tlsdesc_lazy_trampoline[7],
15684 splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
15687 if (htab->tls_trampoline)
15689 arm_put_trampoline (htab, output_bfd,
15690 splt->contents + htab->tls_trampoline,
15691 tls_trampoline, 3);
15692 #ifdef FOUR_WORD_PLT
15693 bfd_put_32 (output_bfd, 0x00000000,
15694 splt->contents + htab->tls_trampoline + 12);
15698 if (htab->vxworks_p
15699 && !bfd_link_pic (info)
15700 && htab->root.splt->size > 0)
15702 /* Correct the .rel(a).plt.unloaded relocations. They will have
15703 incorrect symbol indexes. */
15707 num_plts = ((htab->root.splt->size - htab->plt_header_size)
15708 / htab->plt_entry_size);
15709 p = htab->srelplt2->contents + RELOC_SIZE (htab);
15711 for (; num_plts; num_plts--)
15713 Elf_Internal_Rela rel;
15715 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
15716 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
15717 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
15718 p += RELOC_SIZE (htab);
15720 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
15721 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
15722 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
15723 p += RELOC_SIZE (htab);
15728 if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
15729 /* NaCl uses a special first entry in .iplt too. */
15730 arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
15732 /* Fill in the first three entries in the global offset table. */
15735 if (sgot->size > 0)
15738 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
15740 bfd_put_32 (output_bfd,
15741 sdyn->output_section->vma + sdyn->output_offset,
15743 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
15744 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
15747 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
15754 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
15756 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
15757 struct elf32_arm_link_hash_table *globals;
15758 struct elf_segment_map *m;
15760 i_ehdrp = elf_elfheader (abfd);
15762 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
15763 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
15765 _bfd_elf_post_process_headers (abfd, link_info);
15766 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
15770 globals = elf32_arm_hash_table (link_info);
15771 if (globals != NULL && globals->byteswap_code)
15772 i_ehdrp->e_flags |= EF_ARM_BE8;
15775 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
15776 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
15778 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
15779 if (abi == AEABI_VFP_args_vfp)
15780 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
15782 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
15785 /* Scan segment to set p_flags attribute if it contains only sections with
15786 SHF_ARM_NOREAD flag. */
15787 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
15793 for (j = 0; j < m->count; j++)
15795 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_NOREAD))
15801 m->p_flags_valid = 1;
15806 static enum elf_reloc_type_class
15807 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
15808 const asection *rel_sec ATTRIBUTE_UNUSED,
15809 const Elf_Internal_Rela *rela)
15811 switch ((int) ELF32_R_TYPE (rela->r_info))
15813 case R_ARM_RELATIVE:
15814 return reloc_class_relative;
15815 case R_ARM_JUMP_SLOT:
15816 return reloc_class_plt;
15818 return reloc_class_copy;
15819 case R_ARM_IRELATIVE:
15820 return reloc_class_ifunc;
15822 return reloc_class_normal;
15827 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
15829 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
15832 /* Return TRUE if this is an unwinding table entry. */
15835 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
15837 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
15838 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
15842 /* Set the type and flags for an ARM section. We do this by
15843 the section name, which is a hack, but ought to work. */
15846 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
15850 name = bfd_get_section_name (abfd, sec);
15852 if (is_arm_elf_unwind_section_name (abfd, name))
15854 hdr->sh_type = SHT_ARM_EXIDX;
15855 hdr->sh_flags |= SHF_LINK_ORDER;
15858 if (sec->flags & SEC_ELF_NOREAD)
15859 hdr->sh_flags |= SHF_ARM_NOREAD;
15864 /* Handle an ARM specific section when reading an object file. This is
15865 called when bfd_section_from_shdr finds a section with an unknown
15869 elf32_arm_section_from_shdr (bfd *abfd,
15870 Elf_Internal_Shdr * hdr,
15874 /* There ought to be a place to keep ELF backend specific flags, but
15875 at the moment there isn't one. We just keep track of the
15876 sections by their name, instead. Fortunately, the ABI gives
15877 names for all the ARM specific sections, so we will probably get
15879 switch (hdr->sh_type)
15881 case SHT_ARM_EXIDX:
15882 case SHT_ARM_PREEMPTMAP:
15883 case SHT_ARM_ATTRIBUTES:
15890 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
15896 static _arm_elf_section_data *
15897 get_arm_elf_section_data (asection * sec)
15899 if (sec && sec->owner && is_arm_elf (sec->owner))
15900 return elf32_arm_section_data (sec);
15908 struct bfd_link_info *info;
15911 int (*func) (void *, const char *, Elf_Internal_Sym *,
15912 asection *, struct elf_link_hash_entry *);
15913 } output_arch_syminfo;
15915 enum map_symbol_type
15923 /* Output a single mapping symbol. */
15926 elf32_arm_output_map_sym (output_arch_syminfo *osi,
15927 enum map_symbol_type type,
15930 static const char *names[3] = {"$a", "$t", "$d"};
15931 Elf_Internal_Sym sym;
15933 sym.st_value = osi->sec->output_section->vma
15934 + osi->sec->output_offset
15938 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
15939 sym.st_shndx = osi->sec_shndx;
15940 sym.st_target_internal = 0;
15941 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
15942 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
15945 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
15946 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
15949 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
15950 bfd_boolean is_iplt_entry_p,
15951 union gotplt_union *root_plt,
15952 struct arm_plt_info *arm_plt)
15954 struct elf32_arm_link_hash_table *htab;
15955 bfd_vma addr, plt_header_size;
15957 if (root_plt->offset == (bfd_vma) -1)
15960 htab = elf32_arm_hash_table (osi->info);
15964 if (is_iplt_entry_p)
15966 osi->sec = htab->root.iplt;
15967 plt_header_size = 0;
15971 osi->sec = htab->root.splt;
15972 plt_header_size = htab->plt_header_size;
15974 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
15975 (osi->info->output_bfd, osi->sec->output_section));
15977 addr = root_plt->offset & -2;
15978 if (htab->symbian_p)
15980 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
15982 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
15985 else if (htab->vxworks_p)
15987 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
15989 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
15991 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
15993 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
15996 else if (htab->nacl_p)
15998 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
16001 else if (using_thumb_only (htab))
16003 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
16008 bfd_boolean thumb_stub_p;
16010 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
16013 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
16016 #ifdef FOUR_WORD_PLT
16017 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
16019 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
16022 /* A three-word PLT with no Thumb thunk contains only Arm code,
16023 so only need to output a mapping symbol for the first PLT entry and
16024 entries with thumb thunks. */
16025 if (thumb_stub_p || addr == plt_header_size)
16027 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
16036 /* Output mapping symbols for PLT entries associated with H. */
16039 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
16041 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
16042 struct elf32_arm_link_hash_entry *eh;
16044 if (h->root.type == bfd_link_hash_indirect)
16047 if (h->root.type == bfd_link_hash_warning)
16048 /* When warning symbols are created, they **replace** the "real"
16049 entry in the hash table, thus we never get to see the real
16050 symbol in a hash traversal. So look at it now. */
16051 h = (struct elf_link_hash_entry *) h->root.u.i.link;
16053 eh = (struct elf32_arm_link_hash_entry *) h;
16054 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
16055 &h->plt, &eh->plt);
16058 /* Bind a veneered symbol to its veneer identified by its hash entry
16059 STUB_ENTRY. The veneered location thus loose its symbol. */
16062 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
16064 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
16067 hash->root.root.u.def.section = stub_entry->stub_sec;
16068 hash->root.root.u.def.value = stub_entry->stub_offset;
16069 hash->root.size = stub_entry->stub_size;
16072 /* Output a single local symbol for a generated stub. */
16075 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
16076 bfd_vma offset, bfd_vma size)
16078 Elf_Internal_Sym sym;
16080 sym.st_value = osi->sec->output_section->vma
16081 + osi->sec->output_offset
16083 sym.st_size = size;
16085 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16086 sym.st_shndx = osi->sec_shndx;
16087 sym.st_target_internal = 0;
16088 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
16092 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
16095 struct elf32_arm_stub_hash_entry *stub_entry;
16096 asection *stub_sec;
16099 output_arch_syminfo *osi;
16100 const insn_sequence *template_sequence;
16101 enum stub_insn_type prev_type;
16104 enum map_symbol_type sym_type;
16106 /* Massage our args to the form they really have. */
16107 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
16108 osi = (output_arch_syminfo *) in_arg;
16110 stub_sec = stub_entry->stub_sec;
16112 /* Ensure this stub is attached to the current section being
16114 if (stub_sec != osi->sec)
16117 addr = (bfd_vma) stub_entry->stub_offset;
16118 template_sequence = stub_entry->stub_template;
16120 if (arm_stub_sym_claimed (stub_entry->stub_type))
16121 arm_stub_claim_sym (stub_entry);
16124 stub_name = stub_entry->output_name;
16125 switch (template_sequence[0].type)
16128 if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
16129 stub_entry->stub_size))
16134 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
16135 stub_entry->stub_size))
16144 prev_type = DATA_TYPE;
16146 for (i = 0; i < stub_entry->stub_template_size; i++)
16148 switch (template_sequence[i].type)
16151 sym_type = ARM_MAP_ARM;
16156 sym_type = ARM_MAP_THUMB;
16160 sym_type = ARM_MAP_DATA;
16168 if (template_sequence[i].type != prev_type)
16170 prev_type = template_sequence[i].type;
16171 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
16175 switch (template_sequence[i].type)
16199 /* Output mapping symbols for linker generated sections,
16200 and for those data-only sections that do not have a
16204 elf32_arm_output_arch_local_syms (bfd *output_bfd,
16205 struct bfd_link_info *info,
16207 int (*func) (void *, const char *,
16208 Elf_Internal_Sym *,
16210 struct elf_link_hash_entry *))
16212 output_arch_syminfo osi;
16213 struct elf32_arm_link_hash_table *htab;
16215 bfd_size_type size;
16218 htab = elf32_arm_hash_table (info);
16222 check_use_blx (htab);
16224 osi.flaginfo = flaginfo;
16228 /* Add a $d mapping symbol to data-only sections that
16229 don't have any mapping symbol. This may result in (harmless) redundant
16230 mapping symbols. */
16231 for (input_bfd = info->input_bfds;
16233 input_bfd = input_bfd->link.next)
16235 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
16236 for (osi.sec = input_bfd->sections;
16238 osi.sec = osi.sec->next)
16240 if (osi.sec->output_section != NULL
16241 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
16243 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
16244 == SEC_HAS_CONTENTS
16245 && get_arm_elf_section_data (osi.sec) != NULL
16246 && get_arm_elf_section_data (osi.sec)->mapcount == 0
16247 && osi.sec->size > 0
16248 && (osi.sec->flags & SEC_EXCLUDE) == 0)
16250 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16251 (output_bfd, osi.sec->output_section);
16252 if (osi.sec_shndx != (int)SHN_BAD)
16253 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
16258 /* ARM->Thumb glue. */
16259 if (htab->arm_glue_size > 0)
16261 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16262 ARM2THUMB_GLUE_SECTION_NAME);
16264 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16265 (output_bfd, osi.sec->output_section);
16266 if (bfd_link_pic (info) || htab->root.is_relocatable_executable
16267 || htab->pic_veneer)
16268 size = ARM2THUMB_PIC_GLUE_SIZE;
16269 else if (htab->use_blx)
16270 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
16272 size = ARM2THUMB_STATIC_GLUE_SIZE;
16274 for (offset = 0; offset < htab->arm_glue_size; offset += size)
16276 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
16277 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
16281 /* Thumb->ARM glue. */
16282 if (htab->thumb_glue_size > 0)
16284 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16285 THUMB2ARM_GLUE_SECTION_NAME);
16287 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16288 (output_bfd, osi.sec->output_section);
16289 size = THUMB2ARM_GLUE_SIZE;
16291 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
16293 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
16294 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
16298 /* ARMv4 BX veneers. */
16299 if (htab->bx_glue_size > 0)
16301 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16302 ARM_BX_GLUE_SECTION_NAME);
16304 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16305 (output_bfd, osi.sec->output_section);
16307 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
16310 /* Long calls stubs. */
16311 if (htab->stub_bfd && htab->stub_bfd->sections)
16313 asection* stub_sec;
16315 for (stub_sec = htab->stub_bfd->sections;
16317 stub_sec = stub_sec->next)
16319 /* Ignore non-stub sections. */
16320 if (!strstr (stub_sec->name, STUB_SUFFIX))
16323 osi.sec = stub_sec;
16325 osi.sec_shndx = _bfd_elf_section_from_bfd_section
16326 (output_bfd, osi.sec->output_section);
16328 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
16332 /* Finally, output mapping symbols for the PLT. */
16333 if (htab->root.splt && htab->root.splt->size > 0)
16335 osi.sec = htab->root.splt;
16336 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
16337 (output_bfd, osi.sec->output_section));
16339 /* Output mapping symbols for the plt header. SymbianOS does not have a
16341 if (htab->vxworks_p)
16343 /* VxWorks shared libraries have no PLT header. */
16344 if (!bfd_link_pic (info))
16346 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16348 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
16352 else if (htab->nacl_p)
16354 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16357 else if (using_thumb_only (htab))
16359 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
16361 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
16363 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
16366 else if (!htab->symbian_p)
16368 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16370 #ifndef FOUR_WORD_PLT
16371 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
16376 if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
16378 /* NaCl uses a special first entry in .iplt too. */
16379 osi.sec = htab->root.iplt;
16380 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
16381 (output_bfd, osi.sec->output_section));
16382 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16385 if ((htab->root.splt && htab->root.splt->size > 0)
16386 || (htab->root.iplt && htab->root.iplt->size > 0))
16388 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
16389 for (input_bfd = info->input_bfds;
16391 input_bfd = input_bfd->link.next)
16393 struct arm_local_iplt_info **local_iplt;
16394 unsigned int i, num_syms;
16396 local_iplt = elf32_arm_local_iplt (input_bfd);
16397 if (local_iplt != NULL)
16399 num_syms = elf_symtab_hdr (input_bfd).sh_info;
16400 for (i = 0; i < num_syms; i++)
16401 if (local_iplt[i] != NULL
16402 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
16403 &local_iplt[i]->root,
16404 &local_iplt[i]->arm))
16409 if (htab->dt_tlsdesc_plt != 0)
16411 /* Mapping symbols for the lazy tls trampoline. */
16412 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
16415 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
16416 htab->dt_tlsdesc_plt + 24))
16419 if (htab->tls_trampoline != 0)
16421 /* Mapping symbols for the tls trampoline. */
16422 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
16424 #ifdef FOUR_WORD_PLT
16425 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
16426 htab->tls_trampoline + 12))
16434 /* Allocate target specific section data. */
16437 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
16439 if (!sec->used_by_bfd)
16441 _arm_elf_section_data *sdata;
16442 bfd_size_type amt = sizeof (*sdata);
16444 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
16447 sec->used_by_bfd = sdata;
16450 return _bfd_elf_new_section_hook (abfd, sec);
16454 /* Used to order a list of mapping symbols by address. */
16457 elf32_arm_compare_mapping (const void * a, const void * b)
16459 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
16460 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
16462 if (amap->vma > bmap->vma)
16464 else if (amap->vma < bmap->vma)
16466 else if (amap->type > bmap->type)
16467 /* Ensure results do not depend on the host qsort for objects with
16468 multiple mapping symbols at the same address by sorting on type
16471 else if (amap->type < bmap->type)
16477 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
16479 static unsigned long
16480 offset_prel31 (unsigned long addr, bfd_vma offset)
16482 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
16485 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
16489 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
16491 unsigned long first_word = bfd_get_32 (output_bfd, from);
16492 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
16494 /* High bit of first word is supposed to be zero. */
16495 if ((first_word & 0x80000000ul) == 0)
16496 first_word = offset_prel31 (first_word, offset);
16498 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
16499 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
16500 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
16501 second_word = offset_prel31 (second_word, offset);
16503 bfd_put_32 (output_bfd, first_word, to);
16504 bfd_put_32 (output_bfd, second_word, to + 4);
16507 /* Data for make_branch_to_a8_stub(). */
16509 struct a8_branch_to_stub_data
16511 asection *writing_section;
16512 bfd_byte *contents;
16516 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
16517 places for a particular section. */
16520 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
16523 struct elf32_arm_stub_hash_entry *stub_entry;
16524 struct a8_branch_to_stub_data *data;
16525 bfd_byte *contents;
16526 unsigned long branch_insn;
16527 bfd_vma veneered_insn_loc, veneer_entry_loc;
16528 bfd_signed_vma branch_offset;
16532 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
16533 data = (struct a8_branch_to_stub_data *) in_arg;
16535 if (stub_entry->target_section != data->writing_section
16536 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
16539 contents = data->contents;
16541 /* We use target_section as Cortex-A8 erratum workaround stubs are only
16542 generated when both source and target are in the same section. */
16543 veneered_insn_loc = stub_entry->target_section->output_section->vma
16544 + stub_entry->target_section->output_offset
16545 + stub_entry->source_value;
16547 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
16548 + stub_entry->stub_sec->output_offset
16549 + stub_entry->stub_offset;
16551 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
16552 veneered_insn_loc &= ~3u;
16554 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
16556 abfd = stub_entry->target_section->owner;
16557 loc = stub_entry->source_value;
16559 /* We attempt to avoid this condition by setting stubs_always_after_branch
16560 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
16561 This check is just to be on the safe side... */
16562 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
16564 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
16565 "allocated in unsafe location"), abfd);
16569 switch (stub_entry->stub_type)
16571 case arm_stub_a8_veneer_b:
16572 case arm_stub_a8_veneer_b_cond:
16573 branch_insn = 0xf0009000;
16576 case arm_stub_a8_veneer_blx:
16577 branch_insn = 0xf000e800;
16580 case arm_stub_a8_veneer_bl:
16582 unsigned int i1, j1, i2, j2, s;
16584 branch_insn = 0xf000d000;
16587 if (branch_offset < -16777216 || branch_offset > 16777214)
16589 /* There's not much we can do apart from complain if this
16591 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
16592 "of range (input file too large)"), abfd);
16596 /* i1 = not(j1 eor s), so:
16598 j1 = (not i1) eor s. */
16600 branch_insn |= (branch_offset >> 1) & 0x7ff;
16601 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
16602 i2 = (branch_offset >> 22) & 1;
16603 i1 = (branch_offset >> 23) & 1;
16604 s = (branch_offset >> 24) & 1;
16607 branch_insn |= j2 << 11;
16608 branch_insn |= j1 << 13;
16609 branch_insn |= s << 26;
16618 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
16619 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
16624 /* Beginning of stm32l4xx work-around. */
16626 /* Functions encoding instructions necessary for the emission of the
16627 fix-stm32l4xx-629360.
16628 Encoding is extracted from the
16629 ARM (C) Architecture Reference Manual
16630 ARMv7-A and ARMv7-R edition
16631 ARM DDI 0406C.b (ID072512). */
16633 static inline bfd_vma
16634 create_instruction_branch_absolute (int branch_offset)
16636 /* A8.8.18 B (A8-334)
16637 B target_address (Encoding T4). */
16638 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
16639 /* jump offset is: S:I1:I2:imm10:imm11:0. */
16640 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
16642 int s = ((branch_offset & 0x1000000) >> 24);
16643 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
16644 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
16646 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
16647 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
16649 bfd_vma patched_inst = 0xf0009000
16651 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
16652 | j1 << 13 /* J1. */
16653 | j2 << 11 /* J2. */
16654 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
16656 return patched_inst;
16659 static inline bfd_vma
16660 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
16662 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
16663 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
16664 bfd_vma patched_inst = 0xe8900000
16665 | (/*W=*/wback << 21)
16667 | (reg_mask & 0x0000ffff);
16669 return patched_inst;
16672 static inline bfd_vma
16673 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
16675 /* A8.8.60 LDMDB/LDMEA (A8-402)
16676 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
16677 bfd_vma patched_inst = 0xe9100000
16678 | (/*W=*/wback << 21)
16680 | (reg_mask & 0x0000ffff);
16682 return patched_inst;
16685 static inline bfd_vma
16686 create_instruction_mov (int target_reg, int source_reg)
16688 /* A8.8.103 MOV (register) (A8-486)
16689 MOV Rd, Rm (Encoding T1). */
16690 bfd_vma patched_inst = 0x4600
16691 | (target_reg & 0x7)
16692 | ((target_reg & 0x8) >> 3) << 7
16693 | (source_reg << 3);
16695 return patched_inst;
16698 static inline bfd_vma
16699 create_instruction_sub (int target_reg, int source_reg, int value)
16701 /* A8.8.221 SUB (immediate) (A8-708)
16702 SUB Rd, Rn, #value (Encoding T3). */
16703 bfd_vma patched_inst = 0xf1a00000
16704 | (target_reg << 8)
16705 | (source_reg << 16)
16707 | ((value & 0x800) >> 11) << 26
16708 | ((value & 0x700) >> 8) << 12
16711 return patched_inst;
16714 static inline bfd_vma
16715 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
16718 /* A8.8.332 VLDM (A8-922)
16719 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
16720 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
16721 | (/*W=*/wback << 21)
16723 | (num_words & 0x000000ff)
16724 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
16725 | (first_reg & 0x00000001) << 22;
16727 return patched_inst;
16730 static inline bfd_vma
16731 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
16734 /* A8.8.332 VLDM (A8-922)
16735 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
16736 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
16738 | (num_words & 0x000000ff)
16739 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
16740 | (first_reg & 0x00000001) << 22;
16742 return patched_inst;
16745 static inline bfd_vma
16746 create_instruction_udf_w (int value)
16748 /* A8.8.247 UDF (A8-758)
16749 Undefined (Encoding T2). */
16750 bfd_vma patched_inst = 0xf7f0a000
16751 | (value & 0x00000fff)
16752 | (value & 0x000f0000) << 16;
16754 return patched_inst;
16757 static inline bfd_vma
16758 create_instruction_udf (int value)
16760 /* A8.8.247 UDF (A8-758)
16761 Undefined (Encoding T1). */
16762 bfd_vma patched_inst = 0xde00
16765 return patched_inst;
16768 /* Functions writing an instruction in memory, returning the next
16769 memory position to write to. */
16771 static inline bfd_byte *
16772 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
16773 bfd * output_bfd, bfd_byte *pt, insn32 insn)
16775 put_thumb2_insn (htab, output_bfd, insn, pt);
16779 static inline bfd_byte *
16780 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
16781 bfd * output_bfd, bfd_byte *pt, insn32 insn)
16783 put_thumb_insn (htab, output_bfd, insn, pt);
16787 /* Function filling up a region in memory with T1 and T2 UDFs taking
16788 care of alignment. */
16791 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
16793 const bfd_byte * const base_stub_contents,
16794 bfd_byte * const from_stub_contents,
16795 const bfd_byte * const end_stub_contents)
16797 bfd_byte *current_stub_contents = from_stub_contents;
16799 /* Fill the remaining of the stub with deterministic contents : UDF
16801 Check if realignment is needed on modulo 4 frontier using T1, to
16803 if ((current_stub_contents < end_stub_contents)
16804 && !((current_stub_contents - base_stub_contents) % 2)
16805 && ((current_stub_contents - base_stub_contents) % 4))
16806 current_stub_contents =
16807 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
16808 create_instruction_udf (0));
16810 for (; current_stub_contents < end_stub_contents;)
16811 current_stub_contents =
16812 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16813 create_instruction_udf_w (0));
16815 return current_stub_contents;
16818 /* Functions writing the stream of instructions equivalent to the
16819 derived sequence for ldmia, ldmdb, vldm respectively. */
16822 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
16824 const insn32 initial_insn,
16825 const bfd_byte *const initial_insn_addr,
16826 bfd_byte *const base_stub_contents)
16828 int wback = (initial_insn & 0x00200000) >> 21;
16829 int ri, rn = (initial_insn & 0x000F0000) >> 16;
16830 int insn_all_registers = initial_insn & 0x0000ffff;
16831 int insn_low_registers, insn_high_registers;
16832 int usable_register_mask;
16833 int nb_registers = popcount (insn_all_registers);
16834 int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
16835 int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
16836 bfd_byte *current_stub_contents = base_stub_contents;
16838 BFD_ASSERT (is_thumb2_ldmia (initial_insn));
16840 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
16841 smaller than 8 registers load sequences that do not cause the
16843 if (nb_registers <= 8)
16845 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
16846 current_stub_contents =
16847 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16850 /* B initial_insn_addr+4. */
16852 current_stub_contents =
16853 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16854 create_instruction_branch_absolute
16855 (initial_insn_addr - current_stub_contents));
16858 /* Fill the remaining of the stub with deterministic contents. */
16859 current_stub_contents =
16860 stm32l4xx_fill_stub_udf (htab, output_bfd,
16861 base_stub_contents, current_stub_contents,
16862 base_stub_contents +
16863 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
16868 /* - reg_list[13] == 0. */
16869 BFD_ASSERT ((insn_all_registers & (1 << 13))==0);
16871 /* - reg_list[14] & reg_list[15] != 1. */
16872 BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
16874 /* - if (wback==1) reg_list[rn] == 0. */
16875 BFD_ASSERT (!wback || !restore_rn);
16877 /* - nb_registers > 8. */
16878 BFD_ASSERT (popcount (insn_all_registers) > 8);
16880 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
16882 /* In the following algorithm, we split this wide LDM using 2 LDM insns:
16883 - One with the 7 lowest registers (register mask 0x007F)
16884 This LDM will finally contain between 2 and 7 registers
16885 - One with the 7 highest registers (register mask 0xDF80)
16886 This ldm will finally contain between 2 and 7 registers. */
16887 insn_low_registers = insn_all_registers & 0x007F;
16888 insn_high_registers = insn_all_registers & 0xDF80;
16890 /* A spare register may be needed during this veneer to temporarily
16891 handle the base register. This register will be restored with the
16892 last LDM operation.
16893 The usable register may be any general purpose register (that
16894 excludes PC, SP, LR : register mask is 0x1FFF). */
16895 usable_register_mask = 0x1FFF;
16897 /* Generate the stub function. */
16900 /* LDMIA Rn!, {R-low-register-list} : (Encoding T2). */
16901 current_stub_contents =
16902 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16903 create_instruction_ldmia
16904 (rn, /*wback=*/1, insn_low_registers));
16906 /* LDMIA Rn!, {R-high-register-list} : (Encoding T2). */
16907 current_stub_contents =
16908 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16909 create_instruction_ldmia
16910 (rn, /*wback=*/1, insn_high_registers));
16913 /* B initial_insn_addr+4. */
16914 current_stub_contents =
16915 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16916 create_instruction_branch_absolute
16917 (initial_insn_addr - current_stub_contents));
16920 else /* if (!wback). */
16924 /* If Rn is not part of the high-register-list, move it there. */
16925 if (!(insn_high_registers & (1 << rn)))
16927 /* Choose a Ri in the high-register-list that will be restored. */
16928 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
16931 current_stub_contents =
16932 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
16933 create_instruction_mov (ri, rn));
16936 /* LDMIA Ri!, {R-low-register-list} : (Encoding T2). */
16937 current_stub_contents =
16938 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16939 create_instruction_ldmia
16940 (ri, /*wback=*/1, insn_low_registers));
16942 /* LDMIA Ri, {R-high-register-list} : (Encoding T2). */
16943 current_stub_contents =
16944 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16945 create_instruction_ldmia
16946 (ri, /*wback=*/0, insn_high_registers));
16950 /* B initial_insn_addr+4. */
16951 current_stub_contents =
16952 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16953 create_instruction_branch_absolute
16954 (initial_insn_addr - current_stub_contents));
16958 /* Fill the remaining of the stub with deterministic contents. */
16959 current_stub_contents =
16960 stm32l4xx_fill_stub_udf (htab, output_bfd,
16961 base_stub_contents, current_stub_contents,
16962 base_stub_contents +
16963 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
16967 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
16969 const insn32 initial_insn,
16970 const bfd_byte *const initial_insn_addr,
16971 bfd_byte *const base_stub_contents)
16973 int wback = (initial_insn & 0x00200000) >> 21;
16974 int ri, rn = (initial_insn & 0x000f0000) >> 16;
16975 int insn_all_registers = initial_insn & 0x0000ffff;
16976 int insn_low_registers, insn_high_registers;
16977 int usable_register_mask;
16978 int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
16979 int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
16980 int nb_registers = popcount (insn_all_registers);
16981 bfd_byte *current_stub_contents = base_stub_contents;
16983 BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
16985 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
16986 smaller than 8 registers load sequences that do not cause the
16988 if (nb_registers <= 8)
16990 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
16991 current_stub_contents =
16992 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16995 /* B initial_insn_addr+4. */
16996 current_stub_contents =
16997 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16998 create_instruction_branch_absolute
16999 (initial_insn_addr - current_stub_contents));
17001 /* Fill the remaining of the stub with deterministic contents. */
17002 current_stub_contents =
17003 stm32l4xx_fill_stub_udf (htab, output_bfd,
17004 base_stub_contents, current_stub_contents,
17005 base_stub_contents +
17006 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
17011 /* - reg_list[13] == 0. */
17012 BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
17014 /* - reg_list[14] & reg_list[15] != 1. */
17015 BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
17017 /* - if (wback==1) reg_list[rn] == 0. */
17018 BFD_ASSERT (!wback || !restore_rn);
17020 /* - nb_registers > 8. */
17021 BFD_ASSERT (popcount (insn_all_registers) > 8);
17023 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
17025 /* In the following algorithm, we split this wide LDM using 2 LDM insn:
17026 - One with the 7 lowest registers (register mask 0x007F)
17027 This LDM will finally contain between 2 and 7 registers
17028 - One with the 7 highest registers (register mask 0xDF80)
17029 This ldm will finally contain between 2 and 7 registers. */
17030 insn_low_registers = insn_all_registers & 0x007F;
17031 insn_high_registers = insn_all_registers & 0xDF80;
17033 /* A spare register may be needed during this veneer to temporarily
17034 handle the base register. This register will be restored with
17035 the last LDM operation.
17036 The usable register may be any general purpose register (that excludes
17037 PC, SP, LR : register mask is 0x1FFF). */
17038 usable_register_mask = 0x1FFF;
17040 /* Generate the stub function. */
17041 if (!wback && !restore_pc && !restore_rn)
17043 /* Choose a Ri in the low-register-list that will be restored. */
17044 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
17047 current_stub_contents =
17048 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
17049 create_instruction_mov (ri, rn));
17051 /* LDMDB Ri!, {R-high-register-list}. */
17052 current_stub_contents =
17053 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17054 create_instruction_ldmdb
17055 (ri, /*wback=*/1, insn_high_registers));
17057 /* LDMDB Ri, {R-low-register-list}. */
17058 current_stub_contents =
17059 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17060 create_instruction_ldmdb
17061 (ri, /*wback=*/0, insn_low_registers));
17063 /* B initial_insn_addr+4. */
17064 current_stub_contents =
17065 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17066 create_instruction_branch_absolute
17067 (initial_insn_addr - current_stub_contents));
17069 else if (wback && !restore_pc && !restore_rn)
17071 /* LDMDB Rn!, {R-high-register-list}. */
17072 current_stub_contents =
17073 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17074 create_instruction_ldmdb
17075 (rn, /*wback=*/1, insn_high_registers));
17077 /* LDMDB Rn!, {R-low-register-list}. */
17078 current_stub_contents =
17079 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17080 create_instruction_ldmdb
17081 (rn, /*wback=*/1, insn_low_registers));
17083 /* B initial_insn_addr+4. */
17084 current_stub_contents =
17085 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17086 create_instruction_branch_absolute
17087 (initial_insn_addr - current_stub_contents));
17089 else if (!wback && restore_pc && !restore_rn)
17091 /* Choose a Ri in the high-register-list that will be restored. */
17092 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
17094 /* SUB Ri, Rn, #(4*nb_registers). */
17095 current_stub_contents =
17096 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17097 create_instruction_sub (ri, rn, (4 * nb_registers)));
17099 /* LDMIA Ri!, {R-low-register-list}. */
17100 current_stub_contents =
17101 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17102 create_instruction_ldmia
17103 (ri, /*wback=*/1, insn_low_registers));
17105 /* LDMIA Ri, {R-high-register-list}. */
17106 current_stub_contents =
17107 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17108 create_instruction_ldmia
17109 (ri, /*wback=*/0, insn_high_registers));
17111 else if (wback && restore_pc && !restore_rn)
17113 /* Choose a Ri in the high-register-list that will be restored. */
17114 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
17116 /* SUB Rn, Rn, #(4*nb_registers) */
17117 current_stub_contents =
17118 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17119 create_instruction_sub (rn, rn, (4 * nb_registers)));
17122 current_stub_contents =
17123 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
17124 create_instruction_mov (ri, rn));
17126 /* LDMIA Ri!, {R-low-register-list}. */
17127 current_stub_contents =
17128 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17129 create_instruction_ldmia
17130 (ri, /*wback=*/1, insn_low_registers));
17132 /* LDMIA Ri, {R-high-register-list}. */
17133 current_stub_contents =
17134 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17135 create_instruction_ldmia
17136 (ri, /*wback=*/0, insn_high_registers));
17138 else if (!wback && !restore_pc && restore_rn)
17141 if (!(insn_low_registers & (1 << rn)))
17143 /* Choose a Ri in the low-register-list that will be restored. */
17144 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
17147 current_stub_contents =
17148 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
17149 create_instruction_mov (ri, rn));
17152 /* LDMDB Ri!, {R-high-register-list}. */
17153 current_stub_contents =
17154 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17155 create_instruction_ldmdb
17156 (ri, /*wback=*/1, insn_high_registers));
17158 /* LDMDB Ri, {R-low-register-list}. */
17159 current_stub_contents =
17160 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17161 create_instruction_ldmdb
17162 (ri, /*wback=*/0, insn_low_registers));
17164 /* B initial_insn_addr+4. */
17165 current_stub_contents =
17166 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17167 create_instruction_branch_absolute
17168 (initial_insn_addr - current_stub_contents));
17170 else if (!wback && restore_pc && restore_rn)
17173 if (!(insn_high_registers & (1 << rn)))
17175 /* Choose a Ri in the high-register-list that will be restored. */
17176 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
17179 /* SUB Ri, Rn, #(4*nb_registers). */
17180 current_stub_contents =
17181 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17182 create_instruction_sub (ri, rn, (4 * nb_registers)));
17184 /* LDMIA Ri!, {R-low-register-list}. */
17185 current_stub_contents =
17186 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17187 create_instruction_ldmia
17188 (ri, /*wback=*/1, insn_low_registers));
17190 /* LDMIA Ri, {R-high-register-list}. */
17191 current_stub_contents =
17192 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17193 create_instruction_ldmia
17194 (ri, /*wback=*/0, insn_high_registers));
17196 else if (wback && restore_rn)
17198 /* The assembler should not have accepted to encode this. */
17199 BFD_ASSERT (0 && "Cannot patch an instruction that has an "
17200 "undefined behavior.\n");
17203 /* Fill the remaining of the stub with deterministic contents. */
17204 current_stub_contents =
17205 stm32l4xx_fill_stub_udf (htab, output_bfd,
17206 base_stub_contents, current_stub_contents,
17207 base_stub_contents +
17208 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
17213 stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
17215 const insn32 initial_insn,
17216 const bfd_byte *const initial_insn_addr,
17217 bfd_byte *const base_stub_contents)
17219 int num_words = ((unsigned int) initial_insn << 24) >> 24;
17220 bfd_byte *current_stub_contents = base_stub_contents;
17222 BFD_ASSERT (is_thumb2_vldm (initial_insn));
17224 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
17225 smaller than 8 words load sequences that do not cause the
17227 if (num_words <= 8)
17229 /* Untouched instruction. */
17230 current_stub_contents =
17231 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17234 /* B initial_insn_addr+4. */
17235 current_stub_contents =
17236 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17237 create_instruction_branch_absolute
17238 (initial_insn_addr - current_stub_contents));
17242 bfd_boolean is_dp = /* DP encoding. */
17243 (initial_insn & 0xfe100f00) == 0xec100b00;
17244 bfd_boolean is_ia_nobang = /* (IA without !). */
17245 (((initial_insn << 7) >> 28) & 0xd) == 0x4;
17246 bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP. */
17247 (((initial_insn << 7) >> 28) & 0xd) == 0x5;
17248 bfd_boolean is_db_bang = /* (DB with !). */
17249 (((initial_insn << 7) >> 28) & 0xd) == 0x9;
17250 int base_reg = ((unsigned int) initial_insn << 12) >> 28;
17251 /* d = UInt (Vd:D);. */
17252 int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
17253 | (((unsigned int)initial_insn << 9) >> 31);
17255 /* Compute the number of 8-words chunks needed to split. */
17256 int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
17259 /* The test coverage has been done assuming the following
17260 hypothesis that exactly one of the previous is_ predicates is
17262 BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
17263 && !(is_ia_nobang & is_ia_bang & is_db_bang));
17265 /* We treat the cutting of the words in one pass for all
17266 cases, then we emit the adjustments:
17269 -> vldm rx!, {8_words_or_less} for each needed 8_word
17270 -> sub rx, rx, #size (list)
17273 -> vldm rx!, {8_words_or_less} for each needed 8_word
17274 This also handles vpop instruction (when rx is sp)
17277 -> vldmb rx!, {8_words_or_less} for each needed 8_word. */
17278 for (chunk = 0; chunk < chunks; ++chunk)
17280 bfd_vma new_insn = 0;
17282 if (is_ia_nobang || is_ia_bang)
17284 new_insn = create_instruction_vldmia
17288 chunks - (chunk + 1) ?
17289 8 : num_words - chunk * 8,
17290 first_reg + chunk * 8);
17292 else if (is_db_bang)
17294 new_insn = create_instruction_vldmdb
17297 chunks - (chunk + 1) ?
17298 8 : num_words - chunk * 8,
17299 first_reg + chunk * 8);
17303 current_stub_contents =
17304 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17308 /* Only this case requires the base register compensation
17312 current_stub_contents =
17313 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17314 create_instruction_sub
17315 (base_reg, base_reg, 4*num_words));
17318 /* B initial_insn_addr+4. */
17319 current_stub_contents =
17320 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17321 create_instruction_branch_absolute
17322 (initial_insn_addr - current_stub_contents));
17325 /* Fill the remaining of the stub with deterministic contents. */
17326 current_stub_contents =
17327 stm32l4xx_fill_stub_udf (htab, output_bfd,
17328 base_stub_contents, current_stub_contents,
17329 base_stub_contents +
17330 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
17334 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
17336 const insn32 wrong_insn,
17337 const bfd_byte *const wrong_insn_addr,
17338 bfd_byte *const stub_contents)
17340 if (is_thumb2_ldmia (wrong_insn))
17341 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
17342 wrong_insn, wrong_insn_addr,
17344 else if (is_thumb2_ldmdb (wrong_insn))
17345 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
17346 wrong_insn, wrong_insn_addr,
17348 else if (is_thumb2_vldm (wrong_insn))
17349 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
17350 wrong_insn, wrong_insn_addr,
17354 /* End of stm32l4xx work-around. */
17358 elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
17359 asection *output_sec, Elf_Internal_Rela *rel)
17361 BFD_ASSERT (output_sec && rel);
17362 struct bfd_elf_section_reloc_data *output_reldata;
17363 struct elf32_arm_link_hash_table *htab;
17364 struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
17365 Elf_Internal_Shdr *rel_hdr;
17370 rel_hdr = oesd->rel.hdr;
17371 output_reldata = &(oesd->rel);
17373 else if (oesd->rela.hdr)
17375 rel_hdr = oesd->rela.hdr;
17376 output_reldata = &(oesd->rela);
17383 bfd_byte *erel = rel_hdr->contents;
17384 erel += output_reldata->count * rel_hdr->sh_entsize;
17385 htab = elf32_arm_hash_table (info);
17386 SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
17387 output_reldata->count++;
17390 /* Do code byteswapping. Return FALSE afterwards so that the section is
17391 written out as normal. */
17394 elf32_arm_write_section (bfd *output_bfd,
17395 struct bfd_link_info *link_info,
17397 bfd_byte *contents)
17399 unsigned int mapcount, errcount;
17400 _arm_elf_section_data *arm_data;
17401 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
17402 elf32_arm_section_map *map;
17403 elf32_vfp11_erratum_list *errnode;
17404 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
17407 bfd_vma offset = sec->output_section->vma + sec->output_offset;
17411 if (globals == NULL)
17414 /* If this section has not been allocated an _arm_elf_section_data
17415 structure then we cannot record anything. */
17416 arm_data = get_arm_elf_section_data (sec);
17417 if (arm_data == NULL)
17420 mapcount = arm_data->mapcount;
17421 map = arm_data->map;
17422 errcount = arm_data->erratumcount;
17426 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
17428 for (errnode = arm_data->erratumlist; errnode != 0;
17429 errnode = errnode->next)
17431 bfd_vma target = errnode->vma - offset;
17433 switch (errnode->type)
17435 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
17437 bfd_vma branch_to_veneer;
17438 /* Original condition code of instruction, plus bit mask for
17439 ARM B instruction. */
17440 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
17443 /* The instruction is before the label. */
17446 /* Above offset included in -4 below. */
17447 branch_to_veneer = errnode->u.b.veneer->vma
17448 - errnode->vma - 4;
17450 if ((signed) branch_to_veneer < -(1 << 25)
17451 || (signed) branch_to_veneer >= (1 << 25))
17452 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
17453 "range"), output_bfd);
17455 insn |= (branch_to_veneer >> 2) & 0xffffff;
17456 contents[endianflip ^ target] = insn & 0xff;
17457 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
17458 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
17459 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
17463 case VFP11_ERRATUM_ARM_VENEER:
17465 bfd_vma branch_from_veneer;
17468 /* Take size of veneer into account. */
17469 branch_from_veneer = errnode->u.v.branch->vma
17470 - errnode->vma - 12;
17472 if ((signed) branch_from_veneer < -(1 << 25)
17473 || (signed) branch_from_veneer >= (1 << 25))
17474 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
17475 "range"), output_bfd);
17477 /* Original instruction. */
17478 insn = errnode->u.v.branch->u.b.vfp_insn;
17479 contents[endianflip ^ target] = insn & 0xff;
17480 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
17481 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
17482 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
17484 /* Branch back to insn after original insn. */
17485 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
17486 contents[endianflip ^ (target + 4)] = insn & 0xff;
17487 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
17488 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
17489 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
17499 if (arm_data->stm32l4xx_erratumcount != 0)
17501 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
17502 stm32l4xx_errnode != 0;
17503 stm32l4xx_errnode = stm32l4xx_errnode->next)
17505 bfd_vma target = stm32l4xx_errnode->vma - offset;
17507 switch (stm32l4xx_errnode->type)
17509 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
17512 bfd_vma branch_to_veneer =
17513 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
17515 if ((signed) branch_to_veneer < -(1 << 24)
17516 || (signed) branch_to_veneer >= (1 << 24))
17518 bfd_vma out_of_range =
17519 ((signed) branch_to_veneer < -(1 << 24)) ?
17520 - branch_to_veneer - (1 << 24) :
17521 ((signed) branch_to_veneer >= (1 << 24)) ?
17522 branch_to_veneer - (1 << 24) : 0;
17524 (*_bfd_error_handler)
17525 (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
17526 "Jump out of range by %ld bytes. "
17527 "Cannot encode branch instruction. "),
17529 (long) (stm32l4xx_errnode->vma - 4),
17534 insn = create_instruction_branch_absolute
17535 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
17537 /* The instruction is before the label. */
17540 put_thumb2_insn (globals, output_bfd,
17541 (bfd_vma) insn, contents + target);
17545 case STM32L4XX_ERRATUM_VENEER:
17548 bfd_byte * veneer_r;
17551 veneer = contents + target;
17553 + stm32l4xx_errnode->u.b.veneer->vma
17554 - stm32l4xx_errnode->vma - 4;
17556 if ((signed) (veneer_r - veneer -
17557 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
17558 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
17559 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
17560 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
17561 || (signed) (veneer_r - veneer) >= (1 << 24))
17563 (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
17564 "veneer."), output_bfd);
17568 /* Original instruction. */
17569 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
17571 stm32l4xx_create_replacing_stub
17572 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
17582 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
17584 arm_unwind_table_edit *edit_node
17585 = arm_data->u.exidx.unwind_edit_list;
17586 /* Now, sec->size is the size of the section we will write. The original
17587 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
17588 markers) was sec->rawsize. (This isn't the case if we perform no
17589 edits, then rawsize will be zero and we should use size). */
17590 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
17591 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
17592 unsigned int in_index, out_index;
17593 bfd_vma add_to_offsets = 0;
17595 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
17599 unsigned int edit_index = edit_node->index;
17601 if (in_index < edit_index && in_index * 8 < input_size)
17603 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
17604 contents + in_index * 8, add_to_offsets);
17608 else if (in_index == edit_index
17609 || (in_index * 8 >= input_size
17610 && edit_index == UINT_MAX))
17612 switch (edit_node->type)
17614 case DELETE_EXIDX_ENTRY:
17616 add_to_offsets += 8;
17619 case INSERT_EXIDX_CANTUNWIND_AT_END:
17621 asection *text_sec = edit_node->linked_section;
17622 bfd_vma text_offset = text_sec->output_section->vma
17623 + text_sec->output_offset
17625 bfd_vma exidx_offset = offset + out_index * 8;
17626 unsigned long prel31_offset;
17628 /* Note: this is meant to be equivalent to an
17629 R_ARM_PREL31 relocation. These synthetic
17630 EXIDX_CANTUNWIND markers are not relocated by the
17631 usual BFD method. */
17632 prel31_offset = (text_offset - exidx_offset)
17634 if (bfd_link_relocatable (link_info))
17636 /* Here relocation for new EXIDX_CANTUNWIND is
17637 created, so there is no need to
17638 adjust offset by hand. */
17639 prel31_offset = text_sec->output_offset
17642 /* New relocation entity. */
17643 asection *text_out = text_sec->output_section;
17644 Elf_Internal_Rela rel;
17646 rel.r_offset = exidx_offset;
17647 rel.r_info = ELF32_R_INFO (text_out->target_index,
17650 elf32_arm_add_relocation (output_bfd, link_info,
17651 sec->output_section,
17655 /* First address we can't unwind. */
17656 bfd_put_32 (output_bfd, prel31_offset,
17657 &edited_contents[out_index * 8]);
17659 /* Code for EXIDX_CANTUNWIND. */
17660 bfd_put_32 (output_bfd, 0x1,
17661 &edited_contents[out_index * 8 + 4]);
17664 add_to_offsets -= 8;
17669 edit_node = edit_node->next;
17674 /* No more edits, copy remaining entries verbatim. */
17675 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
17676 contents + in_index * 8, add_to_offsets);
17682 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
17683 bfd_set_section_contents (output_bfd, sec->output_section,
17685 (file_ptr) sec->output_offset, sec->size);
17690 /* Fix code to point to Cortex-A8 erratum stubs. */
17691 if (globals->fix_cortex_a8)
17693 struct a8_branch_to_stub_data data;
17695 data.writing_section = sec;
17696 data.contents = contents;
17698 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
17705 if (globals->byteswap_code)
17707 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
17710 for (i = 0; i < mapcount; i++)
17712 if (i == mapcount - 1)
17715 end = map[i + 1].vma;
17717 switch (map[i].type)
17720 /* Byte swap code words. */
17721 while (ptr + 3 < end)
17723 tmp = contents[ptr];
17724 contents[ptr] = contents[ptr + 3];
17725 contents[ptr + 3] = tmp;
17726 tmp = contents[ptr + 1];
17727 contents[ptr + 1] = contents[ptr + 2];
17728 contents[ptr + 2] = tmp;
17734 /* Byte swap code halfwords. */
17735 while (ptr + 1 < end)
17737 tmp = contents[ptr];
17738 contents[ptr] = contents[ptr + 1];
17739 contents[ptr + 1] = tmp;
17745 /* Leave data alone. */
17753 arm_data->mapcount = -1;
17754 arm_data->mapsize = 0;
17755 arm_data->map = NULL;
17760 /* Mangle thumb function symbols as we read them in. */
17763 elf32_arm_swap_symbol_in (bfd * abfd,
17766 Elf_Internal_Sym *dst)
17768 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
17770 dst->st_target_internal = 0;
17772 /* New EABI objects mark thumb function symbols by setting the low bit of
17774 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
17775 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
17777 if (dst->st_value & 1)
17779 dst->st_value &= ~(bfd_vma) 1;
17780 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
17781 ST_BRANCH_TO_THUMB);
17784 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
17786 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
17788 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
17789 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
17791 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
17792 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
17794 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
17800 /* Mangle thumb function symbols as we write them out. */
17803 elf32_arm_swap_symbol_out (bfd *abfd,
17804 const Elf_Internal_Sym *src,
17808 Elf_Internal_Sym newsym;
17810 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
17811 of the address set, as per the new EABI. We do this unconditionally
17812 because objcopy does not set the elf header flags until after
17813 it writes out the symbol table. */
17814 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
17817 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
17818 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
17819 if (newsym.st_shndx != SHN_UNDEF)
17821 /* Do this only for defined symbols. At link type, the static
17822 linker will simulate the work of dynamic linker of resolving
17823 symbols and will carry over the thumbness of found symbols to
17824 the output symbol table. It's not clear how it happens, but
17825 the thumbness of undefined symbols can well be different at
17826 runtime, and writing '1' for them will be confusing for users
17827 and possibly for dynamic linker itself.
17829 newsym.st_value |= 1;
17834 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
17837 /* Add the PT_ARM_EXIDX program header. */
17840 elf32_arm_modify_segment_map (bfd *abfd,
17841 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17843 struct elf_segment_map *m;
17846 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17847 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17849 /* If there is already a PT_ARM_EXIDX header, then we do not
17850 want to add another one. This situation arises when running
17851 "strip"; the input binary already has the header. */
17852 m = elf_seg_map (abfd);
17853 while (m && m->p_type != PT_ARM_EXIDX)
17857 m = (struct elf_segment_map *)
17858 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
17861 m->p_type = PT_ARM_EXIDX;
17863 m->sections[0] = sec;
17865 m->next = elf_seg_map (abfd);
17866 elf_seg_map (abfd) = m;
17873 /* We may add a PT_ARM_EXIDX program header. */
17876 elf32_arm_additional_program_headers (bfd *abfd,
17877 struct bfd_link_info *info ATTRIBUTE_UNUSED)
17881 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17882 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17888 /* Hook called by the linker routine which adds symbols from an object
17892 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
17893 Elf_Internal_Sym *sym, const char **namep,
17894 flagword *flagsp, asection **secp, bfd_vma *valp)
17896 if (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
17897 && (abfd->flags & DYNAMIC) == 0
17898 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
17899 elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc;
17901 if (elf32_arm_hash_table (info) == NULL)
17904 if (elf32_arm_hash_table (info)->vxworks_p
17905 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
17906 flagsp, secp, valp))
17912 /* We use this to override swap_symbol_in and swap_symbol_out. */
17913 const struct elf_size_info elf32_arm_size_info =
17915 sizeof (Elf32_External_Ehdr),
17916 sizeof (Elf32_External_Phdr),
17917 sizeof (Elf32_External_Shdr),
17918 sizeof (Elf32_External_Rel),
17919 sizeof (Elf32_External_Rela),
17920 sizeof (Elf32_External_Sym),
17921 sizeof (Elf32_External_Dyn),
17922 sizeof (Elf_External_Note),
17926 ELFCLASS32, EV_CURRENT,
17927 bfd_elf32_write_out_phdrs,
17928 bfd_elf32_write_shdrs_and_ehdr,
17929 bfd_elf32_checksum_contents,
17930 bfd_elf32_write_relocs,
17931 elf32_arm_swap_symbol_in,
17932 elf32_arm_swap_symbol_out,
17933 bfd_elf32_slurp_reloc_table,
17934 bfd_elf32_slurp_symbol_table,
17935 bfd_elf32_swap_dyn_in,
17936 bfd_elf32_swap_dyn_out,
17937 bfd_elf32_swap_reloc_in,
17938 bfd_elf32_swap_reloc_out,
17939 bfd_elf32_swap_reloca_in,
17940 bfd_elf32_swap_reloca_out
17944 read_code32 (const bfd *abfd, const bfd_byte *addr)
17946 /* V7 BE8 code is always little endian. */
17947 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17948 return bfd_getl32 (addr);
17950 return bfd_get_32 (abfd, addr);
17954 read_code16 (const bfd *abfd, const bfd_byte *addr)
17956 /* V7 BE8 code is always little endian. */
17957 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17958 return bfd_getl16 (addr);
17960 return bfd_get_16 (abfd, addr);
17963 /* Return size of plt0 entry starting at ADDR
17964 or (bfd_vma) -1 if size can not be determined. */
17967 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
17969 bfd_vma first_word;
17972 first_word = read_code32 (abfd, addr);
17974 if (first_word == elf32_arm_plt0_entry[0])
17975 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
17976 else if (first_word == elf32_thumb2_plt0_entry[0])
17977 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
17979 /* We don't yet handle this PLT format. */
17980 return (bfd_vma) -1;
17985 /* Return size of plt entry starting at offset OFFSET
17986 of plt section located at address START
17987 or (bfd_vma) -1 if size can not be determined. */
17990 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
17992 bfd_vma first_insn;
17993 bfd_vma plt_size = 0;
17994 const bfd_byte *addr = start + offset;
17996 /* PLT entry size if fixed on Thumb-only platforms. */
17997 if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
17998 return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
18000 /* Respect Thumb stub if necessary. */
18001 if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
18003 plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
18006 /* Strip immediate from first add. */
18007 first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
18009 #ifdef FOUR_WORD_PLT
18010 if (first_insn == elf32_arm_plt_entry[0])
18011 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
18013 if (first_insn == elf32_arm_plt_entry_long[0])
18014 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
18015 else if (first_insn == elf32_arm_plt_entry_short[0])
18016 plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
18019 /* We don't yet handle this PLT format. */
18020 return (bfd_vma) -1;
18025 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab. */
18028 elf32_arm_get_synthetic_symtab (bfd *abfd,
18029 long symcount ATTRIBUTE_UNUSED,
18030 asymbol **syms ATTRIBUTE_UNUSED,
18040 Elf_Internal_Shdr *hdr;
18048 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
18051 if (dynsymcount <= 0)
18054 relplt = bfd_get_section_by_name (abfd, ".rel.plt");
18055 if (relplt == NULL)
18058 hdr = &elf_section_data (relplt)->this_hdr;
18059 if (hdr->sh_link != elf_dynsymtab (abfd)
18060 || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
18063 plt = bfd_get_section_by_name (abfd, ".plt");
18067 if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
18070 data = plt->contents;
18073 if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
18075 bfd_cache_section_contents((asection *) plt, data);
18078 count = relplt->size / hdr->sh_entsize;
18079 size = count * sizeof (asymbol);
18080 p = relplt->relocation;
18081 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
18083 size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
18084 if (p->addend != 0)
18085 size += sizeof ("+0x") - 1 + 8;
18088 s = *ret = (asymbol *) bfd_malloc (size);
18092 offset = elf32_arm_plt0_size (abfd, data);
18093 if (offset == (bfd_vma) -1)
18096 names = (char *) (s + count);
18097 p = relplt->relocation;
18099 for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
18103 bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
18104 if (plt_size == (bfd_vma) -1)
18107 *s = **p->sym_ptr_ptr;
18108 /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set. Since
18109 we are defining a symbol, ensure one of them is set. */
18110 if ((s->flags & BSF_LOCAL) == 0)
18111 s->flags |= BSF_GLOBAL;
18112 s->flags |= BSF_SYNTHETIC;
18117 len = strlen ((*p->sym_ptr_ptr)->name);
18118 memcpy (names, (*p->sym_ptr_ptr)->name, len);
18120 if (p->addend != 0)
18124 memcpy (names, "+0x", sizeof ("+0x") - 1);
18125 names += sizeof ("+0x") - 1;
18126 bfd_sprintf_vma (abfd, buf, p->addend);
18127 for (a = buf; *a == '0'; ++a)
18130 memcpy (names, a, len);
18133 memcpy (names, "@plt", sizeof ("@plt"));
18134 names += sizeof ("@plt");
18136 offset += plt_size;
18143 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
18145 if (hdr->sh_flags & SHF_ARM_NOREAD)
18146 *flags |= SEC_ELF_NOREAD;
18151 elf32_arm_lookup_section_flags (char *flag_name)
18153 if (!strcmp (flag_name, "SHF_ARM_NOREAD"))
18154 return SHF_ARM_NOREAD;
18156 return SEC_NO_FLAGS;
18159 static unsigned int
18160 elf32_arm_count_additional_relocs (asection *sec)
18162 struct _arm_elf_section_data *arm_data;
18163 arm_data = get_arm_elf_section_data (sec);
18164 return arm_data->additional_reloc_count;
18167 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
18168 has a type >= SHT_LOOS. Returns TRUE if these fields were initialised
18169 FALSE otherwise. ISECTION is the best guess matching section from the
18170 input bfd IBFD, but it might be NULL. */
18173 elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
18174 bfd *obfd ATTRIBUTE_UNUSED,
18175 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
18176 Elf_Internal_Shdr *osection)
18178 switch (osection->sh_type)
18180 case SHT_ARM_EXIDX:
18182 Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
18183 Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
18186 osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
18187 osection->sh_info = 0;
18189 /* The sh_link field must be set to the text section associated with
18190 this index section. Unfortunately the ARM EHABI does not specify
18191 exactly how to determine this association. Our caller does try
18192 to match up OSECTION with its corresponding input section however
18193 so that is a good first guess. */
18194 if (isection != NULL
18195 && osection->bfd_section != NULL
18196 && isection->bfd_section != NULL
18197 && isection->bfd_section->output_section != NULL
18198 && isection->bfd_section->output_section == osection->bfd_section
18199 && iheaders != NULL
18200 && isection->sh_link > 0
18201 && isection->sh_link < elf_numsections (ibfd)
18202 && iheaders[isection->sh_link]->bfd_section != NULL
18203 && iheaders[isection->sh_link]->bfd_section->output_section != NULL
18206 for (i = elf_numsections (obfd); i-- > 0;)
18207 if (oheaders[i]->bfd_section
18208 == iheaders[isection->sh_link]->bfd_section->output_section)
18214 /* Failing that we have to find a matching section ourselves. If
18215 we had the output section name available we could compare that
18216 with input section names. Unfortunately we don't. So instead
18217 we use a simple heuristic and look for the nearest executable
18218 section before this one. */
18219 for (i = elf_numsections (obfd); i-- > 0;)
18220 if (oheaders[i] == osection)
18226 if (oheaders[i]->sh_type == SHT_PROGBITS
18227 && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
18228 == (SHF_ALLOC | SHF_EXECINSTR))
18234 osection->sh_link = i;
18235 /* If the text section was part of a group
18236 then the index section should be too. */
18237 if (oheaders[i]->sh_flags & SHF_GROUP)
18238 osection->sh_flags |= SHF_GROUP;
18244 case SHT_ARM_PREEMPTMAP:
18245 osection->sh_flags = SHF_ALLOC;
18248 case SHT_ARM_ATTRIBUTES:
18249 case SHT_ARM_DEBUGOVERLAY:
18250 case SHT_ARM_OVERLAYSECTION:
18258 #undef elf_backend_copy_special_section_fields
18259 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
18261 #define ELF_ARCH bfd_arch_arm
18262 #define ELF_TARGET_ID ARM_ELF_DATA
18263 #define ELF_MACHINE_CODE EM_ARM
18264 #ifdef __QNXTARGET__
18265 #define ELF_MAXPAGESIZE 0x1000
18267 #define ELF_MAXPAGESIZE 0x10000
18269 #define ELF_MINPAGESIZE 0x1000
18270 #define ELF_COMMONPAGESIZE 0x1000
18272 #define bfd_elf32_mkobject elf32_arm_mkobject
18274 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
18275 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
18276 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
18277 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
18278 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
18279 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
18280 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
18281 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
18282 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
18283 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
18284 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
18285 #define bfd_elf32_bfd_final_link elf32_arm_final_link
18286 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
18288 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
18289 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
18290 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
18291 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
18292 #define elf_backend_check_relocs elf32_arm_check_relocs
18293 #define elf_backend_relocate_section elf32_arm_relocate_section
18294 #define elf_backend_write_section elf32_arm_write_section
18295 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
18296 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
18297 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
18298 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
18299 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
18300 #define elf_backend_always_size_sections elf32_arm_always_size_sections
18301 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
18302 #define elf_backend_post_process_headers elf32_arm_post_process_headers
18303 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
18304 #define elf_backend_object_p elf32_arm_object_p
18305 #define elf_backend_fake_sections elf32_arm_fake_sections
18306 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
18307 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18308 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
18309 #define elf_backend_size_info elf32_arm_size_info
18310 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
18311 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
18312 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
18313 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
18314 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
18315 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
18317 #define elf_backend_can_refcount 1
18318 #define elf_backend_can_gc_sections 1
18319 #define elf_backend_plt_readonly 1
18320 #define elf_backend_want_got_plt 1
18321 #define elf_backend_want_plt_sym 0
18322 #define elf_backend_may_use_rel_p 1
18323 #define elf_backend_may_use_rela_p 0
18324 #define elf_backend_default_use_rela_p 0
18326 #define elf_backend_got_header_size 12
18327 #define elf_backend_extern_protected_data 1
18329 #undef elf_backend_obj_attrs_vendor
18330 #define elf_backend_obj_attrs_vendor "aeabi"
18331 #undef elf_backend_obj_attrs_section
18332 #define elf_backend_obj_attrs_section ".ARM.attributes"
18333 #undef elf_backend_obj_attrs_arg_type
18334 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
18335 #undef elf_backend_obj_attrs_section_type
18336 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
18337 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
18338 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
18340 #undef elf_backend_section_flags
18341 #define elf_backend_section_flags elf32_arm_section_flags
18342 #undef elf_backend_lookup_section_flags_hook
18343 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
18345 #include "elf32-target.h"
18347 /* Native Client targets. */
18349 #undef TARGET_LITTLE_SYM
18350 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
18351 #undef TARGET_LITTLE_NAME
18352 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
18353 #undef TARGET_BIG_SYM
18354 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
18355 #undef TARGET_BIG_NAME
18356 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
18358 /* Like elf32_arm_link_hash_table_create -- but overrides
18359 appropriately for NaCl. */
18361 static struct bfd_link_hash_table *
18362 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
18364 struct bfd_link_hash_table *ret;
18366 ret = elf32_arm_link_hash_table_create (abfd);
18369 struct elf32_arm_link_hash_table *htab
18370 = (struct elf32_arm_link_hash_table *) ret;
18374 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
18375 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
18380 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
18381 really need to use elf32_arm_modify_segment_map. But we do it
18382 anyway just to reduce gratuitous differences with the stock ARM backend. */
18385 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
18387 return (elf32_arm_modify_segment_map (abfd, info)
18388 && nacl_modify_segment_map (abfd, info));
18392 elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
18394 elf32_arm_final_write_processing (abfd, linker);
18395 nacl_final_write_processing (abfd, linker);
18399 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
18400 const arelent *rel ATTRIBUTE_UNUSED)
18403 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
18404 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
18408 #define elf32_bed elf32_arm_nacl_bed
18409 #undef bfd_elf32_bfd_link_hash_table_create
18410 #define bfd_elf32_bfd_link_hash_table_create \
18411 elf32_arm_nacl_link_hash_table_create
18412 #undef elf_backend_plt_alignment
18413 #define elf_backend_plt_alignment 4
18414 #undef elf_backend_modify_segment_map
18415 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
18416 #undef elf_backend_modify_program_headers
18417 #define elf_backend_modify_program_headers nacl_modify_program_headers
18418 #undef elf_backend_final_write_processing
18419 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
18420 #undef bfd_elf32_get_synthetic_symtab
18421 #undef elf_backend_plt_sym_val
18422 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
18423 #undef elf_backend_copy_special_section_fields
18425 #undef ELF_MINPAGESIZE
18426 #undef ELF_COMMONPAGESIZE
18429 #include "elf32-target.h"
18431 /* Reset to defaults. */
18432 #undef elf_backend_plt_alignment
18433 #undef elf_backend_modify_segment_map
18434 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
18435 #undef elf_backend_modify_program_headers
18436 #undef elf_backend_final_write_processing
18437 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18438 #undef ELF_MINPAGESIZE
18439 #define ELF_MINPAGESIZE 0x1000
18440 #undef ELF_COMMONPAGESIZE
18441 #define ELF_COMMONPAGESIZE 0x1000
18444 /* VxWorks Targets. */
18446 #undef TARGET_LITTLE_SYM
18447 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
18448 #undef TARGET_LITTLE_NAME
18449 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
18450 #undef TARGET_BIG_SYM
18451 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
18452 #undef TARGET_BIG_NAME
18453 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
18455 /* Like elf32_arm_link_hash_table_create -- but overrides
18456 appropriately for VxWorks. */
18458 static struct bfd_link_hash_table *
18459 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
18461 struct bfd_link_hash_table *ret;
18463 ret = elf32_arm_link_hash_table_create (abfd);
18466 struct elf32_arm_link_hash_table *htab
18467 = (struct elf32_arm_link_hash_table *) ret;
18469 htab->vxworks_p = 1;
18475 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
18477 elf32_arm_final_write_processing (abfd, linker);
18478 elf_vxworks_final_write_processing (abfd, linker);
18482 #define elf32_bed elf32_arm_vxworks_bed
18484 #undef bfd_elf32_bfd_link_hash_table_create
18485 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
18486 #undef elf_backend_final_write_processing
18487 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
18488 #undef elf_backend_emit_relocs
18489 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
18491 #undef elf_backend_may_use_rel_p
18492 #define elf_backend_may_use_rel_p 0
18493 #undef elf_backend_may_use_rela_p
18494 #define elf_backend_may_use_rela_p 1
18495 #undef elf_backend_default_use_rela_p
18496 #define elf_backend_default_use_rela_p 1
18497 #undef elf_backend_want_plt_sym
18498 #define elf_backend_want_plt_sym 1
18499 #undef ELF_MAXPAGESIZE
18500 #define ELF_MAXPAGESIZE 0x1000
18502 #include "elf32-target.h"
18505 /* Merge backend specific data from an object file to the output
18506 object file when linking. */
18509 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
18511 flagword out_flags;
18513 bfd_boolean flags_compatible = TRUE;
18516 /* Check if we have the same endianness. */
18517 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
18520 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
18523 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
18526 /* The input BFD must have had its flags initialised. */
18527 /* The following seems bogus to me -- The flags are initialized in
18528 the assembler but I don't think an elf_flags_init field is
18529 written into the object. */
18530 /* BFD_ASSERT (elf_flags_init (ibfd)); */
18532 in_flags = elf_elfheader (ibfd)->e_flags;
18533 out_flags = elf_elfheader (obfd)->e_flags;
18535 /* In theory there is no reason why we couldn't handle this. However
18536 in practice it isn't even close to working and there is no real
18537 reason to want it. */
18538 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
18539 && !(ibfd->flags & DYNAMIC)
18540 && (in_flags & EF_ARM_BE8))
18542 _bfd_error_handler (_("error: %B is already in final BE8 format"),
18547 if (!elf_flags_init (obfd))
18549 /* If the input is the default architecture and had the default
18550 flags then do not bother setting the flags for the output
18551 architecture, instead allow future merges to do this. If no
18552 future merges ever set these flags then they will retain their
18553 uninitialised values, which surprise surprise, correspond
18554 to the default values. */
18555 if (bfd_get_arch_info (ibfd)->the_default
18556 && elf_elfheader (ibfd)->e_flags == 0)
18559 elf_flags_init (obfd) = TRUE;
18560 elf_elfheader (obfd)->e_flags = in_flags;
18562 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
18563 && bfd_get_arch_info (obfd)->the_default)
18564 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
18569 /* Determine what should happen if the input ARM architecture
18570 does not match the output ARM architecture. */
18571 if (! bfd_arm_merge_machines (ibfd, obfd))
18574 /* Identical flags must be compatible. */
18575 if (in_flags == out_flags)
18578 /* Check to see if the input BFD actually contains any sections. If
18579 not, its flags may not have been initialised either, but it
18580 cannot actually cause any incompatiblity. Do not short-circuit
18581 dynamic objects; their section list may be emptied by
18582 elf_link_add_object_symbols.
18584 Also check to see if there are no code sections in the input.
18585 In this case there is no need to check for code specific flags.
18586 XXX - do we need to worry about floating-point format compatability
18587 in data sections ? */
18588 if (!(ibfd->flags & DYNAMIC))
18590 bfd_boolean null_input_bfd = TRUE;
18591 bfd_boolean only_data_sections = TRUE;
18593 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
18595 /* Ignore synthetic glue sections. */
18596 if (strcmp (sec->name, ".glue_7")
18597 && strcmp (sec->name, ".glue_7t"))
18599 if ((bfd_get_section_flags (ibfd, sec)
18600 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
18601 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
18602 only_data_sections = FALSE;
18604 null_input_bfd = FALSE;
18609 if (null_input_bfd || only_data_sections)
18613 /* Complain about various flag mismatches. */
18614 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
18615 EF_ARM_EABI_VERSION (out_flags)))
18618 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
18620 (in_flags & EF_ARM_EABIMASK) >> 24,
18621 (out_flags & EF_ARM_EABIMASK) >> 24);
18625 /* Not sure what needs to be checked for EABI versions >= 1. */
18626 /* VxWorks libraries do not use these flags. */
18627 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
18628 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
18629 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
18631 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
18634 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
18636 in_flags & EF_ARM_APCS_26 ? 26 : 32,
18637 out_flags & EF_ARM_APCS_26 ? 26 : 32);
18638 flags_compatible = FALSE;
18641 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
18643 if (in_flags & EF_ARM_APCS_FLOAT)
18645 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
18649 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
18652 flags_compatible = FALSE;
18655 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
18657 if (in_flags & EF_ARM_VFP_FLOAT)
18659 (_("error: %B uses VFP instructions, whereas %B does not"),
18663 (_("error: %B uses FPA instructions, whereas %B does not"),
18666 flags_compatible = FALSE;
18669 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
18671 if (in_flags & EF_ARM_MAVERICK_FLOAT)
18673 (_("error: %B uses Maverick instructions, whereas %B does not"),
18677 (_("error: %B does not use Maverick instructions, whereas %B does"),
18680 flags_compatible = FALSE;
18683 #ifdef EF_ARM_SOFT_FLOAT
18684 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
18686 /* We can allow interworking between code that is VFP format
18687 layout, and uses either soft float or integer regs for
18688 passing floating point arguments and results. We already
18689 know that the APCS_FLOAT flags match; similarly for VFP
18691 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
18692 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
18694 if (in_flags & EF_ARM_SOFT_FLOAT)
18696 (_("error: %B uses software FP, whereas %B uses hardware FP"),
18700 (_("error: %B uses hardware FP, whereas %B uses software FP"),
18703 flags_compatible = FALSE;
18708 /* Interworking mismatch is only a warning. */
18709 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
18711 if (in_flags & EF_ARM_INTERWORK)
18714 (_("Warning: %B supports interworking, whereas %B does not"),
18720 (_("Warning: %B does not support interworking, whereas %B does"),
18726 return flags_compatible;
18730 /* Symbian OS Targets. */
18732 #undef TARGET_LITTLE_SYM
18733 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
18734 #undef TARGET_LITTLE_NAME
18735 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
18736 #undef TARGET_BIG_SYM
18737 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
18738 #undef TARGET_BIG_NAME
18739 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
18741 /* Like elf32_arm_link_hash_table_create -- but overrides
18742 appropriately for Symbian OS. */
18744 static struct bfd_link_hash_table *
18745 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
18747 struct bfd_link_hash_table *ret;
18749 ret = elf32_arm_link_hash_table_create (abfd);
18752 struct elf32_arm_link_hash_table *htab
18753 = (struct elf32_arm_link_hash_table *)ret;
18754 /* There is no PLT header for Symbian OS. */
18755 htab->plt_header_size = 0;
18756 /* The PLT entries are each one instruction and one word. */
18757 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
18758 htab->symbian_p = 1;
18759 /* Symbian uses armv5t or above, so use_blx is always true. */
18761 htab->root.is_relocatable_executable = 1;
18766 static const struct bfd_elf_special_section
18767 elf32_arm_symbian_special_sections[] =
18769 /* In a BPABI executable, the dynamic linking sections do not go in
18770 the loadable read-only segment. The post-linker may wish to
18771 refer to these sections, but they are not part of the final
18773 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
18774 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
18775 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
18776 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
18777 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
18778 /* These sections do not need to be writable as the SymbianOS
18779 postlinker will arrange things so that no dynamic relocation is
18781 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
18782 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
18783 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
18784 { NULL, 0, 0, 0, 0 }
18788 elf32_arm_symbian_begin_write_processing (bfd *abfd,
18789 struct bfd_link_info *link_info)
18791 /* BPABI objects are never loaded directly by an OS kernel; they are
18792 processed by a postlinker first, into an OS-specific format. If
18793 the D_PAGED bit is set on the file, BFD will align segments on
18794 page boundaries, so that an OS can directly map the file. With
18795 BPABI objects, that just results in wasted space. In addition,
18796 because we clear the D_PAGED bit, map_sections_to_segments will
18797 recognize that the program headers should not be mapped into any
18798 loadable segment. */
18799 abfd->flags &= ~D_PAGED;
18800 elf32_arm_begin_write_processing (abfd, link_info);
18804 elf32_arm_symbian_modify_segment_map (bfd *abfd,
18805 struct bfd_link_info *info)
18807 struct elf_segment_map *m;
18810 /* BPABI shared libraries and executables should have a PT_DYNAMIC
18811 segment. However, because the .dynamic section is not marked
18812 with SEC_LOAD, the generic ELF code will not create such a
18814 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
18817 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
18818 if (m->p_type == PT_DYNAMIC)
18823 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
18824 m->next = elf_seg_map (abfd);
18825 elf_seg_map (abfd) = m;
18829 /* Also call the generic arm routine. */
18830 return elf32_arm_modify_segment_map (abfd, info);
18833 /* Return address for Ith PLT stub in section PLT, for relocation REL
18834 or (bfd_vma) -1 if it should not be included. */
18837 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
18838 const arelent *rel ATTRIBUTE_UNUSED)
18840 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
18844 #define elf32_bed elf32_arm_symbian_bed
18846 /* The dynamic sections are not allocated on SymbianOS; the postlinker
18847 will process them and then discard them. */
18848 #undef ELF_DYNAMIC_SEC_FLAGS
18849 #define ELF_DYNAMIC_SEC_FLAGS \
18850 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
18852 #undef elf_backend_emit_relocs
18854 #undef bfd_elf32_bfd_link_hash_table_create
18855 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
18856 #undef elf_backend_special_sections
18857 #define elf_backend_special_sections elf32_arm_symbian_special_sections
18858 #undef elf_backend_begin_write_processing
18859 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
18860 #undef elf_backend_final_write_processing
18861 #define elf_backend_final_write_processing elf32_arm_final_write_processing
18863 #undef elf_backend_modify_segment_map
18864 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
18866 /* There is no .got section for BPABI objects, and hence no header. */
18867 #undef elf_backend_got_header_size
18868 #define elf_backend_got_header_size 0
18870 /* Similarly, there is no .got.plt section. */
18871 #undef elf_backend_want_got_plt
18872 #define elf_backend_want_got_plt 0
18874 #undef elf_backend_plt_sym_val
18875 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
18877 #undef elf_backend_may_use_rel_p
18878 #define elf_backend_may_use_rel_p 1
18879 #undef elf_backend_may_use_rela_p
18880 #define elf_backend_may_use_rela_p 0
18881 #undef elf_backend_default_use_rela_p
18882 #define elf_backend_default_use_rela_p 0
18883 #undef elf_backend_want_plt_sym
18884 #define elf_backend_want_plt_sym 0
18885 #undef ELF_MAXPAGESIZE
18886 #define ELF_MAXPAGESIZE 0x8000
18888 #include "elf32-target.h"