1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
26 #include "bfd_stdint.h"
27 #include "libiberty.h"
31 #include "elf-vxworks.h"
/* Yield the name of the relocation section that goes with section
   NAME: ".rela" NAME when the hash table uses RELA relocations,
   ".rel" NAME otherwise.  HTAB is the bfd's link hash table.  */
#define RELOC_SECTION(HTAB, NAME) \
  (!(HTAB)->use_rel ? ".rela" NAME : ".rel" NAME)
/* Return size of a relocation entry.  HTAB is the bfd's
   link hash table.  The REL/RELA selector expression had been lost
   in this copy (the macro began directly with '? sizeof'); restore
   the (HTAB)->use_rel test used by the sibling macros.  */
#define RELOC_SIZE(HTAB)			\
  ((HTAB)->use_rel				\
   ? sizeof (Elf32_External_Rel)		\
   : sizeof (Elf32_External_Rela))
/* Return function to swap relocations in.  HTAB is the bfd's
   link hash table.  The (HTAB)->use_rel selector line was missing
   in this copy (macro began with '? bfd_elf32_swap_reloc_in');
   restored to match the RELOC_SECTION pattern.  */
#define SWAP_RELOC_IN(HTAB)		\
  ((HTAB)->use_rel			\
   ? bfd_elf32_swap_reloc_in		\
   : bfd_elf32_swap_reloca_in)
/* Return function to swap relocations out.  HTAB is the bfd's
   link hash table.  The (HTAB)->use_rel selector line was missing
   in this copy (macro began with '? bfd_elf32_swap_reloc_out');
   restored to match SWAP_RELOC_IN.  */
#define SWAP_RELOC_OUT(HTAB)		\
  ((HTAB)->use_rel			\
   ? bfd_elf32_swap_reloc_out		\
   : bfd_elf32_swap_reloca_out)
/* Howto conversion hooks: only the REL-style hook is provided
   (elf_info_to_howto is 0), so info-to-howto conversion goes through
   elf32_arm_info_to_howto.  */
#define elf_info_to_howto		0
#define elf_info_to_howto_rel		elf32_arm_info_to_howto

/* ELF header e_ident values used for ARM objects.  ELFOSABI_ARM is
   presumably declared by the ELF headers included above — confirm
   against include/elf/arm.h.  */
#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
66 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
67 struct bfd_link_info *link_info,
71 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
72 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75 static reloc_howto_type elf32_arm_howto_table_1[] =
78 HOWTO (R_ARM_NONE, /* type */
80 0, /* size (0 = byte, 1 = short, 2 = long) */
82 FALSE, /* pc_relative */
84 complain_overflow_dont,/* complain_on_overflow */
85 bfd_elf_generic_reloc, /* special_function */
86 "R_ARM_NONE", /* name */
87 FALSE, /* partial_inplace */
90 FALSE), /* pcrel_offset */
92 HOWTO (R_ARM_PC24, /* type */
94 2, /* size (0 = byte, 1 = short, 2 = long) */
96 TRUE, /* pc_relative */
98 complain_overflow_signed,/* complain_on_overflow */
99 bfd_elf_generic_reloc, /* special_function */
100 "R_ARM_PC24", /* name */
101 FALSE, /* partial_inplace */
102 0x00ffffff, /* src_mask */
103 0x00ffffff, /* dst_mask */
104 TRUE), /* pcrel_offset */
106 /* 32 bit absolute */
107 HOWTO (R_ARM_ABS32, /* type */
109 2, /* size (0 = byte, 1 = short, 2 = long) */
111 FALSE, /* pc_relative */
113 complain_overflow_bitfield,/* complain_on_overflow */
114 bfd_elf_generic_reloc, /* special_function */
115 "R_ARM_ABS32", /* name */
116 FALSE, /* partial_inplace */
117 0xffffffff, /* src_mask */
118 0xffffffff, /* dst_mask */
119 FALSE), /* pcrel_offset */
121 /* standard 32bit pc-relative reloc */
122 HOWTO (R_ARM_REL32, /* type */
124 2, /* size (0 = byte, 1 = short, 2 = long) */
126 TRUE, /* pc_relative */
128 complain_overflow_bitfield,/* complain_on_overflow */
129 bfd_elf_generic_reloc, /* special_function */
130 "R_ARM_REL32", /* name */
131 FALSE, /* partial_inplace */
132 0xffffffff, /* src_mask */
133 0xffffffff, /* dst_mask */
134 TRUE), /* pcrel_offset */
136 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
137 HOWTO (R_ARM_LDR_PC_G0, /* type */
139 0, /* size (0 = byte, 1 = short, 2 = long) */
141 TRUE, /* pc_relative */
143 complain_overflow_dont,/* complain_on_overflow */
144 bfd_elf_generic_reloc, /* special_function */
145 "R_ARM_LDR_PC_G0", /* name */
146 FALSE, /* partial_inplace */
147 0xffffffff, /* src_mask */
148 0xffffffff, /* dst_mask */
149 TRUE), /* pcrel_offset */
151 /* 16 bit absolute */
152 HOWTO (R_ARM_ABS16, /* type */
154 1, /* size (0 = byte, 1 = short, 2 = long) */
156 FALSE, /* pc_relative */
158 complain_overflow_bitfield,/* complain_on_overflow */
159 bfd_elf_generic_reloc, /* special_function */
160 "R_ARM_ABS16", /* name */
161 FALSE, /* partial_inplace */
162 0x0000ffff, /* src_mask */
163 0x0000ffff, /* dst_mask */
164 FALSE), /* pcrel_offset */
166 /* 12 bit absolute */
167 HOWTO (R_ARM_ABS12, /* type */
169 2, /* size (0 = byte, 1 = short, 2 = long) */
171 FALSE, /* pc_relative */
173 complain_overflow_bitfield,/* complain_on_overflow */
174 bfd_elf_generic_reloc, /* special_function */
175 "R_ARM_ABS12", /* name */
176 FALSE, /* partial_inplace */
177 0x00000fff, /* src_mask */
178 0x00000fff, /* dst_mask */
179 FALSE), /* pcrel_offset */
181 HOWTO (R_ARM_THM_ABS5, /* type */
183 1, /* size (0 = byte, 1 = short, 2 = long) */
185 FALSE, /* pc_relative */
187 complain_overflow_bitfield,/* complain_on_overflow */
188 bfd_elf_generic_reloc, /* special_function */
189 "R_ARM_THM_ABS5", /* name */
190 FALSE, /* partial_inplace */
191 0x000007e0, /* src_mask */
192 0x000007e0, /* dst_mask */
193 FALSE), /* pcrel_offset */
196 HOWTO (R_ARM_ABS8, /* type */
198 0, /* size (0 = byte, 1 = short, 2 = long) */
200 FALSE, /* pc_relative */
202 complain_overflow_bitfield,/* complain_on_overflow */
203 bfd_elf_generic_reloc, /* special_function */
204 "R_ARM_ABS8", /* name */
205 FALSE, /* partial_inplace */
206 0x000000ff, /* src_mask */
207 0x000000ff, /* dst_mask */
208 FALSE), /* pcrel_offset */
210 HOWTO (R_ARM_SBREL32, /* type */
212 2, /* size (0 = byte, 1 = short, 2 = long) */
214 FALSE, /* pc_relative */
216 complain_overflow_dont,/* complain_on_overflow */
217 bfd_elf_generic_reloc, /* special_function */
218 "R_ARM_SBREL32", /* name */
219 FALSE, /* partial_inplace */
220 0xffffffff, /* src_mask */
221 0xffffffff, /* dst_mask */
222 FALSE), /* pcrel_offset */
224 HOWTO (R_ARM_THM_CALL, /* type */
226 2, /* size (0 = byte, 1 = short, 2 = long) */
228 TRUE, /* pc_relative */
230 complain_overflow_signed,/* complain_on_overflow */
231 bfd_elf_generic_reloc, /* special_function */
232 "R_ARM_THM_CALL", /* name */
233 FALSE, /* partial_inplace */
234 0x07ff2fff, /* src_mask */
235 0x07ff2fff, /* dst_mask */
236 TRUE), /* pcrel_offset */
238 HOWTO (R_ARM_THM_PC8, /* type */
240 1, /* size (0 = byte, 1 = short, 2 = long) */
242 TRUE, /* pc_relative */
244 complain_overflow_signed,/* complain_on_overflow */
245 bfd_elf_generic_reloc, /* special_function */
246 "R_ARM_THM_PC8", /* name */
247 FALSE, /* partial_inplace */
248 0x000000ff, /* src_mask */
249 0x000000ff, /* dst_mask */
250 TRUE), /* pcrel_offset */
252 HOWTO (R_ARM_BREL_ADJ, /* type */
254 1, /* size (0 = byte, 1 = short, 2 = long) */
256 FALSE, /* pc_relative */
258 complain_overflow_signed,/* complain_on_overflow */
259 bfd_elf_generic_reloc, /* special_function */
260 "R_ARM_BREL_ADJ", /* name */
261 FALSE, /* partial_inplace */
262 0xffffffff, /* src_mask */
263 0xffffffff, /* dst_mask */
264 FALSE), /* pcrel_offset */
266 HOWTO (R_ARM_TLS_DESC, /* type */
268 2, /* size (0 = byte, 1 = short, 2 = long) */
270 FALSE, /* pc_relative */
272 complain_overflow_bitfield,/* complain_on_overflow */
273 bfd_elf_generic_reloc, /* special_function */
274 "R_ARM_TLS_DESC", /* name */
275 FALSE, /* partial_inplace */
276 0xffffffff, /* src_mask */
277 0xffffffff, /* dst_mask */
278 FALSE), /* pcrel_offset */
280 HOWTO (R_ARM_THM_SWI8, /* type */
282 0, /* size (0 = byte, 1 = short, 2 = long) */
284 FALSE, /* pc_relative */
286 complain_overflow_signed,/* complain_on_overflow */
287 bfd_elf_generic_reloc, /* special_function */
288 "R_ARM_SWI8", /* name */
289 FALSE, /* partial_inplace */
290 0x00000000, /* src_mask */
291 0x00000000, /* dst_mask */
292 FALSE), /* pcrel_offset */
294 /* BLX instruction for the ARM. */
295 HOWTO (R_ARM_XPC25, /* type */
297 2, /* size (0 = byte, 1 = short, 2 = long) */
299 TRUE, /* pc_relative */
301 complain_overflow_signed,/* complain_on_overflow */
302 bfd_elf_generic_reloc, /* special_function */
303 "R_ARM_XPC25", /* name */
304 FALSE, /* partial_inplace */
305 0x00ffffff, /* src_mask */
306 0x00ffffff, /* dst_mask */
307 TRUE), /* pcrel_offset */
309 /* BLX instruction for the Thumb. */
310 HOWTO (R_ARM_THM_XPC22, /* type */
312 2, /* size (0 = byte, 1 = short, 2 = long) */
314 TRUE, /* pc_relative */
316 complain_overflow_signed,/* complain_on_overflow */
317 bfd_elf_generic_reloc, /* special_function */
318 "R_ARM_THM_XPC22", /* name */
319 FALSE, /* partial_inplace */
320 0x07ff2fff, /* src_mask */
321 0x07ff2fff, /* dst_mask */
322 TRUE), /* pcrel_offset */
324 /* Dynamic TLS relocations. */
326 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
328 2, /* size (0 = byte, 1 = short, 2 = long) */
330 FALSE, /* pc_relative */
332 complain_overflow_bitfield,/* complain_on_overflow */
333 bfd_elf_generic_reloc, /* special_function */
334 "R_ARM_TLS_DTPMOD32", /* name */
335 TRUE, /* partial_inplace */
336 0xffffffff, /* src_mask */
337 0xffffffff, /* dst_mask */
338 FALSE), /* pcrel_offset */
340 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
342 2, /* size (0 = byte, 1 = short, 2 = long) */
344 FALSE, /* pc_relative */
346 complain_overflow_bitfield,/* complain_on_overflow */
347 bfd_elf_generic_reloc, /* special_function */
348 "R_ARM_TLS_DTPOFF32", /* name */
349 TRUE, /* partial_inplace */
350 0xffffffff, /* src_mask */
351 0xffffffff, /* dst_mask */
352 FALSE), /* pcrel_offset */
354 HOWTO (R_ARM_TLS_TPOFF32, /* type */
356 2, /* size (0 = byte, 1 = short, 2 = long) */
358 FALSE, /* pc_relative */
360 complain_overflow_bitfield,/* complain_on_overflow */
361 bfd_elf_generic_reloc, /* special_function */
362 "R_ARM_TLS_TPOFF32", /* name */
363 TRUE, /* partial_inplace */
364 0xffffffff, /* src_mask */
365 0xffffffff, /* dst_mask */
366 FALSE), /* pcrel_offset */
368 /* Relocs used in ARM Linux */
370 HOWTO (R_ARM_COPY, /* type */
372 2, /* size (0 = byte, 1 = short, 2 = long) */
374 FALSE, /* pc_relative */
376 complain_overflow_bitfield,/* complain_on_overflow */
377 bfd_elf_generic_reloc, /* special_function */
378 "R_ARM_COPY", /* name */
379 TRUE, /* partial_inplace */
380 0xffffffff, /* src_mask */
381 0xffffffff, /* dst_mask */
382 FALSE), /* pcrel_offset */
384 HOWTO (R_ARM_GLOB_DAT, /* type */
386 2, /* size (0 = byte, 1 = short, 2 = long) */
388 FALSE, /* pc_relative */
390 complain_overflow_bitfield,/* complain_on_overflow */
391 bfd_elf_generic_reloc, /* special_function */
392 "R_ARM_GLOB_DAT", /* name */
393 TRUE, /* partial_inplace */
394 0xffffffff, /* src_mask */
395 0xffffffff, /* dst_mask */
396 FALSE), /* pcrel_offset */
398 HOWTO (R_ARM_JUMP_SLOT, /* type */
400 2, /* size (0 = byte, 1 = short, 2 = long) */
402 FALSE, /* pc_relative */
404 complain_overflow_bitfield,/* complain_on_overflow */
405 bfd_elf_generic_reloc, /* special_function */
406 "R_ARM_JUMP_SLOT", /* name */
407 TRUE, /* partial_inplace */
408 0xffffffff, /* src_mask */
409 0xffffffff, /* dst_mask */
410 FALSE), /* pcrel_offset */
412 HOWTO (R_ARM_RELATIVE, /* type */
414 2, /* size (0 = byte, 1 = short, 2 = long) */
416 FALSE, /* pc_relative */
418 complain_overflow_bitfield,/* complain_on_overflow */
419 bfd_elf_generic_reloc, /* special_function */
420 "R_ARM_RELATIVE", /* name */
421 TRUE, /* partial_inplace */
422 0xffffffff, /* src_mask */
423 0xffffffff, /* dst_mask */
424 FALSE), /* pcrel_offset */
426 HOWTO (R_ARM_GOTOFF32, /* type */
428 2, /* size (0 = byte, 1 = short, 2 = long) */
430 FALSE, /* pc_relative */
432 complain_overflow_bitfield,/* complain_on_overflow */
433 bfd_elf_generic_reloc, /* special_function */
434 "R_ARM_GOTOFF32", /* name */
435 TRUE, /* partial_inplace */
436 0xffffffff, /* src_mask */
437 0xffffffff, /* dst_mask */
438 FALSE), /* pcrel_offset */
440 HOWTO (R_ARM_GOTPC, /* type */
442 2, /* size (0 = byte, 1 = short, 2 = long) */
444 TRUE, /* pc_relative */
446 complain_overflow_bitfield,/* complain_on_overflow */
447 bfd_elf_generic_reloc, /* special_function */
448 "R_ARM_GOTPC", /* name */
449 TRUE, /* partial_inplace */
450 0xffffffff, /* src_mask */
451 0xffffffff, /* dst_mask */
452 TRUE), /* pcrel_offset */
454 HOWTO (R_ARM_GOT32, /* type */
456 2, /* size (0 = byte, 1 = short, 2 = long) */
458 FALSE, /* pc_relative */
460 complain_overflow_bitfield,/* complain_on_overflow */
461 bfd_elf_generic_reloc, /* special_function */
462 "R_ARM_GOT32", /* name */
463 TRUE, /* partial_inplace */
464 0xffffffff, /* src_mask */
465 0xffffffff, /* dst_mask */
466 FALSE), /* pcrel_offset */
468 HOWTO (R_ARM_PLT32, /* type */
470 2, /* size (0 = byte, 1 = short, 2 = long) */
472 TRUE, /* pc_relative */
474 complain_overflow_bitfield,/* complain_on_overflow */
475 bfd_elf_generic_reloc, /* special_function */
476 "R_ARM_PLT32", /* name */
477 FALSE, /* partial_inplace */
478 0x00ffffff, /* src_mask */
479 0x00ffffff, /* dst_mask */
480 TRUE), /* pcrel_offset */
482 HOWTO (R_ARM_CALL, /* type */
484 2, /* size (0 = byte, 1 = short, 2 = long) */
486 TRUE, /* pc_relative */
488 complain_overflow_signed,/* complain_on_overflow */
489 bfd_elf_generic_reloc, /* special_function */
490 "R_ARM_CALL", /* name */
491 FALSE, /* partial_inplace */
492 0x00ffffff, /* src_mask */
493 0x00ffffff, /* dst_mask */
494 TRUE), /* pcrel_offset */
496 HOWTO (R_ARM_JUMP24, /* type */
498 2, /* size (0 = byte, 1 = short, 2 = long) */
500 TRUE, /* pc_relative */
502 complain_overflow_signed,/* complain_on_overflow */
503 bfd_elf_generic_reloc, /* special_function */
504 "R_ARM_JUMP24", /* name */
505 FALSE, /* partial_inplace */
506 0x00ffffff, /* src_mask */
507 0x00ffffff, /* dst_mask */
508 TRUE), /* pcrel_offset */
510 HOWTO (R_ARM_THM_JUMP24, /* type */
512 2, /* size (0 = byte, 1 = short, 2 = long) */
514 TRUE, /* pc_relative */
516 complain_overflow_signed,/* complain_on_overflow */
517 bfd_elf_generic_reloc, /* special_function */
518 "R_ARM_THM_JUMP24", /* name */
519 FALSE, /* partial_inplace */
520 0x07ff2fff, /* src_mask */
521 0x07ff2fff, /* dst_mask */
522 TRUE), /* pcrel_offset */
524 HOWTO (R_ARM_BASE_ABS, /* type */
526 2, /* size (0 = byte, 1 = short, 2 = long) */
528 FALSE, /* pc_relative */
530 complain_overflow_dont,/* complain_on_overflow */
531 bfd_elf_generic_reloc, /* special_function */
532 "R_ARM_BASE_ABS", /* name */
533 FALSE, /* partial_inplace */
534 0xffffffff, /* src_mask */
535 0xffffffff, /* dst_mask */
536 FALSE), /* pcrel_offset */
538 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
540 2, /* size (0 = byte, 1 = short, 2 = long) */
542 TRUE, /* pc_relative */
544 complain_overflow_dont,/* complain_on_overflow */
545 bfd_elf_generic_reloc, /* special_function */
546 "R_ARM_ALU_PCREL_7_0", /* name */
547 FALSE, /* partial_inplace */
548 0x00000fff, /* src_mask */
549 0x00000fff, /* dst_mask */
550 TRUE), /* pcrel_offset */
552 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
554 2, /* size (0 = byte, 1 = short, 2 = long) */
556 TRUE, /* pc_relative */
558 complain_overflow_dont,/* complain_on_overflow */
559 bfd_elf_generic_reloc, /* special_function */
560 "R_ARM_ALU_PCREL_15_8",/* name */
561 FALSE, /* partial_inplace */
562 0x00000fff, /* src_mask */
563 0x00000fff, /* dst_mask */
564 TRUE), /* pcrel_offset */
566 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
568 2, /* size (0 = byte, 1 = short, 2 = long) */
570 TRUE, /* pc_relative */
572 complain_overflow_dont,/* complain_on_overflow */
573 bfd_elf_generic_reloc, /* special_function */
574 "R_ARM_ALU_PCREL_23_15",/* name */
575 FALSE, /* partial_inplace */
576 0x00000fff, /* src_mask */
577 0x00000fff, /* dst_mask */
578 TRUE), /* pcrel_offset */
580 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
582 2, /* size (0 = byte, 1 = short, 2 = long) */
584 FALSE, /* pc_relative */
586 complain_overflow_dont,/* complain_on_overflow */
587 bfd_elf_generic_reloc, /* special_function */
588 "R_ARM_LDR_SBREL_11_0",/* name */
589 FALSE, /* partial_inplace */
590 0x00000fff, /* src_mask */
591 0x00000fff, /* dst_mask */
592 FALSE), /* pcrel_offset */
594 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
596 2, /* size (0 = byte, 1 = short, 2 = long) */
598 FALSE, /* pc_relative */
600 complain_overflow_dont,/* complain_on_overflow */
601 bfd_elf_generic_reloc, /* special_function */
602 "R_ARM_ALU_SBREL_19_12",/* name */
603 FALSE, /* partial_inplace */
604 0x000ff000, /* src_mask */
605 0x000ff000, /* dst_mask */
606 FALSE), /* pcrel_offset */
608 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
610 2, /* size (0 = byte, 1 = short, 2 = long) */
612 FALSE, /* pc_relative */
614 complain_overflow_dont,/* complain_on_overflow */
615 bfd_elf_generic_reloc, /* special_function */
616 "R_ARM_ALU_SBREL_27_20",/* name */
617 FALSE, /* partial_inplace */
618 0x0ff00000, /* src_mask */
619 0x0ff00000, /* dst_mask */
620 FALSE), /* pcrel_offset */
622 HOWTO (R_ARM_TARGET1, /* type */
624 2, /* size (0 = byte, 1 = short, 2 = long) */
626 FALSE, /* pc_relative */
628 complain_overflow_dont,/* complain_on_overflow */
629 bfd_elf_generic_reloc, /* special_function */
630 "R_ARM_TARGET1", /* name */
631 FALSE, /* partial_inplace */
632 0xffffffff, /* src_mask */
633 0xffffffff, /* dst_mask */
634 FALSE), /* pcrel_offset */
636 HOWTO (R_ARM_ROSEGREL32, /* type */
638 2, /* size (0 = byte, 1 = short, 2 = long) */
640 FALSE, /* pc_relative */
642 complain_overflow_dont,/* complain_on_overflow */
643 bfd_elf_generic_reloc, /* special_function */
644 "R_ARM_ROSEGREL32", /* name */
645 FALSE, /* partial_inplace */
646 0xffffffff, /* src_mask */
647 0xffffffff, /* dst_mask */
648 FALSE), /* pcrel_offset */
650 HOWTO (R_ARM_V4BX, /* type */
652 2, /* size (0 = byte, 1 = short, 2 = long) */
654 FALSE, /* pc_relative */
656 complain_overflow_dont,/* complain_on_overflow */
657 bfd_elf_generic_reloc, /* special_function */
658 "R_ARM_V4BX", /* name */
659 FALSE, /* partial_inplace */
660 0xffffffff, /* src_mask */
661 0xffffffff, /* dst_mask */
662 FALSE), /* pcrel_offset */
664 HOWTO (R_ARM_TARGET2, /* type */
666 2, /* size (0 = byte, 1 = short, 2 = long) */
668 FALSE, /* pc_relative */
670 complain_overflow_signed,/* complain_on_overflow */
671 bfd_elf_generic_reloc, /* special_function */
672 "R_ARM_TARGET2", /* name */
673 FALSE, /* partial_inplace */
674 0xffffffff, /* src_mask */
675 0xffffffff, /* dst_mask */
676 TRUE), /* pcrel_offset */
678 HOWTO (R_ARM_PREL31, /* type */
680 2, /* size (0 = byte, 1 = short, 2 = long) */
682 TRUE, /* pc_relative */
684 complain_overflow_signed,/* complain_on_overflow */
685 bfd_elf_generic_reloc, /* special_function */
686 "R_ARM_PREL31", /* name */
687 FALSE, /* partial_inplace */
688 0x7fffffff, /* src_mask */
689 0x7fffffff, /* dst_mask */
690 TRUE), /* pcrel_offset */
692 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
694 2, /* size (0 = byte, 1 = short, 2 = long) */
696 FALSE, /* pc_relative */
698 complain_overflow_dont,/* complain_on_overflow */
699 bfd_elf_generic_reloc, /* special_function */
700 "R_ARM_MOVW_ABS_NC", /* name */
701 FALSE, /* partial_inplace */
702 0x000f0fff, /* src_mask */
703 0x000f0fff, /* dst_mask */
704 FALSE), /* pcrel_offset */
706 HOWTO (R_ARM_MOVT_ABS, /* type */
708 2, /* size (0 = byte, 1 = short, 2 = long) */
710 FALSE, /* pc_relative */
712 complain_overflow_bitfield,/* complain_on_overflow */
713 bfd_elf_generic_reloc, /* special_function */
714 "R_ARM_MOVT_ABS", /* name */
715 FALSE, /* partial_inplace */
716 0x000f0fff, /* src_mask */
717 0x000f0fff, /* dst_mask */
718 FALSE), /* pcrel_offset */
720 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
722 2, /* size (0 = byte, 1 = short, 2 = long) */
724 TRUE, /* pc_relative */
726 complain_overflow_dont,/* complain_on_overflow */
727 bfd_elf_generic_reloc, /* special_function */
728 "R_ARM_MOVW_PREL_NC", /* name */
729 FALSE, /* partial_inplace */
730 0x000f0fff, /* src_mask */
731 0x000f0fff, /* dst_mask */
732 TRUE), /* pcrel_offset */
734 HOWTO (R_ARM_MOVT_PREL, /* type */
736 2, /* size (0 = byte, 1 = short, 2 = long) */
738 TRUE, /* pc_relative */
740 complain_overflow_bitfield,/* complain_on_overflow */
741 bfd_elf_generic_reloc, /* special_function */
742 "R_ARM_MOVT_PREL", /* name */
743 FALSE, /* partial_inplace */
744 0x000f0fff, /* src_mask */
745 0x000f0fff, /* dst_mask */
746 TRUE), /* pcrel_offset */
748 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
750 2, /* size (0 = byte, 1 = short, 2 = long) */
752 FALSE, /* pc_relative */
754 complain_overflow_dont,/* complain_on_overflow */
755 bfd_elf_generic_reloc, /* special_function */
756 "R_ARM_THM_MOVW_ABS_NC",/* name */
757 FALSE, /* partial_inplace */
758 0x040f70ff, /* src_mask */
759 0x040f70ff, /* dst_mask */
760 FALSE), /* pcrel_offset */
762 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
764 2, /* size (0 = byte, 1 = short, 2 = long) */
766 FALSE, /* pc_relative */
768 complain_overflow_bitfield,/* complain_on_overflow */
769 bfd_elf_generic_reloc, /* special_function */
770 "R_ARM_THM_MOVT_ABS", /* name */
771 FALSE, /* partial_inplace */
772 0x040f70ff, /* src_mask */
773 0x040f70ff, /* dst_mask */
774 FALSE), /* pcrel_offset */
776 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
778 2, /* size (0 = byte, 1 = short, 2 = long) */
780 TRUE, /* pc_relative */
782 complain_overflow_dont,/* complain_on_overflow */
783 bfd_elf_generic_reloc, /* special_function */
784 "R_ARM_THM_MOVW_PREL_NC",/* name */
785 FALSE, /* partial_inplace */
786 0x040f70ff, /* src_mask */
787 0x040f70ff, /* dst_mask */
788 TRUE), /* pcrel_offset */
790 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
792 2, /* size (0 = byte, 1 = short, 2 = long) */
794 TRUE, /* pc_relative */
796 complain_overflow_bitfield,/* complain_on_overflow */
797 bfd_elf_generic_reloc, /* special_function */
798 "R_ARM_THM_MOVT_PREL", /* name */
799 FALSE, /* partial_inplace */
800 0x040f70ff, /* src_mask */
801 0x040f70ff, /* dst_mask */
802 TRUE), /* pcrel_offset */
804 HOWTO (R_ARM_THM_JUMP19, /* type */
806 2, /* size (0 = byte, 1 = short, 2 = long) */
808 TRUE, /* pc_relative */
810 complain_overflow_signed,/* complain_on_overflow */
811 bfd_elf_generic_reloc, /* special_function */
812 "R_ARM_THM_JUMP19", /* name */
813 FALSE, /* partial_inplace */
814 0x043f2fff, /* src_mask */
815 0x043f2fff, /* dst_mask */
816 TRUE), /* pcrel_offset */
818 HOWTO (R_ARM_THM_JUMP6, /* type */
820 1, /* size (0 = byte, 1 = short, 2 = long) */
822 TRUE, /* pc_relative */
824 complain_overflow_unsigned,/* complain_on_overflow */
825 bfd_elf_generic_reloc, /* special_function */
826 "R_ARM_THM_JUMP6", /* name */
827 FALSE, /* partial_inplace */
828 0x02f8, /* src_mask */
829 0x02f8, /* dst_mask */
830 TRUE), /* pcrel_offset */
832 /* These are declared as 13-bit signed relocations because we can
833 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
835 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
837 2, /* size (0 = byte, 1 = short, 2 = long) */
839 TRUE, /* pc_relative */
841 complain_overflow_dont,/* complain_on_overflow */
842 bfd_elf_generic_reloc, /* special_function */
843 "R_ARM_THM_ALU_PREL_11_0",/* name */
844 FALSE, /* partial_inplace */
845 0xffffffff, /* src_mask */
846 0xffffffff, /* dst_mask */
847 TRUE), /* pcrel_offset */
849 HOWTO (R_ARM_THM_PC12, /* type */
851 2, /* size (0 = byte, 1 = short, 2 = long) */
853 TRUE, /* pc_relative */
855 complain_overflow_dont,/* complain_on_overflow */
856 bfd_elf_generic_reloc, /* special_function */
857 "R_ARM_THM_PC12", /* name */
858 FALSE, /* partial_inplace */
859 0xffffffff, /* src_mask */
860 0xffffffff, /* dst_mask */
861 TRUE), /* pcrel_offset */
863 HOWTO (R_ARM_ABS32_NOI, /* type */
865 2, /* size (0 = byte, 1 = short, 2 = long) */
867 FALSE, /* pc_relative */
869 complain_overflow_dont,/* complain_on_overflow */
870 bfd_elf_generic_reloc, /* special_function */
871 "R_ARM_ABS32_NOI", /* name */
872 FALSE, /* partial_inplace */
873 0xffffffff, /* src_mask */
874 0xffffffff, /* dst_mask */
875 FALSE), /* pcrel_offset */
877 HOWTO (R_ARM_REL32_NOI, /* type */
879 2, /* size (0 = byte, 1 = short, 2 = long) */
881 TRUE, /* pc_relative */
883 complain_overflow_dont,/* complain_on_overflow */
884 bfd_elf_generic_reloc, /* special_function */
885 "R_ARM_REL32_NOI", /* name */
886 FALSE, /* partial_inplace */
887 0xffffffff, /* src_mask */
888 0xffffffff, /* dst_mask */
889 FALSE), /* pcrel_offset */
891 /* Group relocations. */
893 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
895 2, /* size (0 = byte, 1 = short, 2 = long) */
897 TRUE, /* pc_relative */
899 complain_overflow_dont,/* complain_on_overflow */
900 bfd_elf_generic_reloc, /* special_function */
901 "R_ARM_ALU_PC_G0_NC", /* name */
902 FALSE, /* partial_inplace */
903 0xffffffff, /* src_mask */
904 0xffffffff, /* dst_mask */
905 TRUE), /* pcrel_offset */
907 HOWTO (R_ARM_ALU_PC_G0, /* type */
909 2, /* size (0 = byte, 1 = short, 2 = long) */
911 TRUE, /* pc_relative */
913 complain_overflow_dont,/* complain_on_overflow */
914 bfd_elf_generic_reloc, /* special_function */
915 "R_ARM_ALU_PC_G0", /* name */
916 FALSE, /* partial_inplace */
917 0xffffffff, /* src_mask */
918 0xffffffff, /* dst_mask */
919 TRUE), /* pcrel_offset */
921 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
923 2, /* size (0 = byte, 1 = short, 2 = long) */
925 TRUE, /* pc_relative */
927 complain_overflow_dont,/* complain_on_overflow */
928 bfd_elf_generic_reloc, /* special_function */
929 "R_ARM_ALU_PC_G1_NC", /* name */
930 FALSE, /* partial_inplace */
931 0xffffffff, /* src_mask */
932 0xffffffff, /* dst_mask */
933 TRUE), /* pcrel_offset */
935 HOWTO (R_ARM_ALU_PC_G1, /* type */
937 2, /* size (0 = byte, 1 = short, 2 = long) */
939 TRUE, /* pc_relative */
941 complain_overflow_dont,/* complain_on_overflow */
942 bfd_elf_generic_reloc, /* special_function */
943 "R_ARM_ALU_PC_G1", /* name */
944 FALSE, /* partial_inplace */
945 0xffffffff, /* src_mask */
946 0xffffffff, /* dst_mask */
947 TRUE), /* pcrel_offset */
949 HOWTO (R_ARM_ALU_PC_G2, /* type */
951 2, /* size (0 = byte, 1 = short, 2 = long) */
953 TRUE, /* pc_relative */
955 complain_overflow_dont,/* complain_on_overflow */
956 bfd_elf_generic_reloc, /* special_function */
957 "R_ARM_ALU_PC_G2", /* name */
958 FALSE, /* partial_inplace */
959 0xffffffff, /* src_mask */
960 0xffffffff, /* dst_mask */
961 TRUE), /* pcrel_offset */
963 HOWTO (R_ARM_LDR_PC_G1, /* type */
965 2, /* size (0 = byte, 1 = short, 2 = long) */
967 TRUE, /* pc_relative */
969 complain_overflow_dont,/* complain_on_overflow */
970 bfd_elf_generic_reloc, /* special_function */
971 "R_ARM_LDR_PC_G1", /* name */
972 FALSE, /* partial_inplace */
973 0xffffffff, /* src_mask */
974 0xffffffff, /* dst_mask */
975 TRUE), /* pcrel_offset */
977 HOWTO (R_ARM_LDR_PC_G2, /* type */
979 2, /* size (0 = byte, 1 = short, 2 = long) */
981 TRUE, /* pc_relative */
983 complain_overflow_dont,/* complain_on_overflow */
984 bfd_elf_generic_reloc, /* special_function */
985 "R_ARM_LDR_PC_G2", /* name */
986 FALSE, /* partial_inplace */
987 0xffffffff, /* src_mask */
988 0xffffffff, /* dst_mask */
989 TRUE), /* pcrel_offset */
991 HOWTO (R_ARM_LDRS_PC_G0, /* type */
993 2, /* size (0 = byte, 1 = short, 2 = long) */
995 TRUE, /* pc_relative */
997 complain_overflow_dont,/* complain_on_overflow */
998 bfd_elf_generic_reloc, /* special_function */
999 "R_ARM_LDRS_PC_G0", /* name */
1000 FALSE, /* partial_inplace */
1001 0xffffffff, /* src_mask */
1002 0xffffffff, /* dst_mask */
1003 TRUE), /* pcrel_offset */
1005 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1007 2, /* size (0 = byte, 1 = short, 2 = long) */
1009 TRUE, /* pc_relative */
1011 complain_overflow_dont,/* complain_on_overflow */
1012 bfd_elf_generic_reloc, /* special_function */
1013 "R_ARM_LDRS_PC_G1", /* name */
1014 FALSE, /* partial_inplace */
1015 0xffffffff, /* src_mask */
1016 0xffffffff, /* dst_mask */
1017 TRUE), /* pcrel_offset */
1019 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1021 2, /* size (0 = byte, 1 = short, 2 = long) */
1023 TRUE, /* pc_relative */
1025 complain_overflow_dont,/* complain_on_overflow */
1026 bfd_elf_generic_reloc, /* special_function */
1027 "R_ARM_LDRS_PC_G2", /* name */
1028 FALSE, /* partial_inplace */
1029 0xffffffff, /* src_mask */
1030 0xffffffff, /* dst_mask */
1031 TRUE), /* pcrel_offset */
1033 HOWTO (R_ARM_LDC_PC_G0, /* type */
1035 2, /* size (0 = byte, 1 = short, 2 = long) */
1037 TRUE, /* pc_relative */
1039 complain_overflow_dont,/* complain_on_overflow */
1040 bfd_elf_generic_reloc, /* special_function */
1041 "R_ARM_LDC_PC_G0", /* name */
1042 FALSE, /* partial_inplace */
1043 0xffffffff, /* src_mask */
1044 0xffffffff, /* dst_mask */
1045 TRUE), /* pcrel_offset */
1047 HOWTO (R_ARM_LDC_PC_G1, /* type */
1049 2, /* size (0 = byte, 1 = short, 2 = long) */
1051 TRUE, /* pc_relative */
1053 complain_overflow_dont,/* complain_on_overflow */
1054 bfd_elf_generic_reloc, /* special_function */
1055 "R_ARM_LDC_PC_G1", /* name */
1056 FALSE, /* partial_inplace */
1057 0xffffffff, /* src_mask */
1058 0xffffffff, /* dst_mask */
1059 TRUE), /* pcrel_offset */
1061 HOWTO (R_ARM_LDC_PC_G2, /* type */
1063 2, /* size (0 = byte, 1 = short, 2 = long) */
1065 TRUE, /* pc_relative */
1067 complain_overflow_dont,/* complain_on_overflow */
1068 bfd_elf_generic_reloc, /* special_function */
1069 "R_ARM_LDC_PC_G2", /* name */
1070 FALSE, /* partial_inplace */
1071 0xffffffff, /* src_mask */
1072 0xffffffff, /* dst_mask */
1073 TRUE), /* pcrel_offset */
1075 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1077 2, /* size (0 = byte, 1 = short, 2 = long) */
1079 TRUE, /* pc_relative */
1081 complain_overflow_dont,/* complain_on_overflow */
1082 bfd_elf_generic_reloc, /* special_function */
1083 "R_ARM_ALU_SB_G0_NC", /* name */
1084 FALSE, /* partial_inplace */
1085 0xffffffff, /* src_mask */
1086 0xffffffff, /* dst_mask */
1087 TRUE), /* pcrel_offset */
1089 HOWTO (R_ARM_ALU_SB_G0, /* type */
1091 2, /* size (0 = byte, 1 = short, 2 = long) */
1093 TRUE, /* pc_relative */
1095 complain_overflow_dont,/* complain_on_overflow */
1096 bfd_elf_generic_reloc, /* special_function */
1097 "R_ARM_ALU_SB_G0", /* name */
1098 FALSE, /* partial_inplace */
1099 0xffffffff, /* src_mask */
1100 0xffffffff, /* dst_mask */
1101 TRUE), /* pcrel_offset */
1103 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1105 2, /* size (0 = byte, 1 = short, 2 = long) */
1107 TRUE, /* pc_relative */
1109 complain_overflow_dont,/* complain_on_overflow */
1110 bfd_elf_generic_reloc, /* special_function */
1111 "R_ARM_ALU_SB_G1_NC", /* name */
1112 FALSE, /* partial_inplace */
1113 0xffffffff, /* src_mask */
1114 0xffffffff, /* dst_mask */
1115 TRUE), /* pcrel_offset */
1117 HOWTO (R_ARM_ALU_SB_G1, /* type */
1119 2, /* size (0 = byte, 1 = short, 2 = long) */
1121 TRUE, /* pc_relative */
1123 complain_overflow_dont,/* complain_on_overflow */
1124 bfd_elf_generic_reloc, /* special_function */
1125 "R_ARM_ALU_SB_G1", /* name */
1126 FALSE, /* partial_inplace */
1127 0xffffffff, /* src_mask */
1128 0xffffffff, /* dst_mask */
1129 TRUE), /* pcrel_offset */
1131 HOWTO (R_ARM_ALU_SB_G2, /* type */
1133 2, /* size (0 = byte, 1 = short, 2 = long) */
1135 TRUE, /* pc_relative */
1137 complain_overflow_dont,/* complain_on_overflow */
1138 bfd_elf_generic_reloc, /* special_function */
1139 "R_ARM_ALU_SB_G2", /* name */
1140 FALSE, /* partial_inplace */
1141 0xffffffff, /* src_mask */
1142 0xffffffff, /* dst_mask */
1143 TRUE), /* pcrel_offset */
1145 HOWTO (R_ARM_LDR_SB_G0, /* type */
1147 2, /* size (0 = byte, 1 = short, 2 = long) */
1149 TRUE, /* pc_relative */
1151 complain_overflow_dont,/* complain_on_overflow */
1152 bfd_elf_generic_reloc, /* special_function */
1153 "R_ARM_LDR_SB_G0", /* name */
1154 FALSE, /* partial_inplace */
1155 0xffffffff, /* src_mask */
1156 0xffffffff, /* dst_mask */
1157 TRUE), /* pcrel_offset */
1159 HOWTO (R_ARM_LDR_SB_G1, /* type */
1161 2, /* size (0 = byte, 1 = short, 2 = long) */
1163 TRUE, /* pc_relative */
1165 complain_overflow_dont,/* complain_on_overflow */
1166 bfd_elf_generic_reloc, /* special_function */
1167 "R_ARM_LDR_SB_G1", /* name */
1168 FALSE, /* partial_inplace */
1169 0xffffffff, /* src_mask */
1170 0xffffffff, /* dst_mask */
1171 TRUE), /* pcrel_offset */
1173 HOWTO (R_ARM_LDR_SB_G2, /* type */
1175 2, /* size (0 = byte, 1 = short, 2 = long) */
1177 TRUE, /* pc_relative */
1179 complain_overflow_dont,/* complain_on_overflow */
1180 bfd_elf_generic_reloc, /* special_function */
1181 "R_ARM_LDR_SB_G2", /* name */
1182 FALSE, /* partial_inplace */
1183 0xffffffff, /* src_mask */
1184 0xffffffff, /* dst_mask */
1185 TRUE), /* pcrel_offset */
1187 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1189 2, /* size (0 = byte, 1 = short, 2 = long) */
1191 TRUE, /* pc_relative */
1193 complain_overflow_dont,/* complain_on_overflow */
1194 bfd_elf_generic_reloc, /* special_function */
1195 "R_ARM_LDRS_SB_G0", /* name */
1196 FALSE, /* partial_inplace */
1197 0xffffffff, /* src_mask */
1198 0xffffffff, /* dst_mask */
1199 TRUE), /* pcrel_offset */
1201 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1203 2, /* size (0 = byte, 1 = short, 2 = long) */
1205 TRUE, /* pc_relative */
1207 complain_overflow_dont,/* complain_on_overflow */
1208 bfd_elf_generic_reloc, /* special_function */
1209 "R_ARM_LDRS_SB_G1", /* name */
1210 FALSE, /* partial_inplace */
1211 0xffffffff, /* src_mask */
1212 0xffffffff, /* dst_mask */
1213 TRUE), /* pcrel_offset */
1215 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1217 2, /* size (0 = byte, 1 = short, 2 = long) */
1219 TRUE, /* pc_relative */
1221 complain_overflow_dont,/* complain_on_overflow */
1222 bfd_elf_generic_reloc, /* special_function */
1223 "R_ARM_LDRS_SB_G2", /* name */
1224 FALSE, /* partial_inplace */
1225 0xffffffff, /* src_mask */
1226 0xffffffff, /* dst_mask */
1227 TRUE), /* pcrel_offset */
1229 HOWTO (R_ARM_LDC_SB_G0, /* type */
1231 2, /* size (0 = byte, 1 = short, 2 = long) */
1233 TRUE, /* pc_relative */
1235 complain_overflow_dont,/* complain_on_overflow */
1236 bfd_elf_generic_reloc, /* special_function */
1237 "R_ARM_LDC_SB_G0", /* name */
1238 FALSE, /* partial_inplace */
1239 0xffffffff, /* src_mask */
1240 0xffffffff, /* dst_mask */
1241 TRUE), /* pcrel_offset */
1243 HOWTO (R_ARM_LDC_SB_G1, /* type */
1245 2, /* size (0 = byte, 1 = short, 2 = long) */
1247 TRUE, /* pc_relative */
1249 complain_overflow_dont,/* complain_on_overflow */
1250 bfd_elf_generic_reloc, /* special_function */
1251 "R_ARM_LDC_SB_G1", /* name */
1252 FALSE, /* partial_inplace */
1253 0xffffffff, /* src_mask */
1254 0xffffffff, /* dst_mask */
1255 TRUE), /* pcrel_offset */
1257 HOWTO (R_ARM_LDC_SB_G2, /* type */
1259 2, /* size (0 = byte, 1 = short, 2 = long) */
1261 TRUE, /* pc_relative */
1263 complain_overflow_dont,/* complain_on_overflow */
1264 bfd_elf_generic_reloc, /* special_function */
1265 "R_ARM_LDC_SB_G2", /* name */
1266 FALSE, /* partial_inplace */
1267 0xffffffff, /* src_mask */
1268 0xffffffff, /* dst_mask */
1269 TRUE), /* pcrel_offset */
1271 /* End of group relocations. */
1273 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1275 2, /* size (0 = byte, 1 = short, 2 = long) */
1277 FALSE, /* pc_relative */
1279 complain_overflow_dont,/* complain_on_overflow */
1280 bfd_elf_generic_reloc, /* special_function */
1281 "R_ARM_MOVW_BREL_NC", /* name */
1282 FALSE, /* partial_inplace */
1283 0x0000ffff, /* src_mask */
1284 0x0000ffff, /* dst_mask */
1285 FALSE), /* pcrel_offset */
1287 HOWTO (R_ARM_MOVT_BREL, /* type */
1289 2, /* size (0 = byte, 1 = short, 2 = long) */
1291 FALSE, /* pc_relative */
1293 complain_overflow_bitfield,/* complain_on_overflow */
1294 bfd_elf_generic_reloc, /* special_function */
1295 "R_ARM_MOVT_BREL", /* name */
1296 FALSE, /* partial_inplace */
1297 0x0000ffff, /* src_mask */
1298 0x0000ffff, /* dst_mask */
1299 FALSE), /* pcrel_offset */
1301 HOWTO (R_ARM_MOVW_BREL, /* type */
1303 2, /* size (0 = byte, 1 = short, 2 = long) */
1305 FALSE, /* pc_relative */
1307 complain_overflow_dont,/* complain_on_overflow */
1308 bfd_elf_generic_reloc, /* special_function */
1309 "R_ARM_MOVW_BREL", /* name */
1310 FALSE, /* partial_inplace */
1311 0x0000ffff, /* src_mask */
1312 0x0000ffff, /* dst_mask */
1313 FALSE), /* pcrel_offset */
1315 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1317 2, /* size (0 = byte, 1 = short, 2 = long) */
1319 FALSE, /* pc_relative */
1321 complain_overflow_dont,/* complain_on_overflow */
1322 bfd_elf_generic_reloc, /* special_function */
1323 "R_ARM_THM_MOVW_BREL_NC",/* name */
1324 FALSE, /* partial_inplace */
1325 0x040f70ff, /* src_mask */
1326 0x040f70ff, /* dst_mask */
1327 FALSE), /* pcrel_offset */
1329 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1331 2, /* size (0 = byte, 1 = short, 2 = long) */
1333 FALSE, /* pc_relative */
1335 complain_overflow_bitfield,/* complain_on_overflow */
1336 bfd_elf_generic_reloc, /* special_function */
1337 "R_ARM_THM_MOVT_BREL", /* name */
1338 FALSE, /* partial_inplace */
1339 0x040f70ff, /* src_mask */
1340 0x040f70ff, /* dst_mask */
1341 FALSE), /* pcrel_offset */
1343 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1345 2, /* size (0 = byte, 1 = short, 2 = long) */
1347 FALSE, /* pc_relative */
1349 complain_overflow_dont,/* complain_on_overflow */
1350 bfd_elf_generic_reloc, /* special_function */
1351 "R_ARM_THM_MOVW_BREL", /* name */
1352 FALSE, /* partial_inplace */
1353 0x040f70ff, /* src_mask */
1354 0x040f70ff, /* dst_mask */
1355 FALSE), /* pcrel_offset */
1357 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1359 2, /* size (0 = byte, 1 = short, 2 = long) */
1361 FALSE, /* pc_relative */
1363 complain_overflow_bitfield,/* complain_on_overflow */
1364 NULL, /* special_function */
1365 "R_ARM_TLS_GOTDESC", /* name */
1366 TRUE, /* partial_inplace */
1367 0xffffffff, /* src_mask */
1368 0xffffffff, /* dst_mask */
1369 FALSE), /* pcrel_offset */
1371 HOWTO (R_ARM_TLS_CALL, /* type */
1373 2, /* size (0 = byte, 1 = short, 2 = long) */
1375 FALSE, /* pc_relative */
1377 complain_overflow_dont,/* complain_on_overflow */
1378 bfd_elf_generic_reloc, /* special_function */
1379 "R_ARM_TLS_CALL", /* name */
1380 FALSE, /* partial_inplace */
1381 0x00ffffff, /* src_mask */
1382 0x00ffffff, /* dst_mask */
1383 FALSE), /* pcrel_offset */
1385 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1387 2, /* size (0 = byte, 1 = short, 2 = long) */
1389 FALSE, /* pc_relative */
1391 complain_overflow_bitfield,/* complain_on_overflow */
1392 bfd_elf_generic_reloc, /* special_function */
1393 "R_ARM_TLS_DESCSEQ", /* name */
1394 FALSE, /* partial_inplace */
1395 0x00000000, /* src_mask */
1396 0x00000000, /* dst_mask */
1397 FALSE), /* pcrel_offset */
1399 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1401 2, /* size (0 = byte, 1 = short, 2 = long) */
1403 FALSE, /* pc_relative */
1405 complain_overflow_dont,/* complain_on_overflow */
1406 bfd_elf_generic_reloc, /* special_function */
1407 "R_ARM_THM_TLS_CALL", /* name */
1408 FALSE, /* partial_inplace */
1409 0x07ff07ff, /* src_mask */
1410 0x07ff07ff, /* dst_mask */
1411 FALSE), /* pcrel_offset */
1413 HOWTO (R_ARM_PLT32_ABS, /* type */
1415 2, /* size (0 = byte, 1 = short, 2 = long) */
1417 FALSE, /* pc_relative */
1419 complain_overflow_dont,/* complain_on_overflow */
1420 bfd_elf_generic_reloc, /* special_function */
1421 "R_ARM_PLT32_ABS", /* name */
1422 FALSE, /* partial_inplace */
1423 0xffffffff, /* src_mask */
1424 0xffffffff, /* dst_mask */
1425 FALSE), /* pcrel_offset */
1427 HOWTO (R_ARM_GOT_ABS, /* type */
1429 2, /* size (0 = byte, 1 = short, 2 = long) */
1431 FALSE, /* pc_relative */
1433 complain_overflow_dont,/* complain_on_overflow */
1434 bfd_elf_generic_reloc, /* special_function */
1435 "R_ARM_GOT_ABS", /* name */
1436 FALSE, /* partial_inplace */
1437 0xffffffff, /* src_mask */
1438 0xffffffff, /* dst_mask */
1439 FALSE), /* pcrel_offset */
1441 HOWTO (R_ARM_GOT_PREL, /* type */
1443 2, /* size (0 = byte, 1 = short, 2 = long) */
1445 TRUE, /* pc_relative */
1447 complain_overflow_dont, /* complain_on_overflow */
1448 bfd_elf_generic_reloc, /* special_function */
1449 "R_ARM_GOT_PREL", /* name */
1450 FALSE, /* partial_inplace */
1451 0xffffffff, /* src_mask */
1452 0xffffffff, /* dst_mask */
1453 TRUE), /* pcrel_offset */
1455 HOWTO (R_ARM_GOT_BREL12, /* type */
1457 2, /* size (0 = byte, 1 = short, 2 = long) */
1459 FALSE, /* pc_relative */
1461 complain_overflow_bitfield,/* complain_on_overflow */
1462 bfd_elf_generic_reloc, /* special_function */
1463 "R_ARM_GOT_BREL12", /* name */
1464 FALSE, /* partial_inplace */
1465 0x00000fff, /* src_mask */
1466 0x00000fff, /* dst_mask */
1467 FALSE), /* pcrel_offset */
1469 HOWTO (R_ARM_GOTOFF12, /* type */
1471 2, /* size (0 = byte, 1 = short, 2 = long) */
1473 FALSE, /* pc_relative */
1475 complain_overflow_bitfield,/* complain_on_overflow */
1476 bfd_elf_generic_reloc, /* special_function */
1477 "R_ARM_GOTOFF12", /* name */
1478 FALSE, /* partial_inplace */
1479 0x00000fff, /* src_mask */
1480 0x00000fff, /* dst_mask */
1481 FALSE), /* pcrel_offset */
1483 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1485 /* GNU extension to record C++ vtable member usage */
1486 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1488 2, /* size (0 = byte, 1 = short, 2 = long) */
1490 FALSE, /* pc_relative */
1492 complain_overflow_dont, /* complain_on_overflow */
1493 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1494 "R_ARM_GNU_VTENTRY", /* name */
1495 FALSE, /* partial_inplace */
1498 FALSE), /* pcrel_offset */
1500 /* GNU extension to record C++ vtable hierarchy */
1501 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1503 2, /* size (0 = byte, 1 = short, 2 = long) */
1505 FALSE, /* pc_relative */
1507 complain_overflow_dont, /* complain_on_overflow */
1508 NULL, /* special_function */
1509 "R_ARM_GNU_VTINHERIT", /* name */
1510 FALSE, /* partial_inplace */
1513 FALSE), /* pcrel_offset */
1515 HOWTO (R_ARM_THM_JUMP11, /* type */
1517 1, /* size (0 = byte, 1 = short, 2 = long) */
1519 TRUE, /* pc_relative */
1521 complain_overflow_signed, /* complain_on_overflow */
1522 bfd_elf_generic_reloc, /* special_function */
1523 "R_ARM_THM_JUMP11", /* name */
1524 FALSE, /* partial_inplace */
1525 0x000007ff, /* src_mask */
1526 0x000007ff, /* dst_mask */
1527 TRUE), /* pcrel_offset */
1529 HOWTO (R_ARM_THM_JUMP8, /* type */
1531 1, /* size (0 = byte, 1 = short, 2 = long) */
1533 TRUE, /* pc_relative */
1535 complain_overflow_signed, /* complain_on_overflow */
1536 bfd_elf_generic_reloc, /* special_function */
1537 "R_ARM_THM_JUMP8", /* name */
1538 FALSE, /* partial_inplace */
1539 0x000000ff, /* src_mask */
1540 0x000000ff, /* dst_mask */
1541 TRUE), /* pcrel_offset */
1543 /* TLS relocations */
1544 HOWTO (R_ARM_TLS_GD32, /* type */
1546 2, /* size (0 = byte, 1 = short, 2 = long) */
1548 FALSE, /* pc_relative */
1550 complain_overflow_bitfield,/* complain_on_overflow */
1551 NULL, /* special_function */
1552 "R_ARM_TLS_GD32", /* name */
1553 TRUE, /* partial_inplace */
1554 0xffffffff, /* src_mask */
1555 0xffffffff, /* dst_mask */
1556 FALSE), /* pcrel_offset */
1558 HOWTO (R_ARM_TLS_LDM32, /* type */
1560 2, /* size (0 = byte, 1 = short, 2 = long) */
1562 FALSE, /* pc_relative */
1564 complain_overflow_bitfield,/* complain_on_overflow */
1565 bfd_elf_generic_reloc, /* special_function */
1566 "R_ARM_TLS_LDM32", /* name */
1567 TRUE, /* partial_inplace */
1568 0xffffffff, /* src_mask */
1569 0xffffffff, /* dst_mask */
1570 FALSE), /* pcrel_offset */
1572 HOWTO (R_ARM_TLS_LDO32, /* type */
1574 2, /* size (0 = byte, 1 = short, 2 = long) */
1576 FALSE, /* pc_relative */
1578 complain_overflow_bitfield,/* complain_on_overflow */
1579 bfd_elf_generic_reloc, /* special_function */
1580 "R_ARM_TLS_LDO32", /* name */
1581 TRUE, /* partial_inplace */
1582 0xffffffff, /* src_mask */
1583 0xffffffff, /* dst_mask */
1584 FALSE), /* pcrel_offset */
1586 HOWTO (R_ARM_TLS_IE32, /* type */
1588 2, /* size (0 = byte, 1 = short, 2 = long) */
1590 FALSE, /* pc_relative */
1592 complain_overflow_bitfield,/* complain_on_overflow */
1593 NULL, /* special_function */
1594 "R_ARM_TLS_IE32", /* name */
1595 TRUE, /* partial_inplace */
1596 0xffffffff, /* src_mask */
1597 0xffffffff, /* dst_mask */
1598 FALSE), /* pcrel_offset */
1600 HOWTO (R_ARM_TLS_LE32, /* type */
1602 2, /* size (0 = byte, 1 = short, 2 = long) */
1604 FALSE, /* pc_relative */
1606 complain_overflow_bitfield,/* complain_on_overflow */
1607 bfd_elf_generic_reloc, /* special_function */
1608 "R_ARM_TLS_LE32", /* name */
1609 TRUE, /* partial_inplace */
1610 0xffffffff, /* src_mask */
1611 0xffffffff, /* dst_mask */
1612 FALSE), /* pcrel_offset */
1614 HOWTO (R_ARM_TLS_LDO12, /* type */
1616 2, /* size (0 = byte, 1 = short, 2 = long) */
1618 FALSE, /* pc_relative */
1620 complain_overflow_bitfield,/* complain_on_overflow */
1621 bfd_elf_generic_reloc, /* special_function */
1622 "R_ARM_TLS_LDO12", /* name */
1623 FALSE, /* partial_inplace */
1624 0x00000fff, /* src_mask */
1625 0x00000fff, /* dst_mask */
1626 FALSE), /* pcrel_offset */
1628 HOWTO (R_ARM_TLS_LE12, /* type */
1630 2, /* size (0 = byte, 1 = short, 2 = long) */
1632 FALSE, /* pc_relative */
1634 complain_overflow_bitfield,/* complain_on_overflow */
1635 bfd_elf_generic_reloc, /* special_function */
1636 "R_ARM_TLS_LE12", /* name */
1637 FALSE, /* partial_inplace */
1638 0x00000fff, /* src_mask */
1639 0x00000fff, /* dst_mask */
1640 FALSE), /* pcrel_offset */
1642 HOWTO (R_ARM_TLS_IE12GP, /* type */
1644 2, /* size (0 = byte, 1 = short, 2 = long) */
1646 FALSE, /* pc_relative */
1648 complain_overflow_bitfield,/* complain_on_overflow */
1649 bfd_elf_generic_reloc, /* special_function */
1650 "R_ARM_TLS_IE12GP", /* name */
1651 FALSE, /* partial_inplace */
1652 0x00000fff, /* src_mask */
1653 0x00000fff, /* dst_mask */
1654 FALSE), /* pcrel_offset */
1656 /* 112-127 private relocations. */
1674 /* R_ARM_ME_TOO, obsolete. */
1677 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1679 1, /* size (0 = byte, 1 = short, 2 = long) */
1681 FALSE, /* pc_relative */
1683 complain_overflow_bitfield,/* complain_on_overflow */
1684 bfd_elf_generic_reloc, /* special_function */
1685 "R_ARM_THM_TLS_DESCSEQ",/* name */
1686 FALSE, /* partial_inplace */
1687 0x00000000, /* src_mask */
1688 0x00000000, /* dst_mask */
1689 FALSE), /* pcrel_offset */
1693 static reloc_howto_type elf32_arm_howto_table_2[1] =
1695 HOWTO (R_ARM_IRELATIVE, /* type */
1697 2, /* size (0 = byte, 1 = short, 2 = long) */
1699 FALSE, /* pc_relative */
1701 complain_overflow_bitfield,/* complain_on_overflow */
1702 bfd_elf_generic_reloc, /* special_function */
1703 "R_ARM_IRELATIVE", /* name */
1704 TRUE, /* partial_inplace */
1705 0xffffffff, /* src_mask */
1706 0xffffffff, /* dst_mask */
1707 FALSE) /* pcrel_offset */
1710 /* 249-255 extended, currently unused, relocations: */
1711 static reloc_howto_type elf32_arm_howto_table_3[4] =
1713 HOWTO (R_ARM_RREL32, /* type */
1715 0, /* size (0 = byte, 1 = short, 2 = long) */
1717 FALSE, /* pc_relative */
1719 complain_overflow_dont,/* complain_on_overflow */
1720 bfd_elf_generic_reloc, /* special_function */
1721 "R_ARM_RREL32", /* name */
1722 FALSE, /* partial_inplace */
1725 FALSE), /* pcrel_offset */
1727 HOWTO (R_ARM_RABS32, /* type */
1729 0, /* size (0 = byte, 1 = short, 2 = long) */
1731 FALSE, /* pc_relative */
1733 complain_overflow_dont,/* complain_on_overflow */
1734 bfd_elf_generic_reloc, /* special_function */
1735 "R_ARM_RABS32", /* name */
1736 FALSE, /* partial_inplace */
1739 FALSE), /* pcrel_offset */
1741 HOWTO (R_ARM_RPC24, /* type */
1743 0, /* size (0 = byte, 1 = short, 2 = long) */
1745 FALSE, /* pc_relative */
1747 complain_overflow_dont,/* complain_on_overflow */
1748 bfd_elf_generic_reloc, /* special_function */
1749 "R_ARM_RPC24", /* name */
1750 FALSE, /* partial_inplace */
1753 FALSE), /* pcrel_offset */
1755 HOWTO (R_ARM_RBASE, /* type */
1757 0, /* size (0 = byte, 1 = short, 2 = long) */
1759 FALSE, /* pc_relative */
1761 complain_overflow_dont,/* complain_on_overflow */
1762 bfd_elf_generic_reloc, /* special_function */
1763 "R_ARM_RBASE", /* name */
1764 FALSE, /* partial_inplace */
1767 FALSE) /* pcrel_offset */
1770 static reloc_howto_type *
1771 elf32_arm_howto_from_type (unsigned int r_type)
1773 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1774 return &elf32_arm_howto_table_1[r_type];
1776 if (r_type == R_ARM_IRELATIVE)
1777 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1779 if (r_type >= R_ARM_RREL32
1780 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1781 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1787 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1788 Elf_Internal_Rela * elf_reloc)
1790 unsigned int r_type;
1792 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1793 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1796 struct elf32_arm_reloc_map
1798 bfd_reloc_code_real_type bfd_reloc_val;
1799 unsigned char elf_reloc_val;
1802 /* All entries in this list must also be present in elf32_arm_howto_table. */
1803 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1805 {BFD_RELOC_NONE, R_ARM_NONE},
1806 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1807 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1808 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1809 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1810 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1811 {BFD_RELOC_32, R_ARM_ABS32},
1812 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1813 {BFD_RELOC_8, R_ARM_ABS8},
1814 {BFD_RELOC_16, R_ARM_ABS16},
1815 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1816 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1817 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1818 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1819 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1820 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1821 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1822 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1823 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1824 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1825 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1826 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1827 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1828 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1829 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1830 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1831 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1832 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1833 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1834 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1835 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1836 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1837 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1838 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1839 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1840 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1841 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1842 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1843 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1844 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1845 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1846 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1847 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1848 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1849 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1850 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1851 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
1852 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1853 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1854 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1855 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1856 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1857 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1858 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1859 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1860 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1861 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1862 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1863 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1864 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1865 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1866 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1867 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1868 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1869 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1870 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1871 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1872 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1873 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1874 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1875 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1876 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1877 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1878 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1879 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1880 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1881 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1882 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1883 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1884 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1885 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1886 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1887 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1888 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1889 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1890 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
1893 static reloc_howto_type *
1894 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1895 bfd_reloc_code_real_type code)
1899 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1900 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1901 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1906 static reloc_howto_type *
1907 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1912 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1913 if (elf32_arm_howto_table_1[i].name != NULL
1914 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1915 return &elf32_arm_howto_table_1[i];
1917 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1918 if (elf32_arm_howto_table_2[i].name != NULL
1919 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1920 return &elf32_arm_howto_table_2[i];
1922 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1923 if (elf32_arm_howto_table_3[i].name != NULL
1924 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1925 return &elf32_arm_howto_table_3[i];
1930 /* Support for core dump NOTE sections. */
1933 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1938 switch (note->descsz)
1943 case 148: /* Linux/ARM 32-bit. */
1945 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1948 elf_tdata (abfd)->core_lwpid = bfd_get_32 (abfd, note->descdata + 24);
1957 /* Make a ".reg/999" section. */
1958 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1959 size, note->descpos + offset);
1963 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1965 switch (note->descsz)
1970 case 124: /* Linux/ARM elf_prpsinfo. */
1971 elf_tdata (abfd)->core_pid
1972 = bfd_get_32 (abfd, note->descdata + 12);
1973 elf_tdata (abfd)->core_program
1974 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1975 elf_tdata (abfd)->core_command
1976 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1979 /* Note that for some reason, a spurious space is tacked
1980 onto the end of the args in some (at least one anyway)
1981 implementations, so strip it off if it exists. */
1983 char *command = elf_tdata (abfd)->core_command;
1984 int n = strlen (command);
1986 if (0 < n && command[n - 1] == ' ')
1987 command[n - 1] = '\0';
1994 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2007 va_start (ap, note_type);
2008 memset (data, 0, sizeof (data));
2009 strncpy (data + 28, va_arg (ap, const char *), 16);
2010 strncpy (data + 44, va_arg (ap, const char *), 80);
2013 return elfcore_write_note (abfd, buf, bufsiz,
2014 "CORE", note_type, data, sizeof (data));
2025 va_start (ap, note_type);
2026 memset (data, 0, sizeof (data));
2027 pid = va_arg (ap, long);
2028 bfd_put_32 (abfd, pid, data + 24);
2029 cursig = va_arg (ap, int);
2030 bfd_put_16 (abfd, cursig, data + 12);
2031 greg = va_arg (ap, const void *);
2032 memcpy (data + 72, greg, 72);
2035 return elfcore_write_note (abfd, buf, bufsiz,
2036 "CORE", note_type, data, sizeof (data));
2041 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
2042 #define TARGET_LITTLE_NAME "elf32-littlearm"
2043 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
2044 #define TARGET_BIG_NAME "elf32-bigarm"
2046 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2047 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2048 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2050 typedef unsigned long int insn32;
2051 typedef unsigned short int insn16;
/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
2055 #define INTERWORK_FLAG(abfd) \
2056 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2057 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2058 || ((abfd)->flags & BFD_LINKER_CREATED))
2060 /* The linker script knows the section names for placement.
2061 The entry_names are used to do simple name mangling on the stubs.
2062 Given a function name, and its type, the stub can be found. The
2063 name can be changed. The only requirement is the %s be present. */
2064 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2065 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2067 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2068 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2070 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2071 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2073 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2074 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2076 #define STUB_ENTRY_NAME "__%s_veneer"
/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
2080 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
/* Instruction sequence for the TLS trampoline (encodings documented
   per-word below).  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};
/* Lazy TLS-descriptor resolution trampoline; the two trailing words are
   GOT-relative offsets patched at link time (see the inline comments).  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
  0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1: ldr r2, [pc, r2] */
  0xe081100f, /* 2: add r1, pc */
  0xe12fff12, /* bx r2 */
  0x00000014, /* 3: .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
			    + dl_tlsdesc_lazy_resolver(GOT)   */
  0x00000018, /* 4: .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
2102 #ifdef FOUR_WORD_PLT
2104 /* The first entry in a procedure linkage table looks like
2105 this. It is set up so that any shared library function that is
2106 called before the relocation has been set up calls the dynamic
2108 static const bfd_vma elf32_arm_plt0_entry [] =
2110 0xe52de004, /* str lr, [sp, #-4]! */
2111 0xe59fe010, /* ldr lr, [pc, #16] */
2112 0xe08fe00e, /* add lr, pc, lr */
2113 0xe5bef008, /* ldr pc, [lr, #8]! */
2116 /* Subsequent entries in a procedure linkage table look like
2118 static const bfd_vma elf32_arm_plt_entry [] =
2120 0xe28fc600, /* add ip, pc, #NN */
2121 0xe28cca00, /* add ip, ip, #NN */
2122 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2123 0x00000000, /* unused */
2128 /* The first entry in a procedure linkage table looks like
2129 this. It is set up so that any shared library function that is
2130 called before the relocation has been set up calls the dynamic
2132 static const bfd_vma elf32_arm_plt0_entry [] =
2134 0xe52de004, /* str lr, [sp, #-4]! */
2135 0xe59fe004, /* ldr lr, [pc, #4] */
2136 0xe08fe00e, /* add lr, pc, lr */
2137 0xe5bef008, /* ldr pc, [lr, #8]! */
2138 0x00000000, /* &GOT[0] - . */
2141 /* Subsequent entries in a procedure linkage table look like
2143 static const bfd_vma elf32_arm_plt_entry [] =
2145 0xe28fc600, /* add ip, pc, #0xNN00000 */
2146 0xe28cca00, /* add ip, ip, #0xNN000 */
2147 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2152 /* The format of the first entry in the procedure linkage table
2153 for a VxWorks executable. */
2154 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2156 0xe52dc008, /* str ip,[sp,#-8]! */
2157 0xe59fc000, /* ldr ip,[pc] */
2158 0xe59cf008, /* ldr pc,[ip,#8] */
2159 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2162 /* The format of subsequent entries in a VxWorks executable. */
2163 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2165 0xe59fc000, /* ldr ip,[pc] */
2166 0xe59cf000, /* ldr pc,[ip] */
2167 0x00000000, /* .long @got */
2168 0xe59fc000, /* ldr ip,[pc] */
2169 0xea000000, /* b _PLT */
2170 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2173 /* The format of entries in a VxWorks shared library. */
2174 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2176 0xe59fc000, /* ldr ip,[pc] */
2177 0xe79cf009, /* ldr pc,[ip,r9] */
2178 0x00000000, /* .long @got */
2179 0xe59fc000, /* ldr ip,[pc] */
2180 0xe599f008, /* ldr pc,[r9,#8] */
2181 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2184 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2185 #define PLT_THUMB_STUB_SIZE 4
2186 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2192 /* The entries in a PLT when using a DLL-based target with multiple
2194 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2196 0xe51ff004, /* ldr pc, [pc, #-4] */
2197 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2200 /* The first entry in a procedure linkage table looks like
2201 this. It is set up so that any shared library function that is
2202 called before the relocation has been set up calls the dynamic
2204 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2207 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2208 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2209 0xe08cc00f, /* add ip, ip, pc */
2210 0xe52dc008, /* str ip, [sp, #-8]! */
2211 /* Second bundle: */
2212 0xe7dfcf1f, /* bfc ip, #30, #2 */
2213 0xe59cc000, /* ldr ip, [ip] */
2214 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2215 0xe12fff1c, /* bx ip */
2217 0xe320f000, /* nop */
2218 0xe320f000, /* nop */
2219 0xe320f000, /* nop */
2221 0xe50dc004, /* str ip, [sp, #-4] */
2222 /* Fourth bundle: */
2223 0xe7dfcf1f, /* bfc ip, #30, #2 */
2224 0xe59cc000, /* ldr ip, [ip] */
2225 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2226 0xe12fff1c, /* bx ip */
2228 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2230 /* Subsequent entries in a procedure linkage table look like this. */
2231 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2233 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2234 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2235 0xe08cc00f, /* add ip, ip, pc */
2236 0xea000000, /* b .Lplt_tail */
/* Maximum reachable branch displacements, measured from the branch
   instruction's own address (hence the +8 / +4 pipeline adjustments):
   ARM B/BL (24-bit word offset), Thumb-1 BL (22-bit), Thumb-2 B.W/BL
   (24-bit).  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2254 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2255 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2256 is inserted in arm_build_one_stub(). */
2257 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2258 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2259 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2260 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2261 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2262 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2267 enum stub_insn_type type;
2268 unsigned int r_type;
2272 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2273 to reach the stub if necessary. */
2274 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2276 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2277 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2280 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2282 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2284 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2285 ARM_INSN (0xe12fff1c), /* bx ip */
2286 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2289 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2290 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2292 THUMB16_INSN (0xb401), /* push {r0} */
2293 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2294 THUMB16_INSN (0x4684), /* mov ip, r0 */
2295 THUMB16_INSN (0xbc01), /* pop {r0} */
2296 THUMB16_INSN (0x4760), /* bx ip */
2297 THUMB16_INSN (0xbf00), /* nop */
2298 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2301 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2303 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2305 THUMB16_INSN (0x4778), /* bx pc */
2306 THUMB16_INSN (0x46c0), /* nop */
2307 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2308 ARM_INSN (0xe12fff1c), /* bx ip */
2309 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2312 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2314 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2316 THUMB16_INSN (0x4778), /* bx pc */
2317 THUMB16_INSN (0x46c0), /* nop */
2318 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2319 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2322 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2323 one, when the destination is close enough. */
2324 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2326 THUMB16_INSN (0x4778), /* bx pc */
2327 THUMB16_INSN (0x46c0), /* nop */
2328 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2331 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2332 blx to reach the stub if necessary. */
2333 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2335 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2336 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2337 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2340 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2341 blx to reach the stub if necessary. We can not add into pc;
2342 it is not guaranteed to mode switch (different in ARMv6 and
2344 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2346 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2347 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2348 ARM_INSN (0xe12fff1c), /* bx ip */
2349 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2352 /* V4T ARM -> ARM long branch stub, PIC. */
2353 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2355 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2356 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2357 ARM_INSN (0xe12fff1c), /* bx ip */
2358 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2361 /* V4T Thumb -> ARM long branch stub, PIC.  */
2362 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2364 THUMB16_INSN (0x4778), /* bx pc */
2365 THUMB16_INSN (0x46c0), /* nop */
2366 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2367 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2368 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2371 /* Thumb -> Thumb long branch stub, PIC.  Used on M-profile
2373 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2375 THUMB16_INSN (0xb401), /* push {r0} */
2376 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2377 THUMB16_INSN (0x46fc), /* mov ip, pc */
2378 THUMB16_INSN (0x4484), /* add ip, r0 */
2379 THUMB16_INSN (0xbc01), /* pop {r0} */
2380 THUMB16_INSN (0x4760), /* bx ip */
2381 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X+4) */
2384 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2386 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2388 THUMB16_INSN (0x4778), /* bx pc */
2389 THUMB16_INSN (0x46c0), /* nop */
2390 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2391 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2392 ARM_INSN (0xe12fff1c), /* bx ip */
2393 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2396 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2397 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2398 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2400 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2401 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2402 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2405 /* V4T Thumb -> TLS trampoline.  Lowest common denominator, which is a
2406 long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
2407 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2409 THUMB16_INSN (0x4778), /* bx pc */
2410 THUMB16_INSN (0x46c0), /* nop */
2411 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2412 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2413 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2416 /* Cortex-A8 erratum-workaround stubs. */
2418 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2419 can't use a conditional branch to reach this stub). */
2421 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2423 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2424 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2425 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2428 /* Stub used for b.w and bl.w instructions. */
2430 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2432 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2435 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2437 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2440 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2441 instruction (which switches to ARM mode) to point to this stub. Jump to the
2442 real destination using an ARM-mode branch. */
2444 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2446 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2449 /* For each section group there can be a specially created linker section
2450 to hold the stubs for that group. The name of the stub section is based
2451 upon the name of another section within that group with the suffix below
2454 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2455 create what appeared to be a linker stub section when it actually
2456 contained user code/data. For example, consider this fragment:
2458 const char * stubborn_problems[] = { "np" };
2460 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2463 .data.rel.local.stubborn_problems
2465 This then causes problems in arm32_arm_build_stubs() as it triggers:
2467 // Ignore non-stub sections.
2468 if (!strstr (stub_sec->name, STUB_SUFFIX))
2471 And so the section would be ignored instead of being processed. Hence
2472 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2474 #define STUB_SUFFIX ".__stub"
2476 /* One entry per long/short branch stub defined above. */
2478 DEF_STUB(long_branch_any_any) \
2479 DEF_STUB(long_branch_v4t_arm_thumb) \
2480 DEF_STUB(long_branch_thumb_only) \
2481 DEF_STUB(long_branch_v4t_thumb_thumb) \
2482 DEF_STUB(long_branch_v4t_thumb_arm) \
2483 DEF_STUB(short_branch_v4t_thumb_arm) \
2484 DEF_STUB(long_branch_any_arm_pic) \
2485 DEF_STUB(long_branch_any_thumb_pic) \
2486 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2487 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2488 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2489 DEF_STUB(long_branch_thumb_only_pic) \
2490 DEF_STUB(long_branch_any_tls_pic) \
2491 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2492 DEF_STUB(a8_veneer_b_cond) \
2493 DEF_STUB(a8_veneer_b) \
2494 DEF_STUB(a8_veneer_bl) \
2495 DEF_STUB(a8_veneer_blx)
2497 #define DEF_STUB(x) arm_stub_##x,
2498 enum elf32_arm_stub_type
2502 /* Note the first a8_veneer type */
2503 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2509 const insn_sequence* template_sequence;
2513 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2514 static const stub_def stub_definitions[] =
2520 struct elf32_arm_stub_hash_entry
2522 /* Base hash table entry structure. */
2523 struct bfd_hash_entry root;
2525 /* The stub section. */
2528 /* Offset within stub_sec of the beginning of this stub. */
2529 bfd_vma stub_offset;
2531 /* Given the symbol's value and its section we can determine its final
2532 value when building the stubs (so the stub knows where to jump). */
2533 bfd_vma target_value;
2534 asection *target_section;
2536 /* Offset to apply to relocation referencing target_value. */
2537 bfd_vma target_addend;
2539 /* The instruction which caused this stub to be generated (only valid for
2540 Cortex-A8 erratum workaround stubs at present). */
2541 unsigned long orig_insn;
2543 /* The stub type. */
2544 enum elf32_arm_stub_type stub_type;
2545 /* Its encoding size in bytes. */
2548 const insn_sequence *stub_template;
2549 /* The size of the template (number of entries). */
2550 int stub_template_size;
2552 /* The symbol table entry, if any, that this was derived from. */
2553 struct elf32_arm_link_hash_entry *h;
2555 /* Type of branch. */
2556 enum arm_st_branch_type branch_type;
2558 /* Where this stub is being called from, or, in the case of combined
2559 stub sections, the first input section in the group. */
2562 /* The name for the local symbol at the start of this stub. The
2563 stub name in the hash table has to be unique; this does not, so
2564 it can be friendlier. */
2568 /* Used to build a map of a section. This is required for mixed-endian
2571 typedef struct elf32_elf_section_map
2576 elf32_arm_section_map;
2578 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2582 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2583 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2584 VFP11_ERRATUM_ARM_VENEER,
2585 VFP11_ERRATUM_THUMB_VENEER
2587 elf32_vfp11_erratum_type;
2589 typedef struct elf32_vfp11_erratum_list
2591 struct elf32_vfp11_erratum_list *next;
2597 struct elf32_vfp11_erratum_list *veneer;
2598 unsigned int vfp_insn;
2602 struct elf32_vfp11_erratum_list *branch;
2606 elf32_vfp11_erratum_type type;
2608 elf32_vfp11_erratum_list;
2613 INSERT_EXIDX_CANTUNWIND_AT_END
2615 arm_unwind_edit_type;
2617 /* A (sorted) list of edits to apply to an unwind table. */
2618 typedef struct arm_unwind_table_edit
2620 arm_unwind_edit_type type;
2621 /* Note: we sometimes want to insert an unwind entry corresponding to a
2622 section different from the one we're currently writing out, so record the
2623 (text) section this edit relates to here. */
2624 asection *linked_section;
2626 struct arm_unwind_table_edit *next;
2628 arm_unwind_table_edit;
2630 typedef struct _arm_elf_section_data
2632 /* Information about mapping symbols. */
2633 struct bfd_elf_section_data elf;
2634 unsigned int mapcount;
2635 unsigned int mapsize;
2636 elf32_arm_section_map *map;
2637 /* Information about CPU errata. */
2638 unsigned int erratumcount;
2639 elf32_vfp11_erratum_list *erratumlist;
2640 /* Information about unwind tables. */
2643 /* Unwind info attached to a text section. */
2646 asection *arm_exidx_sec;
2649 /* Unwind info attached to an .ARM.exidx section. */
2652 arm_unwind_table_edit *unwind_edit_list;
2653 arm_unwind_table_edit *unwind_edit_tail;
2657 _arm_elf_section_data;
2659 #define elf32_arm_section_data(sec) \
2660 ((_arm_elf_section_data *) elf_section_data (sec))
2662 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2663 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2664 so may be created multiple times: we use an array of these entries whilst
2665 relaxing which we can refresh easily, then create stubs for each potentially
2666 erratum-triggering instruction once we've settled on a solution. */
2668 struct a8_erratum_fix
2674 unsigned long orig_insn;
2676 enum elf32_arm_stub_type stub_type;
2677 enum arm_st_branch_type branch_type;
2680 /* A table of relocs applied to branches which might trigger Cortex-A8
2683 struct a8_erratum_reloc
2686 bfd_vma destination;
2687 struct elf32_arm_link_hash_entry *hash;
2688 const char *sym_name;
2689 unsigned int r_type;
2690 enum arm_st_branch_type branch_type;
2691 bfd_boolean non_a8_stub;
2694 /* The size of the thread control block. */
2697 /* ARM-specific information about a PLT entry, over and above the usual
2701 /* We reference count Thumb references to a PLT entry separately,
2702 so that we can emit the Thumb trampoline only if needed. */
2703 bfd_signed_vma thumb_refcount;
2705 /* Some references from Thumb code may be eliminated by BL->BLX
2706 conversion, so record them separately. */
2707 bfd_signed_vma maybe_thumb_refcount;
2709 /* How many of the recorded PLT accesses were from non-call relocations.
2710 This information is useful when deciding whether anything takes the
2711 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
2712 non-call references to the function should resolve directly to the
2713 real runtime target. */
2714 unsigned int noncall_refcount;
2716 /* Since PLT entries have variable size if the Thumb prologue is
2717 used, we need to record the index into .got.plt instead of
2718 recomputing it from the PLT offset. */
2719 bfd_signed_vma got_offset;
2722 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
2723 struct arm_local_iplt_info
2725 /* The information that is usually found in the generic ELF part of
2726 the hash table entry. */
2727 union gotplt_union root;
2729 /* The information that is usually found in the ARM-specific part of
2730 the hash table entry. */
2731 struct arm_plt_info arm;
2733 /* A list of all potential dynamic relocations against this symbol. */
2734 struct elf_dyn_relocs *dyn_relocs;
2737 struct elf_arm_obj_tdata
2739 struct elf_obj_tdata root;
2741 /* tls_type for each local got entry. */
2742 char *local_got_tls_type;
2744 /* GOTPLT entries for TLS descriptors. */
2745 bfd_vma *local_tlsdesc_gotent;
2747 /* Information for local symbols that need entries in .iplt. */
2748 struct arm_local_iplt_info **local_iplt;
2750 /* Zero to warn when linking objects with incompatible enum sizes. */
2751 int no_enum_size_warning;
2753 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2754 int no_wchar_size_warning;
2757 #define elf_arm_tdata(bfd) \
2758 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2760 #define elf32_arm_local_got_tls_type(bfd) \
2761 (elf_arm_tdata (bfd)->local_got_tls_type)
2763 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2764 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2766 #define elf32_arm_local_iplt(bfd) \
2767 (elf_arm_tdata (bfd)->local_iplt)
2769 #define is_arm_elf(bfd) \
2770 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2771 && elf_tdata (bfd) != NULL \
2772 && elf_object_id (bfd) == ARM_ELF_DATA)
2775 elf32_arm_mkobject (bfd *abfd)
2777 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2781 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2783 /* Arm ELF linker hash entry. */
2784 struct elf32_arm_link_hash_entry
2786 struct elf_link_hash_entry root;
2788 /* Track dynamic relocs copied for this symbol. */
2789 struct elf_dyn_relocs *dyn_relocs;
2791 /* ARM-specific PLT information. */
2792 struct arm_plt_info plt;
2794 #define GOT_UNKNOWN 0
2795 #define GOT_NORMAL 1
2796 #define GOT_TLS_GD 2
2797 #define GOT_TLS_IE 4
2798 #define GOT_TLS_GDESC 8
2799 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
2800 unsigned int tls_type : 8;
2802 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
2803 unsigned int is_iplt : 1;
2805 unsigned int unused : 23;
2807 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
2808 starting at the end of the jump table. */
2809 bfd_vma tlsdesc_got;
2811 /* The symbol marking the real symbol location for exported thumb
2812 symbols with Arm stubs. */
2813 struct elf_link_hash_entry *export_glue;
2815 /* A pointer to the most recently used stub hash entry against this
2817 struct elf32_arm_stub_hash_entry *stub_cache;
2820 /* Traverse an arm ELF linker hash table. */
2821 #define elf32_arm_link_hash_traverse(table, func, info) \
2822 (elf_link_hash_traverse \
2824 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2827 /* Get the ARM elf linker hash table from a link_info structure. */
2828 #define elf32_arm_hash_table(info) \
2829 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2830 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2832 #define arm_stub_hash_lookup(table, string, create, copy) \
2833 ((struct elf32_arm_stub_hash_entry *) \
2834 bfd_hash_lookup ((table), (string), (create), (copy)))
2836 /* Array to keep track of which stub sections have been created, and
2837 information on stub grouping. */
2840 /* This is the section to which stubs in the group will be
2843 /* The stub section. */
2847 #define elf32_arm_compute_jump_table_size(htab) \
2848 ((htab)->next_tls_desc_index * 4)
2850 /* ARM ELF linker hash table. */
2851 struct elf32_arm_link_hash_table
2853 /* The main hash table. */
2854 struct elf_link_hash_table root;
2856 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2857 bfd_size_type thumb_glue_size;
2859 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2860 bfd_size_type arm_glue_size;
2862 /* The size in bytes of section containing the ARMv4 BX veneers. */
2863 bfd_size_type bx_glue_size;
2865 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2866 veneer has been populated. */
2867 bfd_vma bx_glue_offset[15];
2869 /* The size in bytes of the section containing glue for VFP11 erratum
2871 bfd_size_type vfp11_erratum_glue_size;
2873 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2874 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2875 elf32_arm_write_section(). */
2876 struct a8_erratum_fix *a8_erratum_fixes;
2877 unsigned int num_a8_erratum_fixes;
2879 /* An arbitrary input BFD chosen to hold the glue sections. */
2880 bfd * bfd_of_glue_owner;
2882 /* Nonzero to output a BE8 image. */
2885 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2886 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2889 /* The relocation to use for R_ARM_TARGET2 relocations. */
2892 /* 0 = Ignore R_ARM_V4BX.
2893 1 = Convert BX to MOV PC.
2894 2 = Generate v4 interworing stubs. */
2897 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2900 /* Whether we should fix the ARM1176 BLX immediate issue. */
2903 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2906 /* What sort of code sequences we should look for which may trigger the
2907 VFP11 denorm erratum. */
2908 bfd_arm_vfp11_fix vfp11_fix;
2910 /* Global counter for the number of fixes we have emitted. */
2911 int num_vfp11_fixes;
2913 /* Nonzero to force PIC branch veneers. */
2916 /* The number of bytes in the initial entry in the PLT. */
2917 bfd_size_type plt_header_size;
2919 /* The number of bytes in the subsequent PLT etries. */
2920 bfd_size_type plt_entry_size;
2922 /* True if the target system is VxWorks. */
2925 /* True if the target system is Symbian OS. */
2928 /* True if the target system is Native Client. */
2931 /* True if the target uses REL relocations. */
2934 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
2935 bfd_vma next_tls_desc_index;
2937 /* How many R_ARM_TLS_DESC relocations were generated so far. */
2938 bfd_vma num_tls_desc;
2940 /* Short-cuts to get to dynamic linker sections. */
2944 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2947 /* The offset into splt of the PLT entry for the TLS descriptor
2948 resolver. Special values are 0, if not necessary (or not found
2949 to be necessary yet), and -1 if needed but not determined
2951 bfd_vma dt_tlsdesc_plt;
2953 /* The offset into sgot of the GOT entry used by the PLT entry
2955 bfd_vma dt_tlsdesc_got;
2957 /* Offset in .plt section of tls_arm_trampoline. */
2958 bfd_vma tls_trampoline;
2960 /* Data for R_ARM_TLS_LDM32 relocations. */
2963 bfd_signed_vma refcount;
2967 /* Small local sym cache. */
2968 struct sym_cache sym_cache;
2970 /* For convenience in allocate_dynrelocs. */
2973 /* The amount of space used by the reserved portion of the sgotplt
2974 section, plus whatever space is used by the jump slots. */
2975 bfd_vma sgotplt_jump_table_size;
2977 /* The stub hash table. */
2978 struct bfd_hash_table stub_hash_table;
2980 /* Linker stub bfd. */
2983 /* Linker call-backs. */
2984 asection * (*add_stub_section) (const char *, asection *);
2985 void (*layout_sections_again) (void);
2987 /* Array to keep track of which stub sections have been created, and
2988 information on stub grouping. */
2989 struct map_stub *stub_group;
2991 /* Number of elements in stub_group. */
2994 /* Assorted information used by elf32_arm_size_stubs. */
2995 unsigned int bfd_count;
2997 asection **input_list;
3000 /* Create an entry in an ARM ELF linker hash table. */
3002 static struct bfd_hash_entry *
3003 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3004 struct bfd_hash_table * table,
3005 const char * string)
3007 struct elf32_arm_link_hash_entry * ret =
3008 (struct elf32_arm_link_hash_entry *) entry;
3010 /* Allocate the structure if it has not already been allocated by a
3013 ret = (struct elf32_arm_link_hash_entry *)
3014 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3016 return (struct bfd_hash_entry *) ret;
3018 /* Call the allocation method of the superclass. */
3019 ret = ((struct elf32_arm_link_hash_entry *)
3020 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3024 ret->dyn_relocs = NULL;
3025 ret->tls_type = GOT_UNKNOWN;
3026 ret->tlsdesc_got = (bfd_vma) -1;
3027 ret->plt.thumb_refcount = 0;
3028 ret->plt.maybe_thumb_refcount = 0;
3029 ret->plt.noncall_refcount = 0;
3030 ret->plt.got_offset = -1;
3031 ret->is_iplt = FALSE;
3032 ret->export_glue = NULL;
3034 ret->stub_cache = NULL;
3037 return (struct bfd_hash_entry *) ret;
3040 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3044 elf32_arm_allocate_local_sym_info (bfd *abfd)
3046 if (elf_local_got_refcounts (abfd) == NULL)
3048 bfd_size_type num_syms;
3052 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3053 size = num_syms * (sizeof (bfd_signed_vma)
3054 + sizeof (struct arm_local_iplt_info *)
3057 data = bfd_zalloc (abfd, size);
3061 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3062 data += num_syms * sizeof (bfd_signed_vma);
3064 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3065 data += num_syms * sizeof (struct arm_local_iplt_info *);
3067 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3068 data += num_syms * sizeof (bfd_vma);
3070 elf32_arm_local_got_tls_type (abfd) = data;
3075 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3076 to input bfd ABFD. Create the information if it doesn't already exist.
3077 Return null if an allocation fails. */
3079 static struct arm_local_iplt_info *
3080 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3082 struct arm_local_iplt_info **ptr;
3084 if (!elf32_arm_allocate_local_sym_info (abfd))
3087 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3088 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3090 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3094 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3095 in ABFD's symbol table. If the symbol is global, H points to its
3096 hash table entry, otherwise H is null.
3098 Return true if the symbol does have PLT information. When returning
3099 true, point *ROOT_PLT at the target-independent reference count/offset
3100 union and *ARM_PLT at the ARM-specific information. */
3103 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3104 unsigned long r_symndx, union gotplt_union **root_plt,
3105 struct arm_plt_info **arm_plt)
3107 struct arm_local_iplt_info *local_iplt;
3111 *root_plt = &h->root.plt;
3116 if (elf32_arm_local_iplt (abfd) == NULL)
3119 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3120 if (local_iplt == NULL)
3123 *root_plt = &local_iplt->root;
3124 *arm_plt = &local_iplt->arm;
3128 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3132 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3133 struct arm_plt_info *arm_plt)
3135 struct elf32_arm_link_hash_table *htab;
3137 htab = elf32_arm_hash_table (info);
3138 return (arm_plt->thumb_refcount != 0
3139 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3142 /* Return a pointer to the head of the dynamic reloc list that should
3143 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3144 ABFD's symbol table. Return null if an error occurs. */
3146 static struct elf_dyn_relocs **
3147 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3148 Elf_Internal_Sym *isym)
3150 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3152 struct arm_local_iplt_info *local_iplt;
3154 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3155 if (local_iplt == NULL)
3157 return &local_iplt->dyn_relocs;
3161 /* Track dynamic relocs needed for local syms too.
3162 We really need local syms available to do this
3167 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3171 vpp = &elf_section_data (s)->local_dynrel;
3172 return (struct elf_dyn_relocs **) vpp;
3176 /* Initialize an entry in the stub hash table. */
3178 static struct bfd_hash_entry *
3179 stub_hash_newfunc (struct bfd_hash_entry *entry,
3180 struct bfd_hash_table *table,
3183 /* Allocate the structure if it has not already been allocated by a
3187 entry = (struct bfd_hash_entry *)
3188 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3193 /* Call the allocation method of the superclass. */
3194 entry = bfd_hash_newfunc (entry, table, string);
3197 struct elf32_arm_stub_hash_entry *eh;
3199 /* Initialize the local fields. */
3200 eh = (struct elf32_arm_stub_hash_entry *) entry;
3201 eh->stub_sec = NULL;
3202 eh->stub_offset = 0;
3203 eh->target_value = 0;
3204 eh->target_section = NULL;
3205 eh->target_addend = 0;
3207 eh->stub_type = arm_stub_none;
3209 eh->stub_template = NULL;
3210 eh->stub_template_size = 0;
3213 eh->output_name = NULL;
3219 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3220 shortcuts to them in our hash table. */
3223 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3225 struct elf32_arm_link_hash_table *htab;
3227 htab = elf32_arm_hash_table (info);
3231 /* BPABI objects never have a GOT, or associated sections. */
3232 if (htab->symbian_p)
3235 if (! _bfd_elf_create_got_section (dynobj, info))
3241 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3244 create_ifunc_sections (struct bfd_link_info *info)
3246 struct elf32_arm_link_hash_table *htab;
3247 const struct elf_backend_data *bed;
3252 htab = elf32_arm_hash_table (info);
3253 dynobj = htab->root.dynobj;
3254 bed = get_elf_backend_data (dynobj);
3255 flags = bed->dynamic_sec_flags;
3257 if (htab->root.iplt == NULL)
3259 s = bfd_make_section_with_flags (dynobj, ".iplt",
3260 flags | SEC_READONLY | SEC_CODE);
3262 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3264 htab->root.iplt = s;
3267 if (htab->root.irelplt == NULL)
3269 s = bfd_make_section_with_flags (dynobj, RELOC_SECTION (htab, ".iplt"),
3270 flags | SEC_READONLY);
3272 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3274 htab->root.irelplt = s;
3277 if (htab->root.igotplt == NULL)
3279 s = bfd_make_section_with_flags (dynobj, ".igot.plt", flags);
3281 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3283 htab->root.igotplt = s;
3288 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3289 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3293 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3295 struct elf32_arm_link_hash_table *htab;
3297 htab = elf32_arm_hash_table (info);
3301 if (!htab->root.sgot && !create_got_section (dynobj, info))
3304 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3307 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
3309 htab->srelbss = bfd_get_section_by_name (dynobj,
3310 RELOC_SECTION (htab, ".bss"));
3312 if (htab->vxworks_p)
3314 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3319 htab->plt_header_size = 0;
3320 htab->plt_entry_size
3321 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3325 htab->plt_header_size
3326 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3327 htab->plt_entry_size
3328 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3332 if (!htab->root.splt
3333 || !htab->root.srelplt
3335 || (!info->shared && !htab->srelbss))
3341 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3344 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
3345 struct elf_link_hash_entry *dir,
3346 struct elf_link_hash_entry *ind)
3348 struct elf32_arm_link_hash_entry *edir, *eind;
3350 edir = (struct elf32_arm_link_hash_entry *) dir;
3351 eind = (struct elf32_arm_link_hash_entry *) ind;
3353 if (eind->dyn_relocs != NULL)
3355 if (edir->dyn_relocs != NULL)
3357 struct elf_dyn_relocs **pp;
3358 struct elf_dyn_relocs *p;
3360 /* Add reloc counts against the indirect sym to the direct sym
3361 list. Merge any entries against the same section. */
3362 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
3364 struct elf_dyn_relocs *q;
3366 for (q = edir->dyn_relocs; q != NULL; q = q->next)
3367 if (q->sec == p->sec)
3369 q->pc_count += p->pc_count;
3370 q->count += p->count;
3377 *pp = edir->dyn_relocs;
3380 edir->dyn_relocs = eind->dyn_relocs;
3381 eind->dyn_relocs = NULL;
3384 if (ind->root.type == bfd_link_hash_indirect)
3386 /* Copy over PLT info. */
3387 edir->plt.thumb_refcount += eind->plt.thumb_refcount;
3388 eind->plt.thumb_refcount = 0;
3389 edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
3390 eind->plt.maybe_thumb_refcount = 0;
3391 edir->plt.noncall_refcount += eind->plt.noncall_refcount;
3392 eind->plt.noncall_refcount = 0;
3394 /* We should only allocate a function to .iplt once the final
3395 symbol information is known. */
3396 BFD_ASSERT (!eind->is_iplt);
3398 if (dir->got.refcount <= 0)
3400 edir->tls_type = eind->tls_type;
3401 eind->tls_type = GOT_UNKNOWN;
3405 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
3408 /* Create an ARM elf linker hash table. */
/* NOTE(review): this chunk is a lossy extraction -- physical lines such as
   opening braces, NULL checks after allocation, `#else`/`#endif` and the
   failure path of the init calls are missing between the numbered lines
   below.  Comments only; no visible code has been altered.  */
3410 static struct bfd_link_hash_table *
3411 elf32_arm_link_hash_table_create (bfd *abfd)
3413 struct elf32_arm_link_hash_table *ret;
3414 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
/* Allocate the table, then initialise the generic ELF linker-hash part
   with the ARM-specific entry constructor and entry size.  */
3416 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
3420 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
3421 elf32_arm_link_hash_newfunc,
3422 sizeof (struct elf32_arm_link_hash_entry),
/* Reset every ARM-specific field to a neutral default: dynamic sections,
   TLS-descriptor bookkeeping, glue/veneer sizes and erratum-fix state.  */
3429 ret->sdynbss = NULL;
3430 ret->srelbss = NULL;
3431 ret->srelplt2 = NULL;
3432 ret->dt_tlsdesc_plt = 0;
3433 ret->dt_tlsdesc_got = 0;
3434 ret->tls_trampoline = 0;
3435 ret->next_tls_desc_index = 0;
3436 ret->num_tls_desc = 0;
3437 ret->thumb_glue_size = 0;
3438 ret->arm_glue_size = 0;
3439 ret->bx_glue_size = 0;
3440 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
3441 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
3442 ret->vfp11_erratum_glue_size = 0;
3443 ret->num_vfp11_fixes = 0;
3444 ret->fix_cortex_a8 = 0;
3445 ret->fix_arm1176 = 0;
3446 ret->bfd_of_glue_owner = NULL;
3447 ret->byteswap_code = 0;
3448 ret->target1_is_rel = 0;
3449 ret->target2_reloc = R_ARM_NONE;
/* PLT layout depends on the build-time FOUR_WORD_PLT choice.  An `#else`
   line was presumably dropped by extraction before the 20/12 pair.  */
3450 #ifdef FOUR_WORD_PLT
3451 ret->plt_header_size = 16;
3452 ret->plt_entry_size = 16;
3454 ret->plt_header_size = 20;
3455 ret->plt_entry_size = 12;
3463 ret->sym_cache.abfd = NULL;
3465 ret->tls_ldm_got.refcount = 0;
3466 ret->stub_bfd = NULL;
3467 ret->add_stub_section = NULL;
3468 ret->layout_sections_again = NULL;
3469 ret->stub_group = NULL;
3473 ret->input_list = NULL;
/* The stub hash table holds one entry per long-branch/veneer stub.  */
3475 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
3476 sizeof (struct elf32_arm_stub_hash_entry)))
3482 return &ret->root.root;
3485 /* Free the derived linker hash table. */
/* Counterpart of elf32_arm_link_hash_table_create: release the stub hash
   table first, then let the generic routine free HASH itself.  */
3488 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
3490 struct elf32_arm_link_hash_table *ret
3491 = (struct elf32_arm_link_hash_table *) hash;
3493 bfd_hash_table_free (&ret->stub_hash_table);
3494 _bfd_generic_link_hash_table_free (hash);
3497 /* Determine if we're dealing with a Thumb only architecture. */
3500 using_thumb_only (struct elf32_arm_link_hash_table *globals)
/* Read the Tag_CPU_arch build attribute from the output bfd.  */
3502 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
/* v6-M and v6S-M have no ARM execution state at all.  */
3506 if (arch == TAG_CPU_ARCH_V6_M || arch == TAG_CPU_ARCH_V6S_M)
/* Only v7 / v7E-M can still be Thumb-only, and then only if the
   architecture profile attribute says 'M' (microcontroller profile).  */
3509 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
3512 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3513 Tag_CPU_arch_profile);
3515 return profile == 'M';
3518 /* Determine if we're dealing with a Thumb-2 object. */
3521 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3523 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
/* Thumb-2 first appeared in v6T2 and is present in all v7-and-later
   architecture tags.  */
3525 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3528 /* Determine what kind of NOPs are available. */
/* True if the target architecture (per the Tag_CPU_arch attribute)
   provides the ARM-state NOP instruction.  */
3531 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3533 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3535 return arch == TAG_CPU_ARCH_V6T2
3536 || arch == TAG_CPU_ARCH_V6K
3537 || arch == TAG_CPU_ARCH_V7
3538 || arch == TAG_CPU_ARCH_V7E_M;
/* True if the target architecture provides the 32-bit Thumb-2 NOP
   (v6T2, v7 and v7E-M per the attribute check below).  */
3542 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3544 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3546 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3547 || arch == TAG_CPU_ARCH_V7E_M);
/* Predicate on the stub type: the cases listed below are the stub
   flavours whose first instruction executes in Thumb state.
   NOTE(review): the `switch` header and the TRUE/FALSE return lines were
   dropped by extraction; only the case labels remain visible.  */
3551 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3555 case arm_stub_long_branch_thumb_only:
3556 case arm_stub_long_branch_v4t_thumb_arm:
3557 case arm_stub_short_branch_v4t_thumb_arm:
3558 case arm_stub_long_branch_v4t_thumb_arm_pic:
3559 case arm_stub_long_branch_v4t_thumb_tls_pic:
3560 case arm_stub_long_branch_thumb_only_pic:
3571 /* Determine the type of stub needed, if any, for a call. */
/* Decide which long-branch/interworking stub (if any) is required for the
   branch relocation REL at LOCATION targeting DESTINATION.  Returns
   arm_stub_none when the branch reaches directly; otherwise returns the
   stub flavour and records the resolved destination mode in
   *ACTUAL_BRANCH_TYPE.  NOTE(review): lossy extraction -- braces, some
   `return` statements and parts of compound conditions are missing.  */
3573 static enum elf32_arm_stub_type
3574 arm_type_of_stub (struct bfd_link_info *info,
3575 asection *input_sec,
3576 const Elf_Internal_Rela *rel,
3577 unsigned char st_type,
3578 enum arm_st_branch_type *actual_branch_type,
3579 struct elf32_arm_link_hash_entry *hash,
3580 bfd_vma destination,
3586 bfd_signed_vma branch_offset;
3587 unsigned int r_type;
3588 struct elf32_arm_link_hash_table * globals;
3591 enum elf32_arm_stub_type stub_type = arm_stub_none;
3593 enum arm_st_branch_type branch_type = *actual_branch_type;
3594 union gotplt_union *root_plt;
3595 struct arm_plt_info *arm_plt;
3597 if (branch_type == ST_BRANCH_LONG)
3600 globals = elf32_arm_hash_table (info);
3601 if (globals == NULL)
3604 thumb_only = using_thumb_only (globals);
3606 thumb2 = using_thumb2 (globals);
3608 /* Determine where the call point is. */
3609 location = (input_sec->output_offset
3610 + input_sec->output_section->vma
3613 r_type = ELF32_R_TYPE (rel->r_info);
3615 /* For TLS call relocs, it is the caller's responsibility to provide
3616 the address of the appropriate trampoline. */
3617 if (r_type != R_ARM_TLS_CALL
3618 && r_type != R_ARM_THM_TLS_CALL
3619 && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3620 &root_plt, &arm_plt)
3621 && root_plt->offset != (bfd_vma) -1)
/* The call goes through a PLT entry: retarget the branch at the PLT
   (in .iplt for ifuncs, .plt otherwise) instead of the final symbol.  */
3625 if (hash == NULL || hash->is_iplt)
3626 splt = globals->root.iplt;
3628 splt = globals->root.splt;
3633 /* Note when dealing with PLT entries: the main PLT stub is in
3634 ARM mode, so if the branch is in Thumb mode, another
3635 Thumb->ARM stub will be inserted later just before the ARM
3636 PLT stub. We don't take this extra distance into account
3637 here, because if a long branch stub is needed, we'll add a
3638 Thumb->Arm one and branch directly to the ARM PLT entry
3639 because it avoids spreading offset corrections in several
3642 destination = (splt->output_section->vma
3643 + splt->output_offset
3644 + root_plt->offset);
3646 branch_type = ST_BRANCH_TO_ARM;
3649 /* Calls to STT_GNU_IFUNC symbols should go through a PLT. */
3650 BFD_ASSERT (st_type != STT_GNU_IFUNC);
3652 branch_offset = (bfd_signed_vma)(destination - location);
/* --- Thumb-state branch relocations. --- */
3654 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3655 || r_type == R_ARM_THM_TLS_CALL)
3657 /* Handle cases where:
3658 - this call goes too far (different Thumb/Thumb2 max
3660 - it's a Thumb->Arm call and blx is not available, or it's a
3661 Thumb->Arm branch (not bl). A stub is needed in this case,
3662 but only if this call is not through a PLT entry. Indeed,
3663 PLT stubs handle mode switching already.
/* Range check: Thumb-1 vs Thumb-2 use different max branch offsets.  */
3666 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3667 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3669 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3670 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3671 || (branch_type == ST_BRANCH_TO_ARM
3672 && (((r_type == R_ARM_THM_CALL
3673 || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3674 || (r_type == R_ARM_THM_JUMP24))
3677 if (branch_type == ST_BRANCH_TO_THUMB)
3679 /* Thumb to thumb. */
/* Note: `info->shared | globals->pic_veneer` is a bitwise OR of two
   flags -- equivalent to logical OR here.  */
3682 stub_type = (info->shared | globals->pic_veneer)
3684 ? ((globals->use_blx
3685 && (r_type == R_ARM_THM_CALL))
3686 /* V5T and above. Stub starts with ARM code, so
3687 we must be able to switch mode before
3688 reaching it, which is only possible for 'bl'
3689 (ie R_ARM_THM_CALL relocation). */
3690 ? arm_stub_long_branch_any_thumb_pic
3691 /* On V4T, use Thumb code only. */
3692 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3694 /* non-PIC stubs. */
3695 : ((globals->use_blx
3696 && (r_type == R_ARM_THM_CALL))
3697 /* V5T and above. */
3698 ? arm_stub_long_branch_any_any
3700 : arm_stub_long_branch_v4t_thumb_thumb);
/* Thumb-only (M-profile) target: no mode switch possible.  */
3704 stub_type = (info->shared | globals->pic_veneer)
3706 ? arm_stub_long_branch_thumb_only_pic
3708 : arm_stub_long_branch_thumb_only;
/* Thumb -> ARM: warn once if interworking was not enabled for the
   defining object.  */
3715 && sym_sec->owner != NULL
3716 && !INTERWORK_FLAG (sym_sec->owner))
3718 (*_bfd_error_handler)
3719 (_("%B(%s): warning: interworking not enabled.\n"
3720 " first occurrence: %B: Thumb call to ARM"),
3721 sym_sec->owner, input_bfd, name);
3725 (info->shared | globals->pic_veneer)
3727 ? (r_type == R_ARM_THM_TLS_CALL
3729 ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3730 : arm_stub_long_branch_v4t_thumb_tls_pic)
3731 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3732 /* V5T PIC and above. */
3733 ? arm_stub_long_branch_any_arm_pic
3735 : arm_stub_long_branch_v4t_thumb_arm_pic))
3737 /* non-PIC stubs. */
3738 : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3739 /* V5T and above. */
3740 ? arm_stub_long_branch_any_any
3742 : arm_stub_long_branch_v4t_thumb_arm);
3744 /* Handle v4t short branches. */
3745 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3746 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3747 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3748 stub_type = arm_stub_short_branch_v4t_thumb_arm;
/* --- ARM-state branch relocations. --- */
3752 else if (r_type == R_ARM_CALL
3753 || r_type == R_ARM_JUMP24
3754 || r_type == R_ARM_PLT32
3755 || r_type == R_ARM_TLS_CALL)
3757 if (branch_type == ST_BRANCH_TO_THUMB)
/* ARM -> Thumb: same interworking warning as above.  */
3762 && sym_sec->owner != NULL
3763 && !INTERWORK_FLAG (sym_sec->owner))
3765 (*_bfd_error_handler)
3766 (_("%B(%s): warning: interworking not enabled.\n"
3767 " first occurrence: %B: ARM call to Thumb"),
3768 sym_sec->owner, input_bfd, name);
3771 /* We have an extra 2-bytes reach because of
3772 the mode change (bit 24 (H) of BLX encoding). */
3773 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3774 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3775 || (r_type == R_ARM_CALL && !globals->use_blx)
3776 || (r_type == R_ARM_JUMP24)
3777 || (r_type == R_ARM_PLT32))
3779 stub_type = (info->shared | globals->pic_veneer)
3781 ? ((globals->use_blx)
3782 /* V5T and above. */
3783 ? arm_stub_long_branch_any_thumb_pic
3785 : arm_stub_long_branch_v4t_arm_thumb_pic)
3787 /* non-PIC stubs. */
3788 : ((globals->use_blx)
3789 /* V5T and above. */
3790 ? arm_stub_long_branch_any_any
3792 : arm_stub_long_branch_v4t_arm_thumb);
/* ARM -> ARM: only a plain out-of-range check is needed.  */
3798 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3799 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3802 (info->shared | globals->pic_veneer)
3804 ? (r_type == R_ARM_TLS_CALL
3806 ? arm_stub_long_branch_any_tls_pic
3807 : arm_stub_long_branch_any_arm_pic)
3808 /* non-PIC stubs. */
3809 : arm_stub_long_branch_any_any;
3814 /* If a stub is needed, record the actual destination type. */
3815 if (stub_type != arm_stub_none)
3816 *actual_branch_type = branch_type;
3821 /* Build a name for an entry in the stub hash table. */
/* The name encodes the (group-leader) section id, the target symbol (by
   name for global symbols, by section-id:symbol-index otherwise), the
   addend and the stub type, so distinct stubs never collide.  Returns a
   malloc'd string, or NULL on allocation failure.  NOTE(review): the
   branch selecting between the two sprintf forms (HASH != NULL vs local
   symbol) was dropped by extraction.  */
3824 elf32_arm_stub_name (const asection *input_section,
3825 const asection *sym_sec,
3826 const struct elf32_arm_link_hash_entry *hash,
3827 const Elf_Internal_Rela *rel,
3828 enum elf32_arm_stub_type stub_type)
/* Global symbol: "<sectid>_<symname>+<addend>_<stubtype>".  */
3835 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
3836 stub_name = (char *) bfd_malloc (len);
3837 if (stub_name != NULL)
3838 sprintf (stub_name, "%08x_%s+%x_%d",
3839 input_section->id & 0xffffffff,
3840 hash->root.root.root.string,
3841 (int) rel->r_addend & 0xffffffff,
/* Local symbol: "<sectid>_<symsecid>:<symidx>+<addend>_<stubtype>";
   TLS-call relocs use 0 for the symbol index.  */
3846 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
3847 stub_name = (char *) bfd_malloc (len);
3848 if (stub_name != NULL)
3849 sprintf (stub_name, "%08x_%x:%x+%x_%d",
3850 input_section->id & 0xffffffff,
3851 sym_sec->id & 0xffffffff,
3852 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
3853 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
3854 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3855 (int) rel->r_addend & 0xffffffff,
3862 /* Look up an entry in the stub hash. Stub entries are cached because
3863 creating the stub name takes a bit of time. */
3865 static struct elf32_arm_stub_hash_entry *
3866 elf32_arm_get_stub_entry (const asection *input_section,
3867 const asection *sym_sec,
3868 struct elf_link_hash_entry *hash,
3869 const Elf_Internal_Rela *rel,
3870 struct elf32_arm_link_hash_table *htab,
3871 enum elf32_arm_stub_type stub_type)
3873 struct elf32_arm_stub_hash_entry *stub_entry;
3874 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3875 const asection *id_sec;
/* Stubs are only ever attached to code sections.  */
3877 if ((input_section->flags & SEC_CODE) == 0)
3880 /* If this input section is part of a group of sections sharing one
3881 stub section, then use the id of the first section in the group.
3882 Stub names need to include a section id, as there may well be
3883 more than one stub used to reach say, printf, and we need to
3884 distinguish between them. */
3885 id_sec = htab->stub_group[input_section->id].link_sec;
/* Fast path: the per-symbol one-entry cache avoids rebuilding the
   stub name when the same (symbol, group, stub type) repeats.  */
3887 if (h != NULL && h->stub_cache != NULL
3888 && h->stub_cache->h == h
3889 && h->stub_cache->id_sec == id_sec
3890 && h->stub_cache->stub_type == stub_type)
3892 stub_entry = h->stub_cache;
/* Slow path: build the name and look it up (no creation here).  */
3898 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
3899 if (stub_name == NULL)
3902 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3903 stub_name, FALSE, FALSE);
3905 h->stub_cache = stub_entry;
3913 /* Find or create a stub section. Returns a pointer to the stub section, and
3914 the section to which the stub section will be attached (in *LINK_SEC_P).
3915 LINK_SEC_P may be NULL. */
3918 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3919 struct elf32_arm_link_hash_table *htab)
/* Every input section was assigned a group-leader (link_sec) by
   group_sections; the stub section is shared by the whole group.  */
3924 link_sec = htab->stub_group[section->id].link_sec;
3925 BFD_ASSERT (link_sec != NULL);
3926 stub_sec = htab->stub_group[section->id].stub_sec;
3928 if (stub_sec == NULL)
3930 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3931 if (stub_sec == NULL)
/* First stub for this group: build "<link_sec name><STUB_SUFFIX>" and
   ask the linker callback to create the section after LINK_SEC.  */
3937 namelen = strlen (link_sec->name);
3938 len = namelen + sizeof (STUB_SUFFIX);
3939 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3943 memcpy (s_name, link_sec->name, namelen);
3944 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3945 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3946 if (stub_sec == NULL)
3948 htab->stub_group[link_sec->id].stub_sec = stub_sec;
/* Cache the result on SECTION itself for the next lookup.  */
3950 htab->stub_group[section->id].stub_sec = stub_sec;
3954 *link_sec_p = link_sec;
3959 /* Add a new stub entry to the stub hash. Not all fields of the new
3960 stub entry are initialised. */
3962 static struct elf32_arm_stub_hash_entry *
3963 elf32_arm_add_stub (const char *stub_name,
3965 struct elf32_arm_link_hash_table *htab)
3969 struct elf32_arm_stub_hash_entry *stub_entry;
/* Locate (or create) the stub section for SECTION's group.  */
3971 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3972 if (stub_sec == NULL)
3975 /* Enter this entry into the linker stub hash table. */
3976 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3978 if (stub_entry == NULL)
3980 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
/* stub_offset is finalised later, in arm_build_one_stub.  */
3986 stub_entry->stub_sec = stub_sec;
3987 stub_entry->stub_offset = 0;
3988 stub_entry->id_sec = link_sec;
3993 /* Store an Arm insn into an output section not processed by
3994 elf32_arm_write_section. */
3997 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3998 bfd * output_bfd, bfd_vma val, void * ptr)
/* Honour --be8-style code byte-swapping: emit the 32-bit instruction
   little-endian when the code endianness differs from the data
   endianness of OUTPUT_BFD, big-endian otherwise.  */
4000 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4001 bfd_putl32 (val, ptr);
4003 bfd_putb32 (val, ptr);
4006 /* Store a 16-bit Thumb insn into an output section not processed by
4007 elf32_arm_write_section. */
4010 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4011 bfd * output_bfd, bfd_vma val, void * ptr)
/* Same endianness rule as put_arm_insn, but for a 16-bit Thumb insn.  */
4013 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4014 bfd_putl16 (val, ptr);
4016 bfd_putb16 (val, ptr);
4019 /* If it's possible to change R_TYPE to a more efficient access
4020 model, return the new reloc type. */
4023 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4024 struct elf_link_hash_entry *h)
4026 int is_local = (h == NULL);
/* No relaxation when building a shared object, or for undefined weak
   symbols -- the dynamic model must be kept.  */
4028 if (info->shared || (h && h->root.type == bfd_link_hash_undefweak))
4031 /* We do not support relaxations for Old TLS models. */
/* TLS-descriptor relocations relax to Local-Exec for local symbols and
   to Initial-Exec for global ones.  */
4034 case R_ARM_TLS_GOTDESC:
4035 case R_ARM_TLS_CALL:
4036 case R_ARM_THM_TLS_CALL:
4037 case R_ARM_TLS_DESCSEQ:
4038 case R_ARM_THM_TLS_DESCSEQ:
4039 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
/* Forward declaration: the definition appears later in the file; it is
   needed here so arm_build_one_stub can apply stub relocations.  */
4045 static bfd_reloc_status_type elf32_arm_final_link_relocate
4046 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4047 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4048 const char *, unsigned char, enum arm_st_branch_type,
4049 struct elf_link_hash_entry *, bfd_boolean *, char **);
/* Return the required alignment (in bytes) for a stub of the given type:
   the Cortex-A8 veneers listed first need only 2-byte alignment, the
   long-branch stubs need 4.  NOTE(review): the switch header and the
   numeric `return` lines were dropped by extraction.  */
4052 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4056 case arm_stub_a8_veneer_b_cond:
4057 case arm_stub_a8_veneer_b:
4058 case arm_stub_a8_veneer_bl:
4061 case arm_stub_long_branch_any_any:
4062 case arm_stub_long_branch_v4t_arm_thumb:
4063 case arm_stub_long_branch_thumb_only:
4064 case arm_stub_long_branch_v4t_thumb_thumb:
4065 case arm_stub_long_branch_v4t_thumb_arm:
4066 case arm_stub_short_branch_v4t_thumb_arm:
4067 case arm_stub_long_branch_any_arm_pic:
4068 case arm_stub_long_branch_any_thumb_pic:
4069 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4070 case arm_stub_long_branch_v4t_arm_thumb_pic:
4071 case arm_stub_long_branch_v4t_thumb_arm_pic:
4072 case arm_stub_long_branch_thumb_only_pic:
4073 case arm_stub_long_branch_any_tls_pic:
4074 case arm_stub_long_branch_v4t_thumb_tls_pic:
4075 case arm_stub_a8_veneer_blx:
4079 abort (); /* Should be unreachable. */
/* bfd_hash_traverse callback: emit the instructions of one stub into its
   stub section and apply the stub-internal relocations.  IN_ARG is the
   bfd_link_info.  NOTE(review): lossy extraction -- braces, some `return`
   lines and loop scaffolding are missing between the numbered lines.  */
4084 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4088 struct elf32_arm_stub_hash_entry *stub_entry;
4089 struct elf32_arm_link_hash_table *globals;
4090 struct bfd_link_info *info;
4097 const insn_sequence *template_sequence;
4099 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4100 int stub_reloc_offset[MAXRELOCS] = {0, 0};
4103 /* Massage our args to the form they really have. */
4104 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4105 info = (struct bfd_link_info *) in_arg;
4107 globals = elf32_arm_hash_table (info);
4108 if (globals == NULL)
4111 stub_sec = stub_entry->stub_sec;
/* Two-pass build: Cortex-A8 stubs (2-byte aligned) are emitted in a
   different pass from the 4-byte-aligned long-branch stubs.  */
4113 if ((globals->fix_cortex_a8 < 0)
4114 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4115 /* We have to do less-strictly-aligned fixes last. */
4118 /* Make a note of the offset within the stubs for this entry. */
4119 stub_entry->stub_offset = stub_sec->size;
4120 loc = stub_sec->contents + stub_entry->stub_offset;
4122 stub_bfd = stub_sec->owner;
4124 /* This is the address of the stub destination. */
4125 sym_value = (stub_entry->target_value
4126 + stub_entry->target_section->output_offset
4127 + stub_entry->target_section->output_section->vma);
4129 template_sequence = stub_entry->stub_template;
4130 template_size = stub_entry->stub_template_size;
/* Emit each template element (16-bit Thumb, 32-bit Thumb pair, ARM word
   or data word), recording which elements carry relocations.  */
4133 for (i = 0; i < template_size; i++)
4135 switch (template_sequence[i].type)
4139 bfd_vma data = (bfd_vma) template_sequence[i].data;
4140 if (template_sequence[i].reloc_addend != 0)
4142 /* We've borrowed the reloc_addend field to mean we should
4143 insert a condition code into this (Thumb-1 branch)
4144 instruction. See THUMB16_BCOND_INSN. */
4145 BFD_ASSERT ((data & 0xff00) == 0xd000);
4146 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4148 bfd_put_16 (stub_bfd, data, loc + size);
/* 32-bit Thumb insn: stored as two halfwords, high half first.  */
4154 bfd_put_16 (stub_bfd,
4155 (template_sequence[i].data >> 16) & 0xffff,
4157 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4159 if (template_sequence[i].r_type != R_ARM_NONE)
4161 stub_reloc_idx[nrelocs] = i;
4162 stub_reloc_offset[nrelocs++] = size;
4168 bfd_put_32 (stub_bfd, template_sequence[i].data,
4170 /* Handle cases where the target is encoded within the
4172 if (template_sequence[i].r_type == R_ARM_JUMP24)
4174 stub_reloc_idx[nrelocs] = i;
4175 stub_reloc_offset[nrelocs++] = size;
/* DATA element: literal word, always relocated.  */
4181 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4182 stub_reloc_idx[nrelocs] = i;
4183 stub_reloc_offset[nrelocs++] = size;
4193 stub_sec->size += size;
4195 /* Stub size has already been computed in arm_size_one_stub. Check
4197 BFD_ASSERT (size == stub_entry->stub_size);
4199 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
4200 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4203 /* Assume there is at least one and at most MAXRELOCS entries to relocate
4205 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
/* Now apply each recorded relocation.  Thumb branch relocs get special
   treatment because _bfd_final_link_relocate cannot handle them.  */
4207 for (i = 0; i < nrelocs; i++)
4208 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
4209 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
4210 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
4211 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
4213 Elf_Internal_Rela rel;
4214 bfd_boolean unresolved_reloc;
4215 char *error_message;
4216 enum arm_st_branch_type branch_type
4217 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22
4218 ? ST_BRANCH_TO_THUMB : ST_BRANCH_TO_ARM);
4219 bfd_vma points_to = sym_value + stub_entry->target_addend;
4221 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4222 rel.r_info = ELF32_R_INFO (0,
4223 template_sequence[stub_reloc_idx[i]].r_type);
4224 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
4226 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4227 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4228 template should refer back to the instruction after the original
4230 points_to = sym_value;
4232 /* There may be unintended consequences if this is not true. */
4233 BFD_ASSERT (stub_entry->h == NULL);
4235 /* Note: _bfd_final_link_relocate doesn't handle these relocations
4236 properly. We should probably use this function unconditionally,
4237 rather than only for certain relocations listed in the enclosing
4238 conditional, for the sake of consistency. */
4239 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4240 (template_sequence[stub_reloc_idx[i]].r_type),
4241 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4242 points_to, info, stub_entry->target_section, "", STT_FUNC,
4243 branch_type, (struct elf_link_hash_entry *) stub_entry->h,
4244 &unresolved_reloc, &error_message);
/* All other relocation types: fold the template addend into the
   destination and relocate with the stub's own branch type.  */
4248 Elf_Internal_Rela rel;
4249 bfd_boolean unresolved_reloc;
4250 char *error_message;
4251 bfd_vma points_to = sym_value + stub_entry->target_addend
4252 + template_sequence[stub_reloc_idx[i]].reloc_addend;
4254 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4255 rel.r_info = ELF32_R_INFO (0,
4256 template_sequence[stub_reloc_idx[i]].r_type);
4259 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4260 (template_sequence[stub_reloc_idx[i]].r_type),
4261 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4262 points_to, info, stub_entry->target_section, "", STT_FUNC,
4263 stub_entry->branch_type,
4264 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
4272 /* Calculate the template, template size and instruction size for a stub.
4273 Return value is the instruction size. */
4276 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4277 const insn_sequence **stub_template,
4278 int *stub_template_size)
4280 const insn_sequence *template_sequence = NULL;
4281 int template_size = 0, i;
/* Look the template up in the static stub_definitions table; the out
   parameters are optional (callers may pass NULL).  */
4284 template_sequence = stub_definitions[stub_type].template_sequence;
4286 *stub_template = template_sequence;
4288 template_size = stub_definitions[stub_type].template_size;
4289 if (stub_template_size)
4290 *stub_template_size = template_size;
/* Sum the byte size of each template element.  NOTE(review): the switch
   cases accumulating the size were dropped by extraction.  */
4293 for (i = 0; i < template_size; i++)
4295 switch (template_sequence[i].type)
4316 /* As above, but don't actually build the stub. Just bump offset so
4317 we know stub section sizes. */
4320 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4321 void *in_arg ATTRIBUTE_UNUSED)
4323 struct elf32_arm_stub_hash_entry *stub_entry;
4324 const insn_sequence *template_sequence;
4325 int template_size, size;
4327 /* Massage our args to the form they really have. */
4328 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4330 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4331 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4333 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
/* Cache the template so arm_build_one_stub need not look it up again.  */
4336 stub_entry->stub_size = size;
4337 stub_entry->stub_template = template_sequence;
4338 stub_entry->stub_template_size = template_size;
/* Round each stub up to an 8-byte boundary within the stub section.  */
4340 size = (size + 7) & ~7;
4341 stub_entry->stub_sec->size += size;
4346 /* External entry points for sizing and building linker stubs. */
4348 /* Set up various things so that we can make a list of input sections
4349 for each output section included in the link. Returns -1 on error,
4350 0 when no stubs will be needed, and 1 on success. */
4353 elf32_arm_setup_section_lists (bfd *output_bfd,
4354 struct bfd_link_info *info)
4357 unsigned int bfd_count;
4358 int top_id, top_index;
4360 asection **input_list, **list;
4362 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4366 if (! is_elf_hash_table (htab))
4369 /* Count the number of input BFDs and find the top input section id. */
4370 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4372 input_bfd = input_bfd->link_next)
4375 for (section = input_bfd->sections;
4377 section = section->next)
4379 if (top_id < section->id)
4380 top_id = section->id;
4383 htab->bfd_count = bfd_count;
/* stub_group is indexed by input-section id, hence top_id + 1 slots;
   bfd_zmalloc leaves every link_sec/stub_sec pointer NULL.  */
4385 amt = sizeof (struct map_stub) * (top_id + 1);
4386 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4387 if (htab->stub_group == NULL)
4389 htab->top_id = top_id;
4391 /* We can't use output_bfd->section_count here to find the top output
4392 section index as some sections may have been removed, and
4393 _bfd_strip_section_from_output doesn't renumber the indices. */
4394 for (section = output_bfd->sections, top_index = 0;
4396 section = section->next)
4398 if (top_index < section->index)
4399 top_index = section->index;
4402 htab->top_index = top_index;
4403 amt = sizeof (asection *) * (top_index + 1);
4404 input_list = (asection **) bfd_malloc (amt);
4405 htab->input_list = input_list;
4406 if (input_list == NULL)
4409 /* For sections we aren't interested in, mark their entries with a
4410 value we can check later. */
4411 list = input_list + top_index;
4413 *list = bfd_abs_section_ptr;
4414 while (list-- != input_list);
/* Only code output sections get a (initially empty) input list.  */
4416 for (section = output_bfd->sections;
4418 section = section->next)
4420 if ((section->flags & SEC_CODE) != 0)
4421 input_list[section->index] = NULL;
4427 /* The linker repeatedly calls this function for each input section,
4428 in the order that input sections are linked into output sections.
4429 Build lists of input sections to determine groupings between which
4430 we may insert linker stubs. */
4433 elf32_arm_next_input_section (struct bfd_link_info *info,
4436 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4441 if (isec->output_section->index <= htab->top_index)
4443 asection **list = htab->input_list + isec->output_section->index;
/* bfd_abs_section_ptr marks output sections we don't track (set up in
   elf32_arm_setup_section_lists); only code sections are chained.  */
4445 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
4447 /* Steal the link_sec pointer for our list. */
4448 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4449 /* This happens to make the list in reverse order,
4450 which we reverse later. */
4451 PREV_SEC (isec) = *list;
4457 /* See whether we can group stub sections together. Grouping stub
4458 sections may result in fewer stubs. More importantly, we need to
4459 put all .init* and .fini* stubs at the end of the .init or
4460 .fini output sections respectively, because glibc splits the
4461 _init and _fini functions into multiple parts. Putting a stub in
4462 the middle of a function is not a good idea. */
4465 group_sections (struct elf32_arm_link_hash_table *htab,
4466 bfd_size_type stub_group_size,
4467 bfd_boolean stubs_always_after_branch)
4469 asection **list = htab->input_list;
/* Walk each per-output-section input list built by
   elf32_arm_next_input_section.  */
4473 asection *tail = *list;
4476 if (tail == bfd_abs_section_ptr)
4479 /* Reverse the list: we must avoid placing stubs at the
4480 beginning of the section because the beginning of the text
4481 section may be required for an interrupt vector in bare metal
4483 #define NEXT_SEC PREV_SEC
4485 while (tail != NULL)
4487 /* Pop from tail. */
4488 asection *item = tail;
4489 tail = PREV_SEC (item);
4492 NEXT_SEC (item) = head;
/* Partition the (now forward-ordered) list into groups no larger than
   stub_group_size; each group shares one stub section placed after it.  */
4496 while (head != NULL)
4500 bfd_vma stub_group_start = head->output_offset;
4501 bfd_vma end_of_next;
4504 while (NEXT_SEC (curr) != NULL)
4506 next = NEXT_SEC (curr);
4507 end_of_next = next->output_offset + next->size;
4508 if (end_of_next - stub_group_start >= stub_group_size)
4509 /* End of NEXT is too far from start, so stop. */
4511 /* Add NEXT to the group. */
4515 /* OK, the size from the start to the start of CURR is less
4516 than stub_group_size and thus can be handled by one stub
4517 section. (Or the head section is itself larger than
4518 stub_group_size, in which case we may be toast.)
4519 We should really be keeping track of the total size of
4520 stubs added here, as stubs contribute to the final output
4524 next = NEXT_SEC (head);
4525 /* Set up this stub group. */
4526 htab->stub_group[head->id].link_sec = curr;
4528 while (head != curr && (head = next) != NULL);
4530 /* But wait, there's more! Input sections up to stub_group_size
4531 bytes after the stub section can be handled by it too. */
4532 if (!stubs_always_after_branch)
4534 stub_group_start = curr->output_offset + curr->size;
4536 while (next != NULL)
4538 end_of_next = next->output_offset + next->size;
4539 if (end_of_next - stub_group_start >= stub_group_size)
4540 /* End of NEXT is too far from stubs, so stop. */
4542 /* Add NEXT to the stub group. */
4544 next = NEXT_SEC (head);
4545 htab->stub_group[head->id].link_sec = curr;
4551 while (list++ != htab->input_list + htab->top_index);
/* The per-output-section lists are no longer needed once grouping is
   recorded in stub_group[].link_sec.  */
4553 free (htab->input_list);
4558 /* Comparison function for sorting/searching relocations relating to Cortex-A8
/* qsort/bsearch comparator ordering a8_erratum_reloc records by their
   `from` address.  NOTE(review): the -1/1/0 return lines were dropped by
   extraction.  */
4562 a8_reloc_compare (const void *a, const void *b)
4564 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4565 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4567 if (ra->from < rb->from)
4569 else if (ra->from > rb->from)
/* Forward declaration; used by cortex_a8_erratum_scan below to check
   whether Thumb->ARM glue already exists for a symbol.  */
4575 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4576 const char *, char **);
4578 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4579 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
4580 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
4584 cortex_a8_erratum_scan (bfd *input_bfd,
4585 struct bfd_link_info *info,
4586 struct a8_erratum_fix **a8_fixes_p,
4587 unsigned int *num_a8_fixes_p,
4588 unsigned int *a8_fix_table_size_p,
4589 struct a8_erratum_reloc *a8_relocs,
4590 unsigned int num_a8_relocs,
4591 unsigned prev_num_a8_fixes,
4592 bfd_boolean *stub_changed_p)
4595 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4596 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4597 unsigned int num_a8_fixes = *num_a8_fixes_p;
4598 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4603 for (section = input_bfd->sections;
4605 section = section->next)
4607 bfd_byte *contents = NULL;
4608 struct _arm_elf_section_data *sec_data;
4612 if (elf_section_type (section) != SHT_PROGBITS
4613 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4614 || (section->flags & SEC_EXCLUDE) != 0
4615 || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4616 || (section->output_section == bfd_abs_section_ptr))
4619 base_vma = section->output_section->vma + section->output_offset;
4621 if (elf_section_data (section)->this_hdr.contents != NULL)
4622 contents = elf_section_data (section)->this_hdr.contents;
4623 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4626 sec_data = elf32_arm_section_data (section);
4628 for (span = 0; span < sec_data->mapcount; span++)
4630 unsigned int span_start = sec_data->map[span].vma;
4631 unsigned int span_end = (span == sec_data->mapcount - 1)
4632 ? section->size : sec_data->map[span + 1].vma;
4634 char span_type = sec_data->map[span].type;
4635 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
4637 if (span_type != 't')
4640 /* Span is entirely within a single 4KB region: skip scanning. */
4641 if (((base_vma + span_start) & ~0xfff)
4642 == ((base_vma + span_end) & ~0xfff))
4645 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4647 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4648 * The branch target is in the same 4KB region as the
4649 first half of the branch.
4650 * The instruction before the branch is a 32-bit
4651 length non-branch instruction. */
4652 for (i = span_start; i < span_end;)
4654 unsigned int insn = bfd_getl16 (&contents[i]);
4655 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4656 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4658 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4663 /* Load the rest of the insn (in manual-friendly order). */
4664 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4666 /* Encoding T4: B<c>.W. */
4667 is_b = (insn & 0xf800d000) == 0xf0009000;
4668 /* Encoding T1: BL<c>.W. */
4669 is_bl = (insn & 0xf800d000) == 0xf000d000;
4670 /* Encoding T2: BLX<c>.W. */
4671 is_blx = (insn & 0xf800d000) == 0xf000c000;
4672 /* Encoding T3: B<c>.W (not permitted in IT block). */
4673 is_bcc = (insn & 0xf800d000) == 0xf0008000
4674 && (insn & 0x07f00000) != 0x03800000;
4677 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4679 if (((base_vma + i) & 0xfff) == 0xffe
4683 && ! last_was_branch)
4685 bfd_signed_vma offset = 0;
4686 bfd_boolean force_target_arm = FALSE;
4687 bfd_boolean force_target_thumb = FALSE;
4689 enum elf32_arm_stub_type stub_type = arm_stub_none;
4690 struct a8_erratum_reloc key, *found;
4691 bfd_boolean use_plt = FALSE;
4693 key.from = base_vma + i;
4694 found = (struct a8_erratum_reloc *)
4695 bsearch (&key, a8_relocs, num_a8_relocs,
4696 sizeof (struct a8_erratum_reloc),
4701 char *error_message = NULL;
4702 struct elf_link_hash_entry *entry;
4704 /* We don't care about the error returned from this
4705 function, only if there is glue or not. */
4706 entry = find_thumb_glue (info, found->sym_name,
4710 found->non_a8_stub = TRUE;
4712 /* Keep a simpler condition, for the sake of clarity. */
4713 if (htab->root.splt != NULL && found->hash != NULL
4714 && found->hash->root.plt.offset != (bfd_vma) -1)
4717 if (found->r_type == R_ARM_THM_CALL)
4719 if (found->branch_type == ST_BRANCH_TO_ARM
4721 force_target_arm = TRUE;
4723 force_target_thumb = TRUE;
4727 /* Check if we have an offending branch instruction. */
4729 if (found && found->non_a8_stub)
4730 /* We've already made a stub for this instruction, e.g.
4731 it's a long branch or a Thumb->ARM stub. Assume that
4732 stub will suffice to work around the A8 erratum (see
4733 setting of always_after_branch above). */
4737 offset = (insn & 0x7ff) << 1;
4738 offset |= (insn & 0x3f0000) >> 4;
4739 offset |= (insn & 0x2000) ? 0x40000 : 0;
4740 offset |= (insn & 0x800) ? 0x80000 : 0;
4741 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4742 if (offset & 0x100000)
4743 offset |= ~ ((bfd_signed_vma) 0xfffff);
4744 stub_type = arm_stub_a8_veneer_b_cond;
4746 else if (is_b || is_bl || is_blx)
4748 int s = (insn & 0x4000000) != 0;
4749 int j1 = (insn & 0x2000) != 0;
4750 int j2 = (insn & 0x800) != 0;
4754 offset = (insn & 0x7ff) << 1;
4755 offset |= (insn & 0x3ff0000) >> 4;
4759 if (offset & 0x1000000)
4760 offset |= ~ ((bfd_signed_vma) 0xffffff);
4763 offset &= ~ ((bfd_signed_vma) 3);
4765 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4766 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4769 if (stub_type != arm_stub_none)
4771 bfd_vma pc_for_insn = base_vma + i + 4;
4773 /* The original instruction is a BL, but the target is
4774 an ARM instruction. If we were not making a stub,
4775 the BL would have been converted to a BLX. Use the
4776 BLX stub instead in that case. */
4777 if (htab->use_blx && force_target_arm
4778 && stub_type == arm_stub_a8_veneer_bl)
4780 stub_type = arm_stub_a8_veneer_blx;
4784 /* Conversely, if the original instruction was
4785 BLX but the target is Thumb mode, use the BL
4787 else if (force_target_thumb
4788 && stub_type == arm_stub_a8_veneer_blx)
4790 stub_type = arm_stub_a8_veneer_bl;
4796 pc_for_insn &= ~ ((bfd_vma) 3);
4798 /* If we found a relocation, use the proper destination,
4799 not the offset in the (unrelocated) instruction.
4800 Note this is always done if we switched the stub type
4804 (bfd_signed_vma) (found->destination - pc_for_insn);
4806 /* If the stub will use a Thumb-mode branch to a
4807 PLT target, redirect it to the preceding Thumb
4809 if (stub_type != arm_stub_a8_veneer_blx && use_plt)
4810 offset -= PLT_THUMB_STUB_SIZE;
4812 target = pc_for_insn + offset;
4814 /* The BLX stub is ARM-mode code. Adjust the offset to
4815 take the different PC value (+8 instead of +4) into
4817 if (stub_type == arm_stub_a8_veneer_blx)
4820 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4822 char *stub_name = NULL;
4824 if (num_a8_fixes == a8_fix_table_size)
4826 a8_fix_table_size *= 2;
4827 a8_fixes = (struct a8_erratum_fix *)
4828 bfd_realloc (a8_fixes,
4829 sizeof (struct a8_erratum_fix)
4830 * a8_fix_table_size);
4833 if (num_a8_fixes < prev_num_a8_fixes)
4835 /* If we're doing a subsequent scan,
4836 check if we've found the same fix as
4837 before, and try and reuse the stub
4839 stub_name = a8_fixes[num_a8_fixes].stub_name;
4840 if ((a8_fixes[num_a8_fixes].section != section)
4841 || (a8_fixes[num_a8_fixes].offset != i))
4845 *stub_changed_p = TRUE;
4851 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4852 if (stub_name != NULL)
4853 sprintf (stub_name, "%x:%x", section->id, i);
4856 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4857 a8_fixes[num_a8_fixes].section = section;
4858 a8_fixes[num_a8_fixes].offset = i;
4859 a8_fixes[num_a8_fixes].addend = offset;
4860 a8_fixes[num_a8_fixes].orig_insn = insn;
4861 a8_fixes[num_a8_fixes].stub_name = stub_name;
4862 a8_fixes[num_a8_fixes].stub_type = stub_type;
4863 a8_fixes[num_a8_fixes].branch_type =
4864 is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
4871 i += insn_32bit ? 4 : 2;
4872 last_was_32bit = insn_32bit;
4873 last_was_branch = is_32bit_branch;
4877 if (elf_section_data (section)->this_hdr.contents == NULL)
4881 *a8_fixes_p = a8_fixes;
4882 *num_a8_fixes_p = num_a8_fixes;
4883 *a8_fix_table_size_p = a8_fix_table_size;
4888 /* Determine and set the size of the stub section for a final link.
4890 The basic idea here is to examine all the relocations looking for
4891 PC-relative calls to a target that is unreachable with a "bl"
4895 elf32_arm_size_stubs (bfd *output_bfd,
4897 struct bfd_link_info *info,
4898 bfd_signed_vma group_size,
4899 asection * (*add_stub_section) (const char *, asection *),
4900 void (*layout_sections_again) (void))
/* NOTE(review): this excerpt is missing many original source lines (the
   leading numbers are original line numbers); comments below document
   only what is visible here.  */
4902 bfd_size_type stub_group_size;
4903 bfd_boolean stubs_always_after_branch;
4904 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4905 struct a8_erratum_fix *a8_fixes = NULL;
4906 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4907 struct a8_erratum_reloc *a8_relocs = NULL;
4908 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
/* Allocate the initial Cortex-A8 erratum bookkeeping tables (10 entries
   each); both are grown by bfd_realloc further down when they fill.  */
4913 if (htab->fix_cortex_a8)
4915 a8_fixes = (struct a8_erratum_fix *)
4916 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4917 a8_relocs = (struct a8_erratum_reloc *)
4918 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4921 /* Propagate mach to stub bfd, because it may not have been
4922 finalized when we created stub_bfd. */
4923 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4924 bfd_get_mach (output_bfd));
4926 /* Stash our params away. */
4927 htab->stub_bfd = stub_bfd;
4928 htab->add_stub_section = add_stub_section;
4929 htab->layout_sections_again = layout_sections_again;
/* A negative GROUP_SIZE requests stubs after the branch; its magnitude
   is the actual group size (negated below).  */
4930 stubs_always_after_branch = group_size < 0;
4932 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4933 as the first half of a 32-bit branch straddling two 4K pages. This is a
4934 crude way of enforcing that. */
4935 if (htab->fix_cortex_a8)
4936 stubs_always_after_branch = 1;
4939 stub_group_size = -group_size;
4941 stub_group_size = group_size;
/* A group size of 1 is the sentinel meaning "use the default".  */
4943 if (stub_group_size == 1)
4945 /* Default values. */
4946 /* Thumb branch range is +-4MB has to be used as the default
4947 maximum size (a given section can contain both ARM and Thumb
4948 code, so the worst case has to be taken into account).
4950 This value is 24K less than that, which allows for 2025
4951 12-byte stubs. If we exceed that, then we will fail to link.
4952 The user will have to relink with an explicit group size
4954 stub_group_size = 4170000;
4957 group_sections (htab, stub_group_size, stubs_always_after_branch);
4959 /* If we're applying the cortex A8 fix, we need to determine the
4960 program header size now, because we cannot change it later --
4961 that could alter section placements. Notice the A8 erratum fix
4962 ends up requiring the section addresses to remain unchanged
4963 modulo the page size. That's something we cannot represent
4964 inside BFD, and we don't want to force the section alignment to
4965 be the page size. */
4966 if (htab->fix_cortex_a8)
4967 (*htab->layout_sections_again) ();
/* NOTE(review): the retry-loop header enclosing the body below is not
   visible in this excerpt; stub sizing apparently iterates over all
   inputs until a pass sets no stub_changed.  */
4972 unsigned int bfd_indx;
4974 bfd_boolean stub_changed = FALSE;
4975 unsigned prev_num_a8_fixes = num_a8_fixes;
4978 for (input_bfd = info->input_bfds, bfd_indx = 0;
4980 input_bfd = input_bfd->link_next, bfd_indx++)
4982 Elf_Internal_Shdr *symtab_hdr;
4984 Elf_Internal_Sym *local_syms = NULL;
4988 /* We'll need the symbol table in a second. */
4989 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4990 if (symtab_hdr->sh_info == 0)
4993 /* Walk over each section attached to the input bfd. */
4994 for (section = input_bfd->sections;
4996 section = section->next)
4998 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5000 /* If there aren't any relocs, then there's nothing more
5002 if ((section->flags & SEC_RELOC) == 0
5003 || section->reloc_count == 0
5004 || (section->flags & SEC_CODE) == 0)
5007 /* If this section is a link-once section that will be
5008 discarded, then don't create any stubs. */
5009 if (section->output_section == NULL
5010 || section->output_section->owner != output_bfd)
5013 /* Get the relocs. */
5015 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5016 NULL, info->keep_memory)
5017 if (internal_relocs == NULL)
5018 goto error_ret_free_local;
5020 /* Now examine each relocation. */
5021 irela = internal_relocs;
5022 irelaend = irela + section->reloc_count;
5023 for (; irela < irelaend; irela++)
5025 unsigned int r_type, r_indx;
5026 enum elf32_arm_stub_type stub_type;
5027 struct elf32_arm_stub_hash_entry *stub_entry;
5030 bfd_vma destination;
5031 struct elf32_arm_link_hash_entry *hash;
5032 const char *sym_name;
5034 const asection *id_sec;
5035 unsigned char st_type;
5036 enum arm_st_branch_type branch_type;
5037 bfd_boolean created_stub = FALSE;
/* Decode the reloc's type and its symbol-table index.  */
5039 r_type = ELF32_R_TYPE (irela->r_info);
5040 r_indx = ELF32_R_SYM (irela->r_info);
5042 if (r_type >= (unsigned int) R_ARM_max)
5044 bfd_set_error (bfd_error_bad_value);
/* Shared error exit for this loop: free the reloc buffer unless it is
   cached on the section, then release local symbols.  */
5045 error_ret_free_internal:
5046 if (elf_section_data (section)->relocs == NULL)
5047 free (internal_relocs);
5048 goto error_ret_free_local;
/* Indices >= sh_info are global symbols; fetch the hash entry.
   (Local symbols are read lazily further down.)  */
5052 if (r_indx >= symtab_hdr->sh_info)
5053 hash = elf32_arm_hash_entry
5054 (elf_sym_hashes (input_bfd)
5055 [r_indx - symtab_hdr->sh_info]);
5057 /* Only look for stubs on branch instructions, or
5058 non-relaxed TLSCALL */
5059 if ((r_type != (unsigned int) R_ARM_CALL)
5060 && (r_type != (unsigned int) R_ARM_THM_CALL)
5061 && (r_type != (unsigned int) R_ARM_JUMP24)
5062 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5063 && (r_type != (unsigned int) R_ARM_THM_XPC22)
5064 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5065 && (r_type != (unsigned int) R_ARM_PLT32)
5066 && !((r_type == (unsigned int) R_ARM_TLS_CALL
5067 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5068 && r_type == elf32_arm_tls_transition
5069 (info, r_type, &hash->root)
5070 && ((hash ? hash->tls_type
5071 : (elf32_arm_local_got_tls_type
5072 (input_bfd)[r_indx]))
5073 & GOT_TLS_GDESC) != 0))
5076 /* Now determine the call target, its name, value,
5083 if (r_type == (unsigned int) R_ARM_TLS_CALL
5084 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5086 /* A non-relaxed TLS call. The target is the
5087 plt-resident trampoline and nothing to do
5089 BFD_ASSERT (htab->tls_trampoline > 0);
5090 sym_sec = htab->root.splt;
5091 sym_value = htab->tls_trampoline;
5094 branch_type = ST_BRANCH_TO_ARM;
5098 /* It's a local symbol. */
5099 Elf_Internal_Sym *sym;
/* Read (and cache) the local symbol table on first use.  */
5101 if (local_syms == NULL)
5104 = (Elf_Internal_Sym *) symtab_hdr->contents;
5105 if (local_syms == NULL)
5107 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5108 symtab_hdr->sh_info, 0,
5110 if (local_syms == NULL)
5111 goto error_ret_free_internal;
5114 sym = local_syms + r_indx;
5115 if (sym->st_shndx == SHN_UNDEF)
5116 sym_sec = bfd_und_section_ptr;
5117 else if (sym->st_shndx == SHN_ABS)
5118 sym_sec = bfd_abs_section_ptr;
5119 else if (sym->st_shndx == SHN_COMMON)
5120 sym_sec = bfd_com_section_ptr;
5123 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5126 /* This is an undefined symbol. It can never
5130 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5131 sym_value = sym->st_value;
5132 destination = (sym_value + irela->r_addend
5133 + sym_sec->output_offset
5134 + sym_sec->output_section->vma);
5135 st_type = ELF_ST_TYPE (sym->st_info);
5136 branch_type = ARM_SYM_BRANCH_TYPE (sym);
5138 = bfd_elf_string_from_elf_section (input_bfd,
5139 symtab_hdr->sh_link,
5144 /* It's an external symbol. */
/* Follow indirect/warning links to the real definition.  */
5145 while (hash->root.root.type == bfd_link_hash_indirect
5146 || hash->root.root.type == bfd_link_hash_warning)
5147 hash = ((struct elf32_arm_link_hash_entry *)
5148 hash->root.root.u.i.link);
5150 if (hash->root.root.type == bfd_link_hash_defined
5151 || hash->root.root.type == bfd_link_hash_defweak)
5153 sym_sec = hash->root.root.u.def.section;
5154 sym_value = hash->root.root.u.def.value;
5156 struct elf32_arm_link_hash_table *globals =
5157 elf32_arm_hash_table (info);
5159 /* For a destination in a shared library,
5160 use the PLT stub as target address to
5161 decide whether a branch stub is
5164 && globals->root.splt != NULL
5166 && hash->root.plt.offset != (bfd_vma) -1)
5168 sym_sec = globals->root.splt;
5169 sym_value = hash->root.plt.offset;
5170 if (sym_sec->output_section != NULL)
5171 destination = (sym_value
5172 + sym_sec->output_offset
5173 + sym_sec->output_section->vma);
5175 else if (sym_sec->output_section != NULL)
5176 destination = (sym_value + irela->r_addend
5177 + sym_sec->output_offset
5178 + sym_sec->output_section->vma);
5180 else if ((hash->root.root.type == bfd_link_hash_undefined)
5181 || (hash->root.root.type == bfd_link_hash_undefweak))
5183 /* For a shared library, use the PLT stub as
5184 target address to decide whether a long
5185 branch stub is needed.
5186 For absolute code, they cannot be handled. */
5187 struct elf32_arm_link_hash_table *globals =
5188 elf32_arm_hash_table (info);
5191 && globals->root.splt != NULL
5193 && hash->root.plt.offset != (bfd_vma) -1)
5195 sym_sec = globals->root.splt;
5196 sym_value = hash->root.plt.offset;
5197 if (sym_sec->output_section != NULL)
5198 destination = (sym_value
5199 + sym_sec->output_offset
5200 + sym_sec->output_section->vma);
5207 bfd_set_error (bfd_error_bad_value);
5208 goto error_ret_free_internal;
5210 st_type = hash->root.type;
5211 branch_type = hash->root.target_internal;
5212 sym_name = hash->root.root.root.string;
5217 /* Determine what (if any) linker stub is needed. */
5218 stub_type = arm_type_of_stub (info, section, irela,
5219 st_type, &branch_type,
5220 hash, destination, sym_sec,
5221 input_bfd, sym_name);
5222 if (stub_type == arm_stub_none)
5225 /* Support for grouping stub sections. */
5226 id_sec = htab->stub_group[section->id].link_sec;
5228 /* Get the name of this stub. */
5229 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
5232 goto error_ret_free_internal;
5234 /* We've either created a stub for this reloc already,
5235 or we are about to. */
5236 created_stub = TRUE;
5238 stub_entry = arm_stub_hash_lookup
5239 (&htab->stub_hash_table, stub_name,
5241 if (stub_entry != NULL)
5243 /* The proper stub has already been created. */
5245 stub_entry->target_value = sym_value;
5249 stub_entry = elf32_arm_add_stub (stub_name, section,
5251 if (stub_entry == NULL)
5254 goto error_ret_free_internal;
5257 stub_entry->target_value = sym_value;
5258 stub_entry->target_section = sym_sec;
5259 stub_entry->stub_type = stub_type;
5260 stub_entry->h = hash;
5261 stub_entry->branch_type = branch_type;
5263 if (sym_name == NULL)
5264 sym_name = "unnamed";
5265 stub_entry->output_name = (char *)
5266 bfd_alloc (htab->stub_bfd,
5267 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5268 + strlen (sym_name));
5269 if (stub_entry->output_name == NULL)
5272 goto error_ret_free_internal;
5275 /* For historical reasons, use the existing names for
5276 ARM-to-Thumb and Thumb-to-ARM stubs. */
5277 if ((r_type == (unsigned int) R_ARM_THM_CALL
5278 || r_type == (unsigned int) R_ARM_THM_JUMP24)
5279 && branch_type == ST_BRANCH_TO_ARM)
5280 sprintf (stub_entry->output_name,
5281 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5282 else if ((r_type == (unsigned int) R_ARM_CALL
5283 || r_type == (unsigned int) R_ARM_JUMP24)
5284 && branch_type == ST_BRANCH_TO_THUMB)
5285 sprintf (stub_entry->output_name,
5286 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5288 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
5291 stub_changed = TRUE;
5295 /* Look for relocations which might trigger Cortex-A8
/* Candidate: a Thumb-2 branch reloc whose instruction starts two
   bytes before a 4KB page boundary ((from & 0xfff) == 0xffe), i.e.
   the branch straddles two pages.  */
5297 if (htab->fix_cortex_a8
5298 && (r_type == (unsigned int) R_ARM_THM_JUMP24
5299 || r_type == (unsigned int) R_ARM_THM_JUMP19
5300 || r_type == (unsigned int) R_ARM_THM_CALL
5301 || r_type == (unsigned int) R_ARM_THM_XPC22))
5303 bfd_vma from = section->output_section->vma
5304 + section->output_offset
5307 if ((from & 0xfff) == 0xffe)
5309 /* Found a candidate. Note we haven't checked the
5310 destination is within 4K here: if we do so (and
5311 don't create an entry in a8_relocs) we can't tell
5312 that a branch should have been relocated when
5314 if (num_a8_relocs == a8_reloc_table_size)
5316 a8_reloc_table_size *= 2;
5317 a8_relocs = (struct a8_erratum_reloc *)
5318 bfd_realloc (a8_relocs,
5319 sizeof (struct a8_erratum_reloc)
5320 * a8_reloc_table_size);
5323 a8_relocs[num_a8_relocs].from = from;
5324 a8_relocs[num_a8_relocs].destination = destination;
5325 a8_relocs[num_a8_relocs].r_type = r_type;
5326 a8_relocs[num_a8_relocs].branch_type = branch_type;
5327 a8_relocs[num_a8_relocs].sym_name = sym_name;
5328 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5329 a8_relocs[num_a8_relocs].hash = hash;
5336 /* We're done with the internal relocs, free them. */
5337 if (elf_section_data (section)->relocs == NULL)
5338 free (internal_relocs);
5341 if (htab->fix_cortex_a8)
5343 /* Sort relocs which might apply to Cortex-A8 erratum. */
5344 qsort (a8_relocs, num_a8_relocs,
5345 sizeof (struct a8_erratum_reloc),
5348 /* Scan for branches which might trigger Cortex-A8 erratum. */
5349 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5350 &num_a8_fixes, &a8_fix_table_size,
5351 a8_relocs, num_a8_relocs,
5352 prev_num_a8_fixes, &stub_changed)
5354 goto error_ret_free_local;
5358 if (prev_num_a8_fixes != num_a8_fixes)
5359 stub_changed = TRUE;
5364 /* OK, we've added some stubs. Find out the new size of the
/* Reset each stub section's size, then let arm_size_one_stub
   re-accumulate sizes for every entry in the stub hash table.  */
5366 for (stub_sec = htab->stub_bfd->sections;
5368 stub_sec = stub_sec->next)
5370 /* Ignore non-stub sections. */
5371 if (!strstr (stub_sec->name, STUB_SUFFIX))
5377 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5379 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
5380 if (htab->fix_cortex_a8)
5381 for (i = 0; i < num_a8_fixes; i++)
5383 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5384 a8_fixes[i].section, htab);
5386 if (stub_sec == NULL)
5387 goto error_ret_free_local;
5390 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5395 /* Ask the linker to do its stuff. */
5396 (*htab->layout_sections_again) ();
5399 /* Add stubs for Cortex-A8 erratum fixes now. */
5400 if (htab->fix_cortex_a8)
5402 for (i = 0; i < num_a8_fixes; i++)
5404 struct elf32_arm_stub_hash_entry *stub_entry;
5405 char *stub_name = a8_fixes[i].stub_name;
5406 asection *section = a8_fixes[i].section;
5407 unsigned int section_id = a8_fixes[i].section->id;
5408 asection *link_sec = htab->stub_group[section_id].link_sec;
5409 asection *stub_sec = htab->stub_group[section_id].stub_sec;
5410 const insn_sequence *template_sequence;
5411 int template_size, size = 0;
5413 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5415 if (stub_entry == NULL)
5417 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
5423 stub_entry->stub_sec = stub_sec;
5424 stub_entry->stub_offset = 0;
5425 stub_entry->id_sec = link_sec;
5426 stub_entry->stub_type = a8_fixes[i].stub_type;
5427 stub_entry->target_section = a8_fixes[i].section;
5428 stub_entry->target_value = a8_fixes[i].offset;
5429 stub_entry->target_addend = a8_fixes[i].addend;
5430 stub_entry->orig_insn = a8_fixes[i].orig_insn;
5431 stub_entry->branch_type = a8_fixes[i].branch_type;
5433 size = find_stub_size_and_template (a8_fixes[i].stub_type,
5437 stub_entry->stub_size = size;
5438 stub_entry->stub_template = template_sequence;
5439 stub_entry->stub_template_size = template_size;
5442 /* Stash the Cortex-A8 erratum fix array for use later in
5443 elf32_arm_write_section(). */
5444 htab->a8_erratum_fixes = a8_fixes;
5445 htab->num_a8_erratum_fixes = num_a8_fixes;
5449 htab->a8_erratum_fixes = NULL;
5450 htab->num_a8_erratum_fixes = 0;
/* Error exit.  NOTE(review): the cleanup code following this label
   (freeing local_syms, returning FALSE) is not visible in this
   excerpt.  */
5454 error_ret_free_local:
5458 /* Build all the stubs associated with the current output file. The
5459 stubs are kept in a hash table attached to the main linker hash
5460 table. We also set up the .plt entries for statically linked PIC
5461 functions here. This function is called via arm_elf_finish in the
5465 elf32_arm_build_stubs (struct bfd_link_info *info)
5468 struct bfd_hash_table *table;
5469 struct elf32_arm_link_hash_table *htab;
5471 htab = elf32_arm_hash_table (info);
/* First pass: give every stub section a zero-filled contents buffer of
   its final size, so arm_build_one_stub can write into it.  */
5475 for (stub_sec = htab->stub_bfd->sections;
5477 stub_sec = stub_sec->next)
5481 /* Ignore non-stub sections. */
5482 if (!strstr (stub_sec->name, STUB_SUFFIX))
5485 /* Allocate memory to hold the linker stubs. */
5486 size = stub_sec->size;
5487 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5488 if (stub_sec->contents == NULL && size != 0)
5493 /* Build the stubs as directed by the stub hash table. */
5494 table = &htab->stub_hash_table;
5495 bfd_hash_traverse (table, arm_build_one_stub, info);
5496 if (htab->fix_cortex_a8)
5498 /* Place the cortex a8 stubs last. */
/* NOTE(review): fix_cortex_a8 == -1 apparently tells
   arm_build_one_stub (not visible here) to emit only the A8 veneers
   on this second traversal — confirm against that function.  */
5499 htab->fix_cortex_a8 = -1;
5500 bfd_hash_traverse (table, arm_build_one_stub, info);
5506 /* Locate the Thumb encoded calling stub for NAME. */
5508 static struct elf_link_hash_entry *
5509 find_thumb_glue (struct bfd_link_info *link_info,
5511 char **error_message)
5514 struct elf_link_hash_entry *hash;
5515 struct elf32_arm_link_hash_table *hash_table;
5517 /* We need a pointer to the armelf specific hash table. */
5518 hash_table = elf32_arm_hash_table (link_info);
5519 if (hash_table == NULL)
/* Build the mangled glue-symbol name: THUMB2ARM_GLUE_ENTRY_NAME is an
   sprintf format with NAME substituted in.  */
5522 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5523 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5525 BFD_ASSERT (tmp_name);
5527 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5529 hash = elf_link_hash_lookup
5530 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
/* On lookup failure build *ERROR_MESSAGE with asprintf; if asprintf
   itself fails (returns -1), fall back to the generic bfd error
   string.  NOTE(review): the first clause of this condition was lost
   in extraction.  */
5533 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
5534 tmp_name, name) == -1)
5535 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5542 /* Locate the ARM encoded calling stub for NAME. */
5544 static struct elf_link_hash_entry *
5545 find_arm_glue (struct bfd_link_info *link_info,
5547 char **error_message)
5550 struct elf_link_hash_entry *myh;
5551 struct elf32_arm_link_hash_table *hash_table;
5553 /* We need a pointer to the elfarm specific hash table. */
5554 hash_table = elf32_arm_hash_table (link_info);
5555 if (hash_table == NULL)
/* Build the mangled glue-symbol name: ARM2THUMB_GLUE_ENTRY_NAME is an
   sprintf format with NAME substituted in.  */
5558 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5559 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5561 BFD_ASSERT (tmp_name);
5563 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5565 myh = elf_link_hash_lookup
5566 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
/* On lookup failure build *ERROR_MESSAGE with asprintf; if asprintf
   itself fails (returns -1), fall back to the generic bfd error
   string.  NOTE(review): the first clause of this condition was lost
   in extraction.  */
5569 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
5570 tmp_name, name) == -1)
5571 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
5578 /* ARM->Thumb glue (static images):
5582 ldr r12, __func_addr
5585 .word func @ behave as if you saw a ARM_32 reloc.
5592 .word func @ behave as if you saw a ARM_32 reloc.
5594 (relocatable images)
5597 ldr r12, __func_offset
5603 #define ARM2THUMB_STATIC_GLUE_SIZE 12
/* ldr r12, [pc] -- load __func_addr (see the glue diagram above).  */
5604 static const insn32 a2t1_ldr_insn = 0xe59fc000;
/* bx r12 -- switch to Thumb at the loaded address.  */
5605 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
/* Data word: placeholder for the function address; relocated like an
   ARM_32 reloc (the 1 presumably sets the Thumb bit -- confirm).  */
5606 static const insn32 a2t3_func_addr_insn = 0x00000001;
5608 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
/* ldr pc, [pc, #-4] -- v5: loading into PC interworks directly.  */
5609 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
5610 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
5612 #define ARM2THUMB_PIC_GLUE_SIZE 16
/* PIC variant: ldr r12, [pc, #4]; add r12, r12, pc; bx r12 -- the
   loaded word is a PC-relative offset (see "__func_offset" above).  */
5613 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
5614 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
5615 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
5617 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
5621 __func_from_thumb: __func_from_thumb:
5623 nop ldr r6, __func_addr
5633 #define THUMB2ARM_GLUE_SIZE 8
/* bx pc -- drop from Thumb to ARM state at the following word.  */
5634 static const insn16 t2a1_bx_pc_insn = 0x4778;
/* nop (mov r8, r8) -- pad so the ARM branch below is word-aligned.  */
5635 static const insn16 t2a2_noop_insn = 0x46c0;
/* b <target> -- ARM branch; offset field filled in when emitted.  */
5636 static const insn32 t2a3_b_insn = 0xea000000;
5638 #define VFP11_ERRATUM_VENEER_SIZE 8
5640 #define ARM_BX_VENEER_SIZE 12
/* BX-emulation veneer: tst rN, #1; moveq pc, rN; bx rN -- the
   register field is patched in when the veneer is written.  */
5641 static const insn32 armbx1_tst_insn = 0xe3100001;
5642 static const insn32 armbx2_moveq_insn = 0x01a0f000;
5643 static const insn32 armbx3_bx_insn = 0xe12fff10;
5645 #ifndef ELFARM_NABI_C_INCLUDED
5647 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
5650 bfd_byte * contents;
5654 /* Do not include empty glue sections in the output. */
/* SIZE == 0: mark the section SEC_EXCLUDE and return early (the
   early-return line is not visible in this excerpt).  */
5657 s = bfd_get_section_by_name (abfd, name);
5659 s->flags |= SEC_EXCLUDE;
5664 BFD_ASSERT (abfd != NULL);
5666 s = bfd_get_section_by_name (abfd, name);
5667 BFD_ASSERT (s != NULL);
/* Allocate the backing buffer on ABFD's objalloc; it lives as long as
   the bfd, so no explicit free is needed.  */
5669 contents = (bfd_byte *) bfd_alloc (abfd, size);
5671 BFD_ASSERT (s->size == size);
5672 s->contents = contents;
5676 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
5678 struct elf32_arm_link_hash_table * globals;
5680 globals = elf32_arm_hash_table (info);
5681 BFD_ASSERT (globals != NULL);
/* Allocate contents for each of the four glue/veneer sections owned by
   the glue-owner bfd, using the sizes accumulated during the record_*
   passes.  Empty sections are excluded from the output by the helper.  */
5683 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5684 globals->arm_glue_size,
5685 ARM2THUMB_GLUE_SECTION_NAME);
5687 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5688 globals->thumb_glue_size,
5689 THUMB2ARM_GLUE_SECTION_NAME);
5691 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5692 globals->vfp11_erratum_glue_size,
5693 VFP11_ERRATUM_VENEER_SECTION_NAME);
5695 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5696 globals->bx_glue_size,
5697 ARM_BX_GLUE_SECTION_NAME);
5702 /* Allocate space and symbols for calling a Thumb function from Arm mode.
5703 returns the symbol identifying the stub. */
5705 static struct elf_link_hash_entry *
5706 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5707 struct elf_link_hash_entry * h)
5709 const char * name = h->root.root.string;
5712 struct elf_link_hash_entry * myh;
5713 struct bfd_link_hash_entry * bh;
5714 struct elf32_arm_link_hash_table * globals;
5718 globals = elf32_arm_hash_table (link_info);
5719 BFD_ASSERT (globals != NULL);
5720 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5722 s = bfd_get_section_by_name
5723 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5725 BFD_ASSERT (s != NULL);
/* Build the mangled glue-symbol name for H and check whether the stub
   already exists.  */
5727 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5728 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5730 BFD_ASSERT (tmp_name);
5732 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5734 myh = elf_link_hash_lookup
5735 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5739 /* We've already seen this guy. */
5744 /* The only trick here is using hash_table->arm_glue_size as the value.
5745 Even though the section isn't allocated yet, this is where we will be
5746 putting it. The +1 on the value marks that the stub has not been
5747 output yet - not that it is a Thumb function. */
5749 val = globals->arm_glue_size + 1;
5750 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5751 tmp_name, BSF_GLOBAL, s, val,
5752 NULL, TRUE, FALSE, &bh);
5754 myh = (struct elf_link_hash_entry *) bh;
5755 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5756 myh->forced_local = 1;
/* Reserve space for the stub: PIC veneer for shared/relocatable links,
   the shorter v5 sequence when BLX is available, else the full static
   three-word sequence.  */
5760 if (link_info->shared || globals->root.is_relocatable_executable
5761 || globals->pic_veneer)
5762 size = ARM2THUMB_PIC_GLUE_SIZE;
5763 else if (globals->use_blx)
5764 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5766 size = ARM2THUMB_STATIC_GLUE_SIZE;
5769 globals->arm_glue_size += size;
5774 /* Allocate space for ARMv4 BX veneers. */
5777 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5780 struct elf32_arm_link_hash_table *globals;
5782 struct elf_link_hash_entry *myh;
5783 struct bfd_link_hash_entry *bh;
5786 /* BX PC does not need a veneer. */
5790 globals = elf32_arm_hash_table (link_info);
5791 BFD_ASSERT (globals != NULL);
5792 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5794 /* Check if this veneer has already been allocated. */
5795 if (globals->bx_glue_offset[reg])
5798 s = bfd_get_section_by_name
5799 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5801 BFD_ASSERT (s != NULL);
5803 /* Add symbol for veneer. */
/* The veneer name encodes the register number (ARM_BX_GLUE_ENTRY_NAME
   is an sprintf format taking REG).  */
5805 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5807 BFD_ASSERT (tmp_name);
5809 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5811 myh = elf_link_hash_lookup
5812 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
/* The offset cache above should have caught duplicates.  */
5814 BFD_ASSERT (myh == NULL);
5817 val = globals->bx_glue_size;
5818 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5819 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5820 NULL, TRUE, FALSE, &bh);
5822 myh = (struct elf_link_hash_entry *) bh;
5823 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5824 myh->forced_local = 1;
5826 s->size += ARM_BX_VENEER_SIZE;
/* Cache offset|2 so a zero entry still means "not allocated";
   NOTE(review): the meaning of the low tag bits is inferred — confirm
   against the code that consumes bx_glue_offset.  */
5827 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5828 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
5832 /* Add an entry to the code/data map for section SEC. */
5835 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5837 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5838 unsigned int newidx;
/* Lazily create the map with room for a single entry.  */
5840 if (sec_data->map == NULL)
5842 sec_data->map = (elf32_arm_section_map *)
5843 bfd_malloc (sizeof (elf32_arm_section_map));
5844 sec_data->mapcount = 0;
5845 sec_data->mapsize = 1;
5848 newidx = sec_data->mapcount++;
/* Grow geometrically once the count exceeds the allocation;
   bfd_realloc_or_free releases the old buffer on failure.  */
5850 if (sec_data->mapcount > sec_data->mapsize)
5852 sec_data->mapsize *= 2;
5853 sec_data->map = (elf32_arm_section_map *)
5854 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5855 * sizeof (elf32_arm_section_map));
/* Record the mapping-symbol entry: TYPE is the mapping class
   (e.g. 'a' for ARM code, 't' for Thumb) at VMA.  */
5860 sec_data->map[newidx].vma = vma;
5861 sec_data->map[newidx].type = type;
5866 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5867 veneers are handled for now. */
5870 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5871 elf32_vfp11_erratum_list *branch,
5873 asection *branch_sec,
5874 unsigned int offset)
5877 struct elf32_arm_link_hash_table *hash_table;
5879 struct elf_link_hash_entry *myh;
5880 struct bfd_link_hash_entry *bh;
5882 struct _arm_elf_section_data *sec_data;
5883 elf32_vfp11_erratum_list *newerr;
5885 hash_table = elf32_arm_hash_table (link_info);
5886 BFD_ASSERT (hash_table != NULL);
5887 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5889 s = bfd_get_section_by_name
5890 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5892 sec_data = elf32_arm_section_data (s);
5894 BFD_ASSERT (s != NULL);
/* The veneer symbol name embeds the running fix index; +10 leaves
   room for the formatted decimal number.  */
5896 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5897 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5899 BFD_ASSERT (tmp_name);
5901 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5902 hash_table->num_vfp11_fixes);
5904 myh = elf_link_hash_lookup
5905 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
/* Each veneer gets a fresh index, so the name must be unused.  */
5907 BFD_ASSERT (myh == NULL);
5910 val = hash_table->vfp11_erratum_glue_size;
5911 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5912 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5913 NULL, TRUE, FALSE, &bh);
5915 myh = (struct elf_link_hash_entry *) bh;
5916 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5917 myh->forced_local = 1;
5919 /* Link veneer back to calling location. */
5920 sec_data->erratumcount += 1;
5921 newerr = (elf32_vfp11_erratum_list *)
5922 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5924 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5926 newerr->u.v.branch = branch;
5927 newerr->u.v.id = hash_table->num_vfp11_fixes;
5928 branch->u.b.veneer = newerr;
/* Push onto the veneer section's erratum list (head insertion).  */
5930 newerr->next = sec_data->erratumlist;
5931 sec_data->erratumlist = newerr;
5933 /* A symbol for the return from the veneer. */
5934 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5935 hash_table->num_vfp11_fixes);
5937 myh = elf_link_hash_lookup
5938 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
/* Place the return symbol in the branch's own section; NOTE(review):
   the computation of VAL for this call is not visible in this
   excerpt.  */
5945 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5946 branch_sec, val, NULL, TRUE, FALSE, &bh);
5948 myh = (struct elf_link_hash_entry *) bh;
5949 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5950 myh->forced_local = 1;
5954 /* Generate a mapping symbol for the veneer section, and explicitly add an
5955 entry for that symbol to the code/data map for the section. */
/* Only on the first veneer (section still empty).  */
5956 if (hash_table->vfp11_erratum_glue_size == 0)
5959 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5960 ever requires this erratum fix. */
5961 _bfd_generic_link_add_one_symbol (link_info,
5962 hash_table->bfd_of_glue_owner, "$a",
5963 BSF_LOCAL, s, 0, NULL,
5966 myh = (struct elf_link_hash_entry *) bh;
5967 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5968 myh->forced_local = 1;
5970 /* The elf32_arm_init_maps function only cares about symbols from input
5971 BFDs. We must make a note of this generated mapping symbol
5972 ourselves so that code byteswapping works properly in
5973 elf32_arm_write_section. */
5974 elf32_arm_section_map_add (s, 'a', 0);
/* Reserve space for this veneer and bump the running counters.  */
5977 s->size += VFP11_ERRATUM_VENEER_SIZE;
5978 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5979 hash_table->num_vfp11_fixes++;
5981 /* The offset of the veneer. */
/* Section flags given to every linker-created ARM glue section:
   read-only, loadable code kept in memory by the linker itself.  */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)
/* Create a fake section for use by the ARM backend of the linker.
   Returns success/failure; an already-existing section of the same
   NAME is reused rather than recreated.  */

arm_make_glue_section (bfd * abfd, const char * name)

  /* Reuse the section if a previous call already created it.  */
  sec = bfd_get_section_by_name (abfd, name);

  sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);

      /* Glue code requires 4-byte (2**2) alignment.  */
      || !bfd_set_section_alignment (abfd, sec, 2))

  /* Set the gc mark to prevent the section from being removed by garbage
     collection, despite the fact that no relocs refer to this section. */
/* Add the glue sections to ABFD. This function is called from the
   linker scripts in ld/emultempl/{armelf}.em. */

bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
					struct bfd_link_info *info)

  /* If we are only performing a partial
     link do not bother adding the glue. */
  if (info->relocatable)

  /* Create all four glue sections; the result is TRUE only if every
     one of them could be made.  */
  return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
    && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
    && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
    && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
/* Select a BFD to be used to hold the sections used by the glue code.
   This function is called from the linker scripts in ld/emultempl/
   {armelf}.em.  The first suitable input bfd becomes the glue owner;
   later calls are no-ops.  */

bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)

  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link
     do not bother getting a bfd to hold the glue. */
  if (info->relocatable)

  /* Make sure we don't attach the glue sections to a dynamic object. */
  BFD_ASSERT (!(abfd->flags & DYNAMIC));

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);

  /* An owner has already been chosen; keep it.  */
  if (globals->bfd_of_glue_owner != NULL)

  /* Save the bfd for later use. */
  globals->bfd_of_glue_owner = abfd;
/* Decide whether interworking veneers may use the BLX instruction,
   based on the CPU architecture build attribute of the output bfd.  */

check_use_blx (struct elf32_arm_link_hash_table *globals)

  cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,

  if (globals->fix_arm1176)
      /* With the ARM1176 erratum workaround enabled, restrict BLX to
	 architectures not affected by that erratum.  */
      if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
	globals->use_blx = 1;

      /* BLX exists on anything newer than ARMv4T.  */
      if (cpu_arch > TAG_CPU_ARCH_V4T)
	globals->use_blx = 1;
/* Scan the relocations of every section of ABFD before section sizes are
   frozen, recording which ARM->Thumb interworking veneers and ARMv4 BX
   veneers will be needed for the final link.  */

bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)

  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (link_info->relocatable)

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  /* BE8 (byte-swapped code) output is only meaningful for big-endian
     targets.  */
  if (globals->byteswap_code && !bfd_big_endian (abfd))
      _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  for (; sec != NULL; sec = sec->next)

      /* Nothing to examine in a section without relocations.  */
      if (sec->reloc_count == 0)

      if ((sec->flags & SEC_EXCLUDE) != 0)

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)

	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)

	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;

	      /* Go get them off disk.  */
	      if (! bfd_malloc_and_get_section (abfd, sec, &contents))

	  if (r_type == R_ARM_V4BX)

	      /* The low four bits of a BX instruction encode the target
		 register.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);

	  /* If the relocation is not against a symbol it cannot concern us. */

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation. */

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)

	  /* This one is a call from arm code.  We need to look up
	     the target of the call.  If it is a thumb target, we
	     insert glue.  */
	  if (h->target_internal == ST_BRANCH_TO_THUMB)
	    record_arm_to_thumb_glue (link_info, h);

      /* Release buffers we allocated ourselves; cached copies belong to
	 the section data and must not be freed here.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;

  /* Error path: same clean-up as above.  */
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)

  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);
/* Initialise maps of ARM/Thumb/data for input BFDs.  Reads the local
   symbols and records every $a/$t/$d mapping symbol in its section's
   code/data map.  */

bfd_elf32_arm_init_maps (bfd *abfd)

  Elf_Internal_Sym *isymbuf;
  Elf_Internal_Shdr *hdr;
  unsigned int i, localsyms;

  /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
  if (! is_arm_elf (abfd))

  if ((abfd->flags & DYNAMIC) != 0)

  hdr = & elf_symtab_hdr (abfd);
  localsyms = hdr->sh_info;

  /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
     should contain the number of local symbols, which should come before any
     global symbols. Mapping symbols are always local. */
  isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,

  /* No internal symbols read? Skip this BFD. */
  if (isymbuf == NULL)

  for (i = 0; i < localsyms; i++)

      Elf_Internal_Sym *isym = &isymbuf[i];
      asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);

	  && ELF_ST_BIND (isym->st_info) == STB_LOCAL)

	  name = bfd_elf_string_from_elf_section (abfd,
						  hdr->sh_link, isym->st_name);

	  /* name[1] is the mapping class character ('a', 't' or 'd').  */
	  if (bfd_is_arm_special_symbol_name (name,
					      BFD_ARM_SPECIAL_SYM_TYPE_MAP))
	    elf32_arm_section_map_add (sec, name[1], isym->st_value);
/* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
   say what they wanted. */

bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)

  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);

  if (globals == NULL)

  /* -1 means the user left the decision to the linker.  */
  if (globals->fix_cortex_a8 == -1)

      /* Turn on Cortex-A8 erratum workaround for ARMv7-A.  */
      if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
	  && (out_attr[Tag_CPU_arch_profile].i == 'A'
	      || out_attr[Tag_CPU_arch_profile].i == 0))
	globals->fix_cortex_a8 = 1;

	globals->fix_cortex_a8 = 0;
/* Choose the VFP11 denormal erratum fix mode, downgrading the setting to
   "none" when the target architecture cannot be affected.  */

bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)

  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);

  if (globals == NULL)

  /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix.  */
  if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)

      switch (globals->vfp11_fix)

	case BFD_ARM_VFP11_FIX_DEFAULT:
	case BFD_ARM_VFP11_FIX_NONE:
	  globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;

	  /* Give a warning, but do as the user requests anyway.  */
	  (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
				   "workaround is not necessary for target architecture"), obfd);

  else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
    /* For earlier architectures, we might need the workaround, but do not
       enable it by default.  If the user is running with broken hardware,
       they must enable the erratum fix explicitly.  */
    globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
/* Classification of the VFP11 pipeline used by an instruction.
   NOTE(review): enumerator list not visible in this view — confirm against
   the full source.  */
enum bfd_arm_vfp11_pipe

/* Return a VFP register number. This is encoded as RX:X for single-precision
   registers, or X:RX for double-precision registers, where RX is the group of
   four bits in the instruction encoding and X is the single extension bit.
   RX and X fields are specified using their lowest (starting) bit. The return
   value is:

     0...31: single-precision registers s0...s31
     32...63: double-precision registers d0...d31.

   Although X should be zero for VFP11 (encoding d0...d15 only), we might
   encounter VFP3 instructions, so we allow the full range for DP registers. */

bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,

    /* Double precision: X:RX, biased by 32 to land in 32...63.  */
    return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;

    /* Single precision: RX:X with X as the low-order bit.  */
    return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno(). Ignore d16-d31. */

bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)

    /* A double-precision register covers two adjacent mask bits.  */
    *wmask |= 3 << ((reg - 32) * 2);
/* Return TRUE if WMASK overwrites anything in REGS. */

bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)

  for (i = 0; i < numregs; i++)

      unsigned int reg = regs[i];

      /* Single-precision register: one mask bit.  */
      if (reg < 32 && (wmask & (1 << reg)) != 0)

      /* Double-precision register: two adjacent mask bits.  */
      if ((wmask & (3 << (reg * 2))) != 0)
/* In this function, we're interested in two things: finding input registers
   for VFP data-processing instructions, and finding the set of registers which
   arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
   hold the written set, so FLDM etc. are easy to deal with (we're only
   interested in 32 SP registers or 16 dp registers, due to the VFP version
   implemented by the chip in question). DP registers are marked by setting
   both SP registers in the write mask). */

static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,

  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn. */

      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Reassemble the P, Q, R, S opcode bits into a single selector.  */
      pqrs = ((insn & 0x00800000) >> 20)
	     | ((insn & 0x00300000) >> 19)
	     | ((insn & 0x00000040) >> 6);

	case 0: /* fmac[sd]. */
	case 1: /* fnmac[sd]. */
	case 2: /* fmsc[sd]. */
	case 3: /* fnmsc[sd]. */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn. */

	case 4: /* fmul[sd]. */
	case 5: /* fnmul[sd]. */
	case 6: /* fadd[sd]. */
	case 7: /* fsub[sd]. */

	case 8: /* fdiv[sd]. */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn. */

	case 15: /* extended opcode. */

	    unsigned int extn = ((insn >> 15) & 0x1e)
				| ((insn >> 7) & 1);

	      case 0: /* fcpy[sd]. */
	      case 1: /* fabs[sd]. */
	      case 2: /* fneg[sd]. */
	      case 8: /* fcmp[sd]. */
	      case 9: /* fcmpe[sd]. */
	      case 10: /* fcmpz[sd]. */
	      case 11: /* fcmpez[sd]. */
	      case 16: /* fuito[sd]. */
	      case 17: /* fsito[sd]. */
	      case 24: /* ftoui[sd]. */
	      case 25: /* ftouiz[sd]. */
	      case 26: /* ftosi[sd]. */
	      case 27: /* ftosiz[sd]. */
		/* These instructions will not bounce due to underflow.  */

	      case 3: /* fsqrt[sd]. */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);

	      case 15: /* fcvt{ds,sd}. */
		bfd_arm_vfp11_write_mask (destmask, fd);

		/* Only FCVTSD can underflow.  */
		if ((insn & 0x100) != 0)

  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)

      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* L bit clear: transfer to VFP registers, so FM is written.  */
      if ((insn & 0x100000) == 0)
	    bfd_arm_vfp11_write_mask (destmask, fm);

	    /* Double: both halves of the register pair are written.  */
	    bfd_arm_vfp11_write_mask (destmask, fm);
	    bfd_arm_vfp11_write_mask (destmask, fm + 1);

  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn. */

      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

	case 0: /* Two-reg transfer.  We should catch these above.  */

	case 2: /* fldm[sdx]. */

	    /* The low byte holds the register count for multi-loads.  */
	    unsigned int i, offset = insn & 0xff;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);

	case 4: /* fld[sd]. */
	  bfd_arm_vfp11_write_mask (destmask, fd);

  /* Single-register transfer. Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)

      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

	case 0: /* fmsr/fmdlr. */
	case 1: /* fmdhr. */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
/* Forward declaration: comparator used to sort the section mapping-symbol
   array below.  */
static int elf32_arm_compare_mapping (const void * a, const void * b);


/* Look for potentially-troublesome code sequences which might trigger the
   VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
   (available from ARM) for details of the erratum. A short version is
   described in ld.texinfo. */

bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)

  bfd_byte *contents = NULL;

  int regs[3], numregs = 0;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);

  if (globals == NULL)

  /* We use a simple FSM to match troublesome VFP11 instruction sequences.
     The states transition as follows:

     0 -> 1 (vector) or 0 -> 2 (scalar)
	 A VFP FMAC-pipeline instruction has been seen. Fill
	 regs[0]..regs[numregs-1] with its input operands. Remember this
	 instruction in 'first_fmac'.

	 Any instruction, except for a VFP instruction which overwrites
	 regs[*].

	 A VFP instruction has been seen which overwrites any of regs[*].
	 We must make a veneer!  Reset state to 0 before examining next
	 instruction.

     If we fail to match anything in state 2, reset to state 0 and reset
     the instruction pointer to the instruction after 'first_fmac'.

     If the VFP11 vector mode is in use, there must be at least two unrelated
     instructions between anti-dependent VFP11 instructions to properly avoid
     triggering the erratum, hence the use of the extra state 1. */

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (link_info->relocatable)

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))

  /* We should have chosen a fix type by the time we get here.  */
  BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);

  if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)

  for (sec = abfd->sections; sec != NULL; sec = sec->next)

      unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)

      sec_data = elf32_arm_section_data (sec);

      if (sec_data->mapcount == 0)

      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))

      /* Sort the mapping symbols by address so spans can be walked in
	 order.  */
      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)

	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
				  ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;

	  /* FIXME: Only ARM mode is supported at present.  We may need to
	     support Thumb-2 mode also at some point.  */
	  if (span_type != 'a')

	  for (i = span_start; i < span_end;)

	      unsigned int next_i = i + 4;
	      /* Read the 32-bit instruction in target byte order.  */
	      unsigned int insn = bfd_big_endian (abfd)
		? (contents[i] << 24)
		  | (contents[i + 1] << 16)
		  | (contents[i + 2] << 8)
		: (contents[i + 3] << 24)
		  | (contents[i + 2] << 16)
		  | (contents[i + 1] << 8)
	      unsigned int writemask = 0;
	      enum bfd_arm_vfp11_pipe vpipe;

		  vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
		  /* I'm assuming the VFP11 erratum can trigger with denorm
		     operands on either the FMAC or the DS pipeline.  This might
		     lead to slightly overenthusiastic veneer insertion.  */
		  if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)

		      state = use_vector ? 1 : 2;

		      veneer_of_insn = insn;

		    int other_regs[3], other_numregs;
		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,

		    if (vpipe != VFP11_BAD
			&& bfd_arm_vfp11_antidependency (writemask, regs,

		    int other_regs[3], other_numregs;
		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,

		    if (vpipe != VFP11_BAD
			&& bfd_arm_vfp11_antidependency (writemask, regs,

		      /* Restart scanning just after the remembered FMAC.  */
		      next_i = first_fmac + 4;

		  abort ();  /* Should be unreachable.  */

		  /* Record the erratum occurrence and create a veneer.  */
		  elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
		      bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));

		  elf32_arm_section_data (sec)->erratumcount += 1;

		  newerr->u.b.vfp_insn = veneer_of_insn;

		  newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;

		  record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,

		  newerr->next = sec_data->erratumlist;
		  sec_data->erratumlist = newerr;

      /* Free section contents we fetched ourselves (not cached copies).  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)

  /* Error path: same clean-up as above.  */
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
/* Find virtual-memory addresses for VFP11 erratum veneers and return locations
   after sections have been laid out, using specially-named symbols. */

bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
					  struct bfd_link_info *link_info)

  struct elf32_arm_link_hash_table *globals;

  if (link_info->relocatable)

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)

  /* Buffer large enough for the veneer name template plus a decimal id.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);

  for (sec = abfd->sections; sec != NULL; sec = sec->next)

      struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
      elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;

      for (; errnode != NULL; errnode = errnode->next)

	  struct elf_link_hash_entry *myh;

	  switch (errnode->type)

	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	    case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
	      /* Find veneer symbol.  */
	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
		       errnode->u.b.veneer->u.v.id);

	      myh = elf_link_hash_lookup
		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

		(*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
					 "`%s'"), abfd, tmp_name);

	      vma = myh->root.u.def.section->output_section->vma
		    + myh->root.u.def.section->output_offset
		    + myh->root.u.def.value;

	      errnode->u.b.veneer->vma = vma;

	    case VFP11_ERRATUM_ARM_VENEER:
	    case VFP11_ERRATUM_THUMB_VENEER:
	      /* Find return location.  */
	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",

	      myh = elf_link_hash_lookup
		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

		(*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
					 "`%s'"), abfd, tmp_name);

	      vma = myh->root.u.def.section->output_section->vma
		    + myh->root.u.def.section->output_offset
		    + myh->root.u.def.value;

	      errnode->u.v.branch->vma = vma;
/* Set target relocation values needed during linking.  Copies the
   command-line controlled options (TARGET1/TARGET2 handling, v4bx fix,
   BLX usage, VFP11 fix mode, PIC veneers, Cortex-A8 and ARM1176 erratum
   fixes, warning suppression) into the link hash table and output tdata.  */

bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
				 struct bfd_link_info *link_info,
				 char * target2_type,
				 bfd_arm_vfp11_fix vfp11_fix,
				 int no_enum_warn, int no_wchar_warn,
				 int pic_veneer, int fix_cortex_a8,

  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (link_info);
  if (globals == NULL)

  globals->target1_is_rel = target1_is_rel;

  /* Map the textual TARGET2 option onto a relocation type.  */
  if (strcmp (target2_type, "rel") == 0)
    globals->target2_reloc = R_ARM_REL32;
  else if (strcmp (target2_type, "abs") == 0)
    globals->target2_reloc = R_ARM_ABS32;
  else if (strcmp (target2_type, "got-rel") == 0)
    globals->target2_reloc = R_ARM_GOT_PREL;

      _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),

  globals->fix_v4bx = fix_v4bx;
  globals->use_blx |= use_blx;
  globals->vfp11_fix = vfp11_fix;
  globals->pic_veneer = pic_veneer;
  globals->fix_cortex_a8 = fix_cortex_a8;
  globals->fix_arm1176 = fix_arm1176;

  BFD_ASSERT (is_arm_elf (output_bfd));
  elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
  elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
/* Replace the target offset of a Thumb bl or b.w instruction.  INSN points
   at the first halfword of the instruction pair; OFFSET is the (halfword
   aligned) branch displacement to encode.  */

insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)

  /* Thumb branch targets are always halfword aligned.  */
  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;

  /* First halfword: sign bit and offset bits 12-21.  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Second halfword: J1/J2 bits (inverted and XORed with the sign for
     the T2 encoding) and offset bits 1-11.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
/* Thumb code calling an ARM function.  Emit (once) the Thumb->ARM stub in
   the glue section and patch the original BL to branch to it.  */

elf32_thumb_to_arm_stub (struct bfd_link_info * info,

			 asection *      input_section,
			 bfd_byte *      hit_data,

			 bfd_signed_vma  addend,

			 char **error_message)

  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_thumb_glue (info, name, error_message);

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
			       THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* The low bit of the offset flags a stub that has not been written yet.  */
  if ((my_offset & 0x01) == 0x01)

	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))

	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       "  first occurrence: %B: Thumb call to ARM"),
	     sym_sec->owner, input_bfd, name);

      myh->root.u.def.value = my_offset;

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)

	/* Offset from the start of the current section
	   to the start of the stubs.  */

	/* Offset of the start of this stub from the start of the stubs.  */

	/* Address of the start of the current section.  */
	+ s->output_section->vma)

	/* The branch instruction is 4 bytes into the stub.  */

	/* ARM branches work from the pc of the instruction + 8.  */

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */

    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset

    /* Addend in the relocation.  */

    /* Biassing for PC-relative addressing.  */

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
/* Populate an Arm to Thumb stub.  Returns the stub symbol.  Three stub
   flavours are emitted: PIC (ldr/add/bx r12), BLX-capable, and the plain
   v4t ldr/bx sequence.  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,

			     char **           error_message)

  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_arm_glue (info, name, error_message);

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* The low bit of the offset flags a stub that has not been written yet.  */
  if ((my_offset & 0x01) == 0x01)

	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))

	  (*_bfd_error_handler)
	    (_("%B(%s): warning: interworking not enabled.\n"
	       "  first occurrence: %B: arm call to thumb"),
	     sym_sec->owner, input_bfd, name);

      myh->root.u.def.value = my_offset;

      if (info->shared || globals->root.is_relocatable_executable
	  || globals->pic_veneer)

	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma

	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);

      else if (globals->use_blx)

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

  BFD_ASSERT (my_offset <= globals->arm_glue_size);
/* Arm code calling a Thumb function.  Create (or reuse) the ARM->Thumb
   stub and rewrite the original branch instruction to target it.  */

elf32_arm_to_thumb_stub (struct bfd_link_info * info,

			 asection *      input_section,
			 bfd_byte *      hit_data,

			 bfd_signed_vma  addend,

			 char **error_message)

  unsigned long int tmp;

  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
			       ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);

  my_offset = myh->root.u.def.value;

  /* Keep the condition/opcode byte of the original instruction only.  */
  tmp = bfd_get_32 (input_bfd, hit_data);
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma

  /* Insert the 24-bit word-offset field of the branch.  */
  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
/* Populate Arm stub for an exported Thumb function.  Traversal callback
   for the link hash table; INF is the bfd_link_info.  */

elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)

  struct bfd_link_info * info = (struct bfd_link_info *) inf;

  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;

  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
			       ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Final address of the glue symbol the stub must reach.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
/* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.
   Each veneer is written at most once; bit 0 of bx_glue_offset records
   whether it has been emitted.  */

elf32_arm_bx_glue (struct bfd_link_info * info, int reg)

  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
			       ARM_BX_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 1 marks that a veneer slot was reserved for this register.  */
  BFD_ASSERT (globals->bx_glue_offset[reg] & 2);

  glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;

  /* Emit the tst/moveq/bx sequence the first time this veneer is used.  */
  if ((globals->bx_glue_offset[reg] & 1) == 0)

      p = s->contents + glue_addr;
      bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
      bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
      bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
      globals->bx_glue_offset[reg] |= 1;

  return glue_addr + s->output_section->vma + s->output_offset;
/* Generate Arm stubs for exported Thumb symbols. */
/* Implementation of the ELF backend begin_write_processing hook.  */
elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
struct bfd_link_info *link_info)
struct elf32_arm_link_hash_table * globals;
if (link_info == NULL)
/* Ignore this if we are not called by the ELF backend linker. */
globals = elf32_arm_hash_table (link_info);
if (globals == NULL)
/* If blx is available then exported Thumb symbols are OK and there is
   nothing to do. */
if (globals->use_blx)
elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC. */
elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
bfd_size_type count)
struct elf32_arm_link_hash_table *htab;
htab = elf32_arm_hash_table (info);
/* Dynamic relocations only make sense once dynamic sections exist.  */
BFD_ASSERT (htab->root.dynamic_sections_created);
/* RELOC_SIZE picks Rel vs Rela size based on the target's use_rel.  */
sreloc->size += RELOC_SIZE (htab) * count;
/* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
dynamic, the relocations should go in SRELOC, otherwise they should
go in the special .rel.iplt section. */
elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
bfd_size_type count)
struct elf32_arm_link_hash_table *htab;
htab = elf32_arm_hash_table (info);
if (!htab->root.dynamic_sections_created)
/* Static link: the IRELATIVE relocations live in .rel.iplt.  */
htab->root.irelplt->size += RELOC_SIZE (htab) * count;
/* Dynamic link: count them against the caller-supplied section.  */
BFD_ASSERT (sreloc != NULL);
sreloc->size += RELOC_SIZE (htab) * count;
/* Add relocation REL to the end of relocation section SRELOC. */
elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
asection *sreloc, Elf_Internal_Rela *rel)
struct elf32_arm_link_hash_table *htab;
htab = elf32_arm_hash_table (info);
/* In a static link, IRELATIVE relocations are redirected to .rel.iplt;
   this mirrors the sizing decision in elf32_arm_allocate_irelocs.  */
if (!htab->root.dynamic_sections_created
&& ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
sreloc = htab->root.irelplt;
/* Append at the next free slot, bumping the running count.  */
loc = sreloc->contents;
loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
/* Overrunning the space reserved during sizing is a linker bug.  */
if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
/* Swap REL out in the target's Rel or Rela external format.  */
SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
/* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
.plt. */
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
bfd_boolean is_iplt_entry,
union gotplt_union *root_plt,
struct arm_plt_info *arm_plt)
struct elf32_arm_link_hash_table *htab;
htab = elf32_arm_hash_table (info);
/* IFUNC entries are carved out of .iplt/.igot.plt.  */
splt = htab->root.iplt;
sgotplt = htab->root.igotplt;
/* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt. */
elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
/* Ordinary entries are carved out of .plt/.got.plt.  */
splt = htab->root.splt;
sgotplt = htab->root.sgotplt;
/* Allocate room for an R_JUMP_SLOT relocation in .rel.plt. */
elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
/* If this is the first .plt entry, make room for the special
   first entry. */
if (splt->size == 0)
splt->size += htab->plt_header_size;
/* Allocate the PLT entry itself, including any leading Thumb stub. */
if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
splt->size += PLT_THUMB_STUB_SIZE;
/* Record the entry's offset before growing the section past it.  */
root_plt->offset = splt->size;
splt->size += htab->plt_entry_size;
if (!htab->symbian_p)
/* We also need to make an entry in the .got.plt section, which
will be placed in the .got section by the linker script. */
/* Skip back over the slots reserved for TLS descriptors.  */
arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
7458 arm_movw_immediate (bfd_vma value)
7460 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
7464 arm_movt_immediate (bfd_vma value)
7466 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
/* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
the entry lives in .iplt and resolves to (*SYM_VALUE)().
Otherwise, DYNINDX is the index of the symbol in the dynamic
symbol table and SYM_VALUE is undefined.

ROOT_PLT points to the offset of the PLT entry from the start of its
section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
bookkeeping information. */
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
union gotplt_union *root_plt,
struct arm_plt_info *arm_plt,
int dynindx, bfd_vma sym_value)
struct elf32_arm_link_hash_table *htab;
Elf_Internal_Rela rel;
bfd_vma plt_header_size;
bfd_vma got_header_size;
htab = elf32_arm_hash_table (info);
/* Pick the appropriate sections and sizes. */
/* IFUNC case: use .iplt, .igot.plt and .rel.iplt.  */
splt = htab->root.iplt;
sgot = htab->root.igotplt;
srel = htab->root.irelplt;
/* There are no reserved entries in .igot.plt, and no special
first entry in .iplt. */
got_header_size = 0;
plt_header_size = 0;
/* Ordinary case: use .plt, .got.plt and .rel.plt, which have
   reserved header entries.  */
splt = htab->root.splt;
sgot = htab->root.sgotplt;
srel = htab->root.srelplt;
got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
plt_header_size = htab->plt_header_size;
BFD_ASSERT (splt != NULL && srel != NULL);
/* Fill in the entry in the procedure linkage table. */
if (htab->symbian_p)
/* Symbian PLT entries always reference a dynamic symbol.  */
BFD_ASSERT (dynindx >= 0);
put_arm_insn (htab, output_bfd,
elf32_arm_symbian_plt_entry[0],
splt->contents + root_plt->offset);
bfd_put_32 (output_bfd,
elf32_arm_symbian_plt_entry[1],
splt->contents + root_plt->offset + 4);
/* Fill in the entry in the .rel.plt section. */
rel.r_offset = (splt->output_section->vma
+ splt->output_offset
+ root_plt->offset + 4);
rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
/* Get the index in the procedure linkage table which
corresponds to this symbol. This is the index of this symbol
in all the symbols for which we are making plt entries. The
first entry in the procedure linkage table is reserved. */
plt_index = ((root_plt->offset - plt_header_size)
/ htab->plt_entry_size);
bfd_vma got_offset, got_address, plt_address;
bfd_vma got_displacement, initial_got_entry;
BFD_ASSERT (sgot != NULL);
/* Get the offset into the .(i)got.plt table of the entry that
corresponds to this function. */
got_offset = (arm_plt->got_offset & -2); /* Mask off the low flag bit.  */
/* Get the index in the procedure linkage table which
corresponds to this symbol. This is the index of this symbol
in all the symbols for which we are making plt entries.
After the reserved .got.plt entries, all symbols appear in
the same order as in .plt. */
plt_index = (got_offset - got_header_size) / 4;
/* Calculate the address of the GOT entry. */
got_address = (sgot->output_section->vma
+ sgot->output_offset
/* ...and the address of the PLT entry. */
plt_address = (splt->output_section->vma
+ splt->output_offset
+ root_plt->offset);
ptr = splt->contents + root_plt->offset;
if (htab->vxworks_p && info->shared)
/* VxWorks shared object: patch GOT offset and .rel.plt offset
   into the PLT template word by word.  */
for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
val = elf32_arm_vxworks_shared_plt_entry[i];
val |= got_address - sgot->output_section->vma;
val |= plt_index * RELOC_SIZE (htab);
/* Words 2 and 5 of the template are data, not instructions.  */
if (i == 2 || i == 5)
bfd_put_32 (output_bfd, val, ptr);
put_arm_insn (htab, output_bfd, val, ptr);
else if (htab->vxworks_p)
/* VxWorks executable.  */
for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
val = elf32_arm_vxworks_exec_plt_entry[i];
/* Backwards PC-relative branch displacement, in words, with the
   usual 8-byte pipeline adjustment, masked to 24 bits.  */
val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
val |= plt_index * RELOC_SIZE (htab);
if (i == 2 || i == 5)
bfd_put_32 (output_bfd, val, ptr);
put_arm_insn (htab, output_bfd, val, ptr);
/* Each PLT entry owns a pair of .rela.plt.unloaded relocations.  */
loc = (htab->srelplt2->contents
+ (plt_index * 2 + 1) * RELOC_SIZE (htab));
/* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
referencing the GOT for this PLT entry. */
rel.r_offset = plt_address + 8;
rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
rel.r_addend = got_offset;
SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
loc += RELOC_SIZE (htab);
/* Create the R_ARM_ABS32 relocation referencing the
beginning of the PLT for this GOT entry. */
rel.r_offset = got_address;
rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
else if (htab->nacl_p)
/* Calculate the displacement between the PLT slot and the
common tail that's part of the special initial PLT slot. */
int32_t tail_displacement
= ((splt->output_section->vma + splt->output_offset
+ ARM_NACL_PLT_TAIL_OFFSET)
- (plt_address + htab->plt_entry_size + 4));
BFD_ASSERT ((tail_displacement & 3) == 0);
tail_displacement >>= 2;
/* The word displacement must fit in a 24-bit branch field.  */
BFD_ASSERT ((tail_displacement & 0xff000000) == 0
|| (-tail_displacement & 0xff000000) == 0);
/* Calculate the displacement between the PLT slot and the entry
in the GOT. The offset accounts for the value produced by
adding to pc in the penultimate instruction of the PLT stub. */
got_displacement = (got_address
- (plt_address + htab->plt_entry_size));
/* NaCl does not support interworking at all. */
BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
/* movw/movt load the GOT displacement; the final word branches
   to the common tail.  */
put_arm_insn (htab, output_bfd,
elf32_arm_nacl_plt_entry[0]
| arm_movw_immediate (got_displacement),
put_arm_insn (htab, output_bfd,
elf32_arm_nacl_plt_entry[1]
| arm_movt_immediate (got_displacement),
put_arm_insn (htab, output_bfd,
elf32_arm_nacl_plt_entry[2],
put_arm_insn (htab, output_bfd,
elf32_arm_nacl_plt_entry[3]
| (tail_displacement & 0x00ffffff),
/* Calculate the displacement between the PLT slot and the
entry in the GOT. The eight-byte offset accounts for the
value produced by adding to pc in the first instruction
of the PLT stub. */
got_displacement = got_address - (plt_address + 8);
BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
/* Leading Thumb stub sits in the four bytes before the entry.  */
put_thumb_insn (htab, output_bfd,
elf32_arm_plt_thumb_stub[0], ptr - 4);
put_thumb_insn (htab, output_bfd,
elf32_arm_plt_thumb_stub[1], ptr - 2);
/* Split the GOT displacement across the three add/ldr immediates.  */
put_arm_insn (htab, output_bfd,
elf32_arm_plt_entry[0]
| ((got_displacement & 0x0ff00000) >> 20),
put_arm_insn (htab, output_bfd,
elf32_arm_plt_entry[1]
| ((got_displacement & 0x000ff000) >> 12),
put_arm_insn (htab, output_bfd,
elf32_arm_plt_entry[2]
| (got_displacement & 0x00000fff),
#ifdef FOUR_WORD_PLT
bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
/* Fill in the entry in the .rel(a).(i)plt section. */
rel.r_offset = got_address;
/* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
The dynamic linker or static executable then calls SYM_VALUE
to determine the correct run-time value of the .igot.plt entry. */
rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
initial_got_entry = sym_value;
rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
/* Ordinary entries initially point back at the start of the PLT.  */
initial_got_entry = (splt->output_section->vma
+ splt->output_offset);
/* Fill in the entry in the global offset table. */
bfd_put_32 (output_bfd, initial_got_entry,
sgot->contents + got_offset);
loc = srel->contents + plt_index * RELOC_SIZE (htab);
SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
/* Some relocations map to different relocations depending on the
target. Return the real relocation. */
/* Maps R_ARM_TARGET1/R_ARM_TARGET2 according to the target1_is_rel and
   target2_reloc settings recorded in GLOBALS.  NOTE(review): only a
   fragment of the mapping is visible here — confirm against the full
   definition before relying on details.  */
arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
if (globals->target1_is_rel)
return globals->target2_reloc;
/* Return the base VMA address which should be subtracted from real addresses
when resolving @dtpoff relocation.
This is PT_TLS segment p_vaddr. */
dtpoff_base (struct bfd_link_info *info)
/* If tls_sec is NULL, we should have signalled an error already. */
if (elf_hash_table (info)->tls_sec == NULL)
/* tls_sec->vma is the start of the TLS segment, i.e. its p_vaddr.  */
return elf_hash_table (info)->tls_sec->vma;
/* Return the relocation value for @tpoff relocation
if STT_TLS virtual address is ADDRESS. */
tpoff (struct bfd_link_info *info, bfd_vma address)
struct elf_link_hash_table *htab = elf_hash_table (info);
/* If tls_sec is NULL, we should have signalled an error already. */
if (htab->tls_sec == NULL)
/* The TLS block follows the thread control block, so offsets start at
   TCB_SIZE rounded up to the TLS segment's alignment.  */
base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
return address - htab->tls_sec->vma + base;
/* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
VALUE is the relocation value. */
static bfd_reloc_status_type
elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
/* Values that do not fit the 12-bit immediate field overflow.  */
return bfd_reloc_overflow;
/* Preserve the instruction's upper 20 bits and insert the new
   12-bit immediate in the low bits.  */
value |= bfd_get_32 (abfd, data) & 0xfffff000;
bfd_put_32 (abfd, value, data);
return bfd_reloc_ok;
/* Handle TLS relaxations. Relaxing is possible for symbols that use
R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.

Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
is to then call final_link_relocate. Return other values in the
case of error.

FIXME:When --emit-relocs is in effect, we'll emit relocs describing
the pre-relaxed code. It would be nice if the relocs were updated
to match the optimization. */
static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
bfd *input_bfd, asection *input_sec, bfd_byte *contents,
Elf_Internal_Rela *rel, unsigned long is_local)
switch (ELF32_R_TYPE (rel->r_info))
return bfd_reloc_notsupported;
case R_ARM_TLS_GOTDESC:
/* Adjust the in-place addend for the pc bias of the eventual
   add-to-pc instruction: 4+1 for Thumb, 8 for ARM.  */
insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
insn -= 5; /* THUMB */
insn -= 8; /* ARM */
bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
return bfd_reloc_continue;
case R_ARM_THM_TLS_DESCSEQ:
/* Rewrite each instruction of the Thumb TLS descriptor sequence;
   0x46c0 is "mov r8, r8", the canonical Thumb nop.  */
insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
if ((insn & 0xff78) == 0x4478) /* add rx, pc */
bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
/* Clear the #4 offset from the load.  */
bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
else if ((insn & 0xff87) == 0x4780) /* blx rx */
bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
/* Turn the call into "mov r0, rx".  */
bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
contents + rel->r_offset);
if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
/* It's a 32 bit instruction, fetch the rest of it for
error generation. */
| bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
(*_bfd_error_handler)
(_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
return bfd_reloc_notsupported;
case R_ARM_TLS_DESCSEQ:
/* Rewrite each instruction of the ARM TLS descriptor sequence;
   0xe1a00000 is "mov r0, r0", the ARM nop used here.  */
insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
contents + rel->r_offset);
else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
/* Clear the #4 offset from the load.  */
bfd_put_32 (input_bfd, insn & 0xfffff000,
contents + rel->r_offset);
else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
/* Turn the call into "mov r0, rx".  */
bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
contents + rel->r_offset);
(*_bfd_error_handler)
(_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
return bfd_reloc_notsupported;
case R_ARM_TLS_CALL:
/* GD->IE relaxation, turn the instruction into 'nop' or
'ldr r0, [pc,r0]' */
insn = is_local ? 0xe1a00000 : 0xe79f0000;
bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
case R_ARM_THM_TLS_CALL:
/* GD->IE relaxation */
/* add r0,pc; ldr r0, [r0] */
else if (arch_has_thumb2_nop (globals))
/* Write the 32-bit replacement as two halfwords.  */
bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
return bfd_reloc_ok;
/* For a given value of n, calculate the value of G_n as required to
deal with group relocations. We return it in the form of an
encoded constant-and-rotation, together with the final residual. If n is
specified as less than zero, then final_residual is filled with the
input value and no further action is performed. */
/* See the "Group relocations" section of the AAELF specification for
   the definition of G_n and Y_n (the residual).  */
calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
bfd_vma encoded_g_n = 0;
bfd_vma residual = value; /* Also known as Y_n. */
/* Peel one 8-bit group off the residual per iteration.  */
for (current_n = 0; current_n <= n; current_n++)
/* Calculate which part of the value to mask. */
/* Determine the most significant bit in the residual and
align the resulting value to a 2-bit boundary. */
for (msb = 30; msb >= 0; msb -= 2)
/* NOTE(review): with msb == 30, "3 << msb" overflows a 32-bit signed
   int (formally undefined); ((bfd_vma) 3 << msb) would be safer.  */
if (residual & (3 << msb))
/* The desired shift is now (msb - 6), or zero, whichever
   is the greater. */
/* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
g_n = residual & (0xff << shift);
encoded_g_n = (g_n >> shift)
| ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
/* Calculate the residual for the next time around. */
*final_residual = residual;
7993 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
7994 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
7997 identify_add_or_sub (bfd_vma insn)
7999 int opcode = insn & 0x1e00000;
8001 if (opcode == 1 << 23) /* ADD */
8004 if (opcode == 1 << 22) /* SUB */
8010 /* Perform a relocation as part of a final link. */
8012 static bfd_reloc_status_type
8013 elf32_arm_final_link_relocate (reloc_howto_type * howto,
8016 asection * input_section,
8017 bfd_byte * contents,
8018 Elf_Internal_Rela * rel,
8020 struct bfd_link_info * info,
8022 const char * sym_name,
8023 unsigned char st_type,
8024 enum arm_st_branch_type branch_type,
8025 struct elf_link_hash_entry * h,
8026 bfd_boolean * unresolved_reloc_p,
8027 char ** error_message)
8029 unsigned long r_type = howto->type;
8030 unsigned long r_symndx;
8031 bfd_byte * hit_data = contents + rel->r_offset;
8032 bfd_vma * local_got_offsets;
8033 bfd_vma * local_tlsdesc_gotents;
8036 asection * sreloc = NULL;
8039 bfd_signed_vma signed_addend;
8040 unsigned char dynreloc_st_type;
8041 bfd_vma dynreloc_value;
8042 struct elf32_arm_link_hash_table * globals;
8043 struct elf32_arm_link_hash_entry *eh;
8044 union gotplt_union *root_plt;
8045 struct arm_plt_info *arm_plt;
8047 bfd_vma gotplt_offset;
8048 bfd_boolean has_iplt_entry;
8050 globals = elf32_arm_hash_table (info);
8051 if (globals == NULL)
8052 return bfd_reloc_notsupported;
8054 BFD_ASSERT (is_arm_elf (input_bfd));
8056 /* Some relocation types map to different relocations depending on the
8057 target. We pick the right one here. */
8058 r_type = arm_real_reloc_type (globals, r_type);
8060 /* It is possible to have linker relaxations on some TLS access
8061 models. Update our information here. */
8062 r_type = elf32_arm_tls_transition (info, r_type, h);
8064 if (r_type != howto->type)
8065 howto = elf32_arm_howto_from_type (r_type);
8067 /* If the start address has been set, then set the EF_ARM_HASENTRY
8068 flag. Setting this more than once is redundant, but the cost is
8069 not too high, and it keeps the code simple.
8071 The test is done here, rather than somewhere else, because the
8072 start address is only set just before the final link commences.
8074 Note - if the user deliberately sets a start address of 0, the
8075 flag will not be set. */
8076 if (bfd_get_start_address (output_bfd) != 0)
8077 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
8079 eh = (struct elf32_arm_link_hash_entry *) h;
8080 sgot = globals->root.sgot;
8081 local_got_offsets = elf_local_got_offsets (input_bfd);
8082 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
8084 if (globals->root.dynamic_sections_created)
8085 srelgot = globals->root.srelgot;
8089 r_symndx = ELF32_R_SYM (rel->r_info);
8091 if (globals->use_rel)
8093 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
8095 if (addend & ((howto->src_mask + 1) >> 1))
8098 signed_addend &= ~ howto->src_mask;
8099 signed_addend |= addend;
8102 signed_addend = addend;
8105 addend = signed_addend = rel->r_addend;
8107 /* Record the symbol information that should be used in dynamic
8109 dynreloc_st_type = st_type;
8110 dynreloc_value = value;
8111 if (branch_type == ST_BRANCH_TO_THUMB)
8112 dynreloc_value |= 1;
8114 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
8115 VALUE appropriately for relocations that we resolve at link time. */
8116 has_iplt_entry = FALSE;
8117 if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
8118 && root_plt->offset != (bfd_vma) -1)
8120 plt_offset = root_plt->offset;
8121 gotplt_offset = arm_plt->got_offset;
8123 if (h == NULL || eh->is_iplt)
8125 has_iplt_entry = TRUE;
8126 splt = globals->root.iplt;
8128 /* Populate .iplt entries here, because not all of them will
8129 be seen by finish_dynamic_symbol. The lower bit is set if
8130 we have already populated the entry. */
8135 elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
8136 -1, dynreloc_value);
8137 root_plt->offset |= 1;
8140 /* Static relocations always resolve to the .iplt entry. */
8142 value = (splt->output_section->vma
8143 + splt->output_offset
8145 branch_type = ST_BRANCH_TO_ARM;
8147 /* If there are non-call relocations that resolve to the .iplt
8148 entry, then all dynamic ones must too. */
8149 if (arm_plt->noncall_refcount != 0)
8151 dynreloc_st_type = st_type;
8152 dynreloc_value = value;
8156 /* We populate the .plt entry in finish_dynamic_symbol. */
8157 splt = globals->root.splt;
8162 plt_offset = (bfd_vma) -1;
8163 gotplt_offset = (bfd_vma) -1;
8169 /* We don't need to find a value for this symbol. It's just a
8171 *unresolved_reloc_p = FALSE;
8172 return bfd_reloc_ok;
8175 if (!globals->vxworks_p)
8176 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8180 case R_ARM_ABS32_NOI:
8182 case R_ARM_REL32_NOI:
8188 /* Handle relocations which should use the PLT entry. ABS32/REL32
8189 will use the symbol's value, which may point to a PLT entry, but we
8190 don't need to handle that here. If we created a PLT entry, all
8191 branches in this object should go to it, except if the PLT is too
8192 far away, in which case a long branch stub should be inserted. */
8193 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
8194 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
8195 && r_type != R_ARM_CALL
8196 && r_type != R_ARM_JUMP24
8197 && r_type != R_ARM_PLT32)
8198 && plt_offset != (bfd_vma) -1)
8200 /* If we've created a .plt section, and assigned a PLT entry
8201 to this function, it must either be a STT_GNU_IFUNC reference
8202 or not be known to bind locally. In other cases, we should
8203 have cleared the PLT entry by now. */
8204 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
8206 value = (splt->output_section->vma
8207 + splt->output_offset
8209 *unresolved_reloc_p = FALSE;
8210 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8211 contents, rel->r_offset, value,
8215 /* When generating a shared object or relocatable executable, these
8216 relocations are copied into the output file to be resolved at
8218 if ((info->shared || globals->root.is_relocatable_executable)
8219 && (input_section->flags & SEC_ALLOC)
8220 && !(globals->vxworks_p
8221 && strcmp (input_section->output_section->name,
8223 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
8224 || !SYMBOL_CALLS_LOCAL (info, h))
8225 && (!strstr (input_section->name, STUB_SUFFIX))
8227 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8228 || h->root.type != bfd_link_hash_undefweak)
8229 && r_type != R_ARM_PC24
8230 && r_type != R_ARM_CALL
8231 && r_type != R_ARM_JUMP24
8232 && r_type != R_ARM_PREL31
8233 && r_type != R_ARM_PLT32)
8235 Elf_Internal_Rela outrel;
8236 bfd_boolean skip, relocate;
8238 *unresolved_reloc_p = FALSE;
8240 if (sreloc == NULL && globals->root.dynamic_sections_created)
8242 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
8243 ! globals->use_rel);
8246 return bfd_reloc_notsupported;
8252 outrel.r_addend = addend;
8254 _bfd_elf_section_offset (output_bfd, info, input_section,
8256 if (outrel.r_offset == (bfd_vma) -1)
8258 else if (outrel.r_offset == (bfd_vma) -2)
8259 skip = TRUE, relocate = TRUE;
8260 outrel.r_offset += (input_section->output_section->vma
8261 + input_section->output_offset);
8264 memset (&outrel, 0, sizeof outrel);
8269 || !h->def_regular))
8270 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
8275 /* This symbol is local, or marked to become local. */
8276 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
8277 if (globals->symbian_p)
8281 /* On Symbian OS, the data segment and text segement
8282 can be relocated independently. Therefore, we
8283 must indicate the segment to which this
8284 relocation is relative. The BPABI allows us to
8285 use any symbol in the right segment; we just use
8286 the section symbol as it is convenient. (We
8287 cannot use the symbol given by "h" directly as it
8288 will not appear in the dynamic symbol table.)
8290 Note that the dynamic linker ignores the section
8291 symbol value, so we don't subtract osec->vma
8292 from the emitted reloc addend. */
8294 osec = sym_sec->output_section;
8296 osec = input_section->output_section;
8297 symbol = elf_section_data (osec)->dynindx;
8300 struct elf_link_hash_table *htab = elf_hash_table (info);
8302 if ((osec->flags & SEC_READONLY) == 0
8303 && htab->data_index_section != NULL)
8304 osec = htab->data_index_section;
8306 osec = htab->text_index_section;
8307 symbol = elf_section_data (osec)->dynindx;
8309 BFD_ASSERT (symbol != 0);
8312 /* On SVR4-ish systems, the dynamic loader cannot
8313 relocate the text and data segments independently,
8314 so the symbol does not matter. */
8316 if (dynreloc_st_type == STT_GNU_IFUNC)
8317 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
8318 to the .iplt entry. Instead, every non-call reference
8319 must use an R_ARM_IRELATIVE relocation to obtain the
8320 correct run-time address. */
8321 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
8323 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
8324 if (globals->use_rel)
8327 outrel.r_addend += dynreloc_value;
8330 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
8332 /* If this reloc is against an external symbol, we do not want to
8333 fiddle with the addend. Otherwise, we need to include the symbol
8334 value so that it becomes an addend for the dynamic reloc. */
8336 return bfd_reloc_ok;
8338 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8339 contents, rel->r_offset,
8340 dynreloc_value, (bfd_vma) 0);
8342 else switch (r_type)
8345 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
8347 case R_ARM_XPC25: /* Arm BLX instruction. */
8350 case R_ARM_PC24: /* Arm B/BL instruction. */
8353 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
8355 if (r_type == R_ARM_XPC25)
8357 /* Check for Arm calling Arm function. */
8358 /* FIXME: Should we translate the instruction into a BL
8359 instruction instead ? */
8360 if (branch_type != ST_BRANCH_TO_THUMB)
8361 (*_bfd_error_handler)
8362 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
8364 h ? h->root.root.string : "(local)");
8366 else if (r_type == R_ARM_PC24)
8368 /* Check for Arm calling Thumb function. */
8369 if (branch_type == ST_BRANCH_TO_THUMB)
8371 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
8372 output_bfd, input_section,
8373 hit_data, sym_sec, rel->r_offset,
8374 signed_addend, value,
8376 return bfd_reloc_ok;
8378 return bfd_reloc_dangerous;
8382 /* Check if a stub has to be inserted because the
8383 destination is too far or we are changing mode. */
8384 if ( r_type == R_ARM_CALL
8385 || r_type == R_ARM_JUMP24
8386 || r_type == R_ARM_PLT32)
8388 enum elf32_arm_stub_type stub_type = arm_stub_none;
8389 struct elf32_arm_link_hash_entry *hash;
8391 hash = (struct elf32_arm_link_hash_entry *) h;
8392 stub_type = arm_type_of_stub (info, input_section, rel,
8393 st_type, &branch_type,
8394 hash, value, sym_sec,
8395 input_bfd, sym_name);
8397 if (stub_type != arm_stub_none)
8399 /* The target is out of reach, so redirect the
8400 branch to the local stub for this function. */
8401 stub_entry = elf32_arm_get_stub_entry (input_section,
8406 if (stub_entry != NULL)
8407 value = (stub_entry->stub_offset
8408 + stub_entry->stub_sec->output_offset
8409 + stub_entry->stub_sec->output_section->vma);
8411 if (plt_offset != (bfd_vma) -1)
8412 *unresolved_reloc_p = FALSE;
8417 /* If the call goes through a PLT entry, make sure to
8418 check distance to the right destination address. */
8419 if (plt_offset != (bfd_vma) -1)
8421 value = (splt->output_section->vma
8422 + splt->output_offset
8424 *unresolved_reloc_p = FALSE;
8425 /* The PLT entry is in ARM mode, regardless of the
8427 branch_type = ST_BRANCH_TO_ARM;
8432 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
8434 S is the address of the symbol in the relocation.
8435 P is address of the instruction being relocated.
8436 A is the addend (extracted from the instruction) in bytes.
8438 S is held in 'value'.
8439 P is the base address of the section containing the
8440 instruction plus the offset of the reloc into that
8442 (input_section->output_section->vma +
8443 input_section->output_offset +
8445 A is the addend, converted into bytes, ie:
8448 Note: None of these operations have knowledge of the pipeline
8449 size of the processor, thus it is up to the assembler to
8450 encode this information into the addend. */
8451 value -= (input_section->output_section->vma
8452 + input_section->output_offset);
8453 value -= rel->r_offset;
8454 if (globals->use_rel)
8455 value += (signed_addend << howto->size);
8457 /* RELA addends do not have to be adjusted by howto->size. */
8458 value += signed_addend;
8460 signed_addend = value;
8461 signed_addend >>= howto->rightshift;
8463 /* A branch to an undefined weak symbol is turned into a jump to
8464 the next instruction unless a PLT entry will be created.
8465 Do the same for local undefined symbols (but not for STN_UNDEF).
8466 The jump to the next instruction is optimized as a NOP depending
8467 on the architecture. */
8468 if (h ? (h->root.type == bfd_link_hash_undefweak
8469 && plt_offset == (bfd_vma) -1)
8470 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
8472 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
8474 if (arch_has_arm_nop (globals))
8475 value |= 0x0320f000;
8477 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
8481 /* Perform a signed range check. */
8482 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
8483 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
8484 return bfd_reloc_overflow;
8486 addend = (value & 2);
8488 value = (signed_addend & howto->dst_mask)
8489 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
8491 if (r_type == R_ARM_CALL)
8493 /* Set the H bit in the BLX instruction. */
8494 if (branch_type == ST_BRANCH_TO_THUMB)
8499 value &= ~(bfd_vma)(1 << 24);
8502 /* Select the correct instruction (BL or BLX). */
8503 /* Only if we are not handling a BL to a stub. In this
8504 case, mode switching is performed by the stub. */
8505 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
8507 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
8509 value &= ~(bfd_vma)(1 << 28);
8519 if (branch_type == ST_BRANCH_TO_THUMB)
8523 case R_ARM_ABS32_NOI:
8529 if (branch_type == ST_BRANCH_TO_THUMB)
8531 value -= (input_section->output_section->vma
8532 + input_section->output_offset + rel->r_offset);
8535 case R_ARM_REL32_NOI:
8537 value -= (input_section->output_section->vma
8538 + input_section->output_offset + rel->r_offset);
8542 value -= (input_section->output_section->vma
8543 + input_section->output_offset + rel->r_offset);
8544 value += signed_addend;
8545 if (! h || h->root.type != bfd_link_hash_undefweak)
8547 /* Check for overflow. */
8548 if ((value ^ (value >> 1)) & (1 << 30))
8549 return bfd_reloc_overflow;
8551 value &= 0x7fffffff;
8552 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
8553 if (branch_type == ST_BRANCH_TO_THUMB)
8558 bfd_put_32 (input_bfd, value, hit_data);
8559 return bfd_reloc_ok;
8564 /* There is no way to tell whether the user intended to use a signed or
8565 unsigned addend. When checking for overflow we accept either,
8566 as specified by the AAELF. */
8567 if ((long) value > 0xff || (long) value < -0x80)
8568 return bfd_reloc_overflow;
8570 bfd_put_8 (input_bfd, value, hit_data);
8571 return bfd_reloc_ok;
8576 /* See comment for R_ARM_ABS8. */
8577 if ((long) value > 0xffff || (long) value < -0x8000)
8578 return bfd_reloc_overflow;
8580 bfd_put_16 (input_bfd, value, hit_data);
8581 return bfd_reloc_ok;
8583 case R_ARM_THM_ABS5:
8584 /* Support ldr and str instructions for the thumb. */
8585 if (globals->use_rel)
8587 /* Need to refetch addend. */
8588 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
8589 /* ??? Need to determine shift amount from operand size. */
8590 addend >>= howto->rightshift;
8594 /* ??? Isn't value unsigned? */
8595 if ((long) value > 0x1f || (long) value < -0x10)
8596 return bfd_reloc_overflow;
8598 /* ??? Value needs to be properly shifted into place first. */
8599 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
8600 bfd_put_16 (input_bfd, value, hit_data);
8601 return bfd_reloc_ok;
8603 case R_ARM_THM_ALU_PREL_11_0:
8604 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
8607 bfd_signed_vma relocation;
8609 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8610 | bfd_get_16 (input_bfd, hit_data + 2);
8612 if (globals->use_rel)
8614 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
8615 | ((insn & (1 << 26)) >> 15);
8616 if (insn & 0xf00000)
8617 signed_addend = -signed_addend;
8620 relocation = value + signed_addend;
8621 relocation -= (input_section->output_section->vma
8622 + input_section->output_offset
8625 value = abs (relocation);
8627 if (value >= 0x1000)
8628 return bfd_reloc_overflow;
8630 insn = (insn & 0xfb0f8f00) | (value & 0xff)
8631 | ((value & 0x700) << 4)
8632 | ((value & 0x800) << 15);
8636 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8637 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8639 return bfd_reloc_ok;
8643 /* PR 10073: This reloc is not generated by the GNU toolchain,
8644 but it is supported for compatibility with third party libraries
8645 generated by other compilers, specifically the ARM/IAR. */
8648 bfd_signed_vma relocation;
8650 insn = bfd_get_16 (input_bfd, hit_data);
8652 if (globals->use_rel)
8653 addend = (insn & 0x00ff) << 2;
8655 relocation = value + addend;
8656 relocation -= (input_section->output_section->vma
8657 + input_section->output_offset
8660 value = abs (relocation);
8662 /* We do not check for overflow of this reloc. Although strictly
8663 speaking this is incorrect, it appears to be necessary in order
8664 to work with IAR generated relocs. Since GCC and GAS do not
8665 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
8666 a problem for them. */
8669 insn = (insn & 0xff00) | (value >> 2);
8671 bfd_put_16 (input_bfd, insn, hit_data);
8673 return bfd_reloc_ok;
8676 case R_ARM_THM_PC12:
8677 /* Corresponds to: ldr.w reg, [pc, #offset]. */
8680 bfd_signed_vma relocation;
8682 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
8683 | bfd_get_16 (input_bfd, hit_data + 2);
8685 if (globals->use_rel)
8687 signed_addend = insn & 0xfff;
8688 if (!(insn & (1 << 23)))
8689 signed_addend = -signed_addend;
8692 relocation = value + signed_addend;
8693 relocation -= (input_section->output_section->vma
8694 + input_section->output_offset
8697 value = abs (relocation);
8699 if (value >= 0x1000)
8700 return bfd_reloc_overflow;
8702 insn = (insn & 0xff7ff000) | value;
8703 if (relocation >= 0)
8706 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8707 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8709 return bfd_reloc_ok;
8712 case R_ARM_THM_XPC22:
8713 case R_ARM_THM_CALL:
8714 case R_ARM_THM_JUMP24:
8715 /* Thumb BL (branch long instruction). */
8719 bfd_boolean overflow = FALSE;
8720 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8721 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8722 bfd_signed_vma reloc_signed_max;
8723 bfd_signed_vma reloc_signed_min;
8725 bfd_signed_vma signed_check;
8727 const int thumb2 = using_thumb2 (globals);
8729 /* A branch to an undefined weak symbol is turned into a jump to
8730 the next instruction unless a PLT entry will be created.
8731 The jump to the next instruction is optimized as a NOP.W for
8732 Thumb-2 enabled architectures. */
8733 if (h && h->root.type == bfd_link_hash_undefweak
8734 && plt_offset == (bfd_vma) -1)
8736 if (arch_has_thumb2_nop (globals))
8738 bfd_put_16 (input_bfd, 0xf3af, hit_data);
8739 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
8743 bfd_put_16 (input_bfd, 0xe000, hit_data);
8744 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
8746 return bfd_reloc_ok;
8749 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
8750 with Thumb-1) involving the J1 and J2 bits. */
8751 if (globals->use_rel)
8753 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
8754 bfd_vma upper = upper_insn & 0x3ff;
8755 bfd_vma lower = lower_insn & 0x7ff;
8756 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
8757 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
8758 bfd_vma i1 = j1 ^ s ? 0 : 1;
8759 bfd_vma i2 = j2 ^ s ? 0 : 1;
8761 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
8763 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
8765 signed_addend = addend;
8768 if (r_type == R_ARM_THM_XPC22)
8770 /* Check for Thumb to Thumb call. */
8771 /* FIXME: Should we translate the instruction into a BL
8772 instruction instead ? */
8773 if (branch_type == ST_BRANCH_TO_THUMB)
8774 (*_bfd_error_handler)
8775 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
8777 h ? h->root.root.string : "(local)");
8781 /* If it is not a call to Thumb, assume call to Arm.
8782 If it is a call relative to a section name, then it is not a
8783 function call at all, but rather a long jump. Calls through
8784 the PLT do not require stubs. */
8785 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
8787 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8789 /* Convert BL to BLX. */
8790 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8792 else if (( r_type != R_ARM_THM_CALL)
8793 && (r_type != R_ARM_THM_JUMP24))
8795 if (elf32_thumb_to_arm_stub
8796 (info, sym_name, input_bfd, output_bfd, input_section,
8797 hit_data, sym_sec, rel->r_offset, signed_addend, value,
8799 return bfd_reloc_ok;
8801 return bfd_reloc_dangerous;
8804 else if (branch_type == ST_BRANCH_TO_THUMB
8806 && r_type == R_ARM_THM_CALL)
8808 /* Make sure this is a BL. */
8809 lower_insn |= 0x1800;
8813 enum elf32_arm_stub_type stub_type = arm_stub_none;
8814 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
8816 /* Check if a stub has to be inserted because the destination
8818 struct elf32_arm_stub_hash_entry *stub_entry;
8819 struct elf32_arm_link_hash_entry *hash;
8821 hash = (struct elf32_arm_link_hash_entry *) h;
8823 stub_type = arm_type_of_stub (info, input_section, rel,
8824 st_type, &branch_type,
8825 hash, value, sym_sec,
8826 input_bfd, sym_name);
8828 if (stub_type != arm_stub_none)
8830 /* The target is out of reach or we are changing modes, so
8831 redirect the branch to the local stub for this
8833 stub_entry = elf32_arm_get_stub_entry (input_section,
8837 if (stub_entry != NULL)
8839 value = (stub_entry->stub_offset
8840 + stub_entry->stub_sec->output_offset
8841 + stub_entry->stub_sec->output_section->vma);
8843 if (plt_offset != (bfd_vma) -1)
8844 *unresolved_reloc_p = FALSE;
8847 /* If this call becomes a call to Arm, force BLX. */
8848 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
8851 && !arm_stub_is_thumb (stub_entry->stub_type))
8852 || branch_type != ST_BRANCH_TO_THUMB)
8853 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8858 /* Handle calls via the PLT. */
8859 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
8861 value = (splt->output_section->vma
8862 + splt->output_offset
8865 if (globals->use_blx && r_type == R_ARM_THM_CALL)
8867 /* If the Thumb BLX instruction is available, convert
8868 the BL to a BLX instruction to call the ARM-mode
8870 lower_insn = (lower_insn & ~0x1000) | 0x0800;
8871 branch_type = ST_BRANCH_TO_ARM;
8875 /* Target the Thumb stub before the ARM PLT entry. */
8876 value -= PLT_THUMB_STUB_SIZE;
8877 branch_type = ST_BRANCH_TO_THUMB;
8879 *unresolved_reloc_p = FALSE;
8882 relocation = value + signed_addend;
8884 relocation -= (input_section->output_section->vma
8885 + input_section->output_offset
8888 check = relocation >> howto->rightshift;
8890 /* If this is a signed value, the rightshift just dropped
8891 leading 1 bits (assuming twos complement). */
8892 if ((bfd_signed_vma) relocation >= 0)
8893 signed_check = check;
8895 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
8897	       /* Calculate the permissible maximum and minimum values for
8898 this relocation according to whether we're relocating for
8900 bitsize = howto->bitsize;
8903 reloc_signed_max = (1 << (bitsize - 1)) - 1;
8904 reloc_signed_min = ~reloc_signed_max;
8906 /* Assumes two's complement. */
8907 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8910 if ((lower_insn & 0x5000) == 0x4000)
8911 /* For a BLX instruction, make sure that the relocation is rounded up
8912 to a word boundary. This follows the semantics of the instruction
8913 which specifies that bit 1 of the target address will come from bit
8914 1 of the base address. */
8915 relocation = (relocation + 2) & ~ 3;
8917 /* Put RELOCATION back into the insn. Assumes two's complement.
8918 We use the Thumb-2 encoding, which is safe even if dealing with
8919 a Thumb-1 instruction by virtue of our overflow check above. */
8920 reloc_sign = (signed_check < 0) ? 1 : 0;
8921 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
8922 | ((relocation >> 12) & 0x3ff)
8923 | (reloc_sign << 10);
8924 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
8925 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
8926 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
8927 | ((relocation >> 1) & 0x7ff);
8929 /* Put the relocated value back in the object file: */
8930 bfd_put_16 (input_bfd, upper_insn, hit_data);
8931 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
8933 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
8937 case R_ARM_THM_JUMP19:
8938 /* Thumb32 conditional branch instruction. */
8941 bfd_boolean overflow = FALSE;
8942 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
8943 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
8944 bfd_signed_vma reloc_signed_max = 0xffffe;
8945 bfd_signed_vma reloc_signed_min = -0x100000;
8946 bfd_signed_vma signed_check;
8948 /* Need to refetch the addend, reconstruct the top three bits,
8949 and squish the two 11 bit pieces together. */
8950 if (globals->use_rel)
8952 bfd_vma S = (upper_insn & 0x0400) >> 10;
8953 bfd_vma upper = (upper_insn & 0x003f);
8954 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
8955 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
8956 bfd_vma lower = (lower_insn & 0x07ff);
8961 upper -= 0x0100; /* Sign extend. */
8963 addend = (upper << 12) | (lower << 1);
8964 signed_addend = addend;
8967 /* Handle calls via the PLT. */
8968 if (plt_offset != (bfd_vma) -1)
8970 value = (splt->output_section->vma
8971 + splt->output_offset
8973 /* Target the Thumb stub before the ARM PLT entry. */
8974 value -= PLT_THUMB_STUB_SIZE;
8975 *unresolved_reloc_p = FALSE;
8978 /* ??? Should handle interworking? GCC might someday try to
8979 use this for tail calls. */
8981 relocation = value + signed_addend;
8982 relocation -= (input_section->output_section->vma
8983 + input_section->output_offset
8985 signed_check = (bfd_signed_vma) relocation;
8987 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
8990 /* Put RELOCATION back into the insn. */
8992 bfd_vma S = (relocation & 0x00100000) >> 20;
8993 bfd_vma J2 = (relocation & 0x00080000) >> 19;
8994 bfd_vma J1 = (relocation & 0x00040000) >> 18;
8995 bfd_vma hi = (relocation & 0x0003f000) >> 12;
8996 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
8998 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
8999 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
9002 /* Put the relocated value back in the object file: */
9003 bfd_put_16 (input_bfd, upper_insn, hit_data);
9004 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9006 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
9009 case R_ARM_THM_JUMP11:
9010 case R_ARM_THM_JUMP8:
9011 case R_ARM_THM_JUMP6:
9012 /* Thumb B (branch) instruction). */
9014 bfd_signed_vma relocation;
9015 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
9016 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
9017 bfd_signed_vma signed_check;
9019 /* CZB cannot jump backward. */
9020 if (r_type == R_ARM_THM_JUMP6)
9021 reloc_signed_min = 0;
9023 if (globals->use_rel)
9025 /* Need to refetch addend. */
9026 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9027 if (addend & ((howto->src_mask + 1) >> 1))
9030 signed_addend &= ~ howto->src_mask;
9031 signed_addend |= addend;
9034 signed_addend = addend;
9035 /* The value in the insn has been right shifted. We need to
9036 undo this, so that we can perform the address calculation
9037 in terms of bytes. */
9038 signed_addend <<= howto->rightshift;
9040 relocation = value + signed_addend;
9042 relocation -= (input_section->output_section->vma
9043 + input_section->output_offset
9046 relocation >>= howto->rightshift;
9047 signed_check = relocation;
9049 if (r_type == R_ARM_THM_JUMP6)
9050 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
9052 relocation &= howto->dst_mask;
9053 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
9055 bfd_put_16 (input_bfd, relocation, hit_data);
9057 /* Assumes two's complement. */
9058 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
9059 return bfd_reloc_overflow;
9061 return bfd_reloc_ok;
9064 case R_ARM_ALU_PCREL7_0:
9065 case R_ARM_ALU_PCREL15_8:
9066 case R_ARM_ALU_PCREL23_15:
9071 insn = bfd_get_32 (input_bfd, hit_data);
9072 if (globals->use_rel)
9074 /* Extract the addend. */
9075 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
9076 signed_addend = addend;
9078 relocation = value + signed_addend;
9080 relocation -= (input_section->output_section->vma
9081 + input_section->output_offset
9083 insn = (insn & ~0xfff)
9084 | ((howto->bitpos << 7) & 0xf00)
9085 | ((relocation >> howto->bitpos) & 0xff);
9086 bfd_put_32 (input_bfd, value, hit_data);
9088 return bfd_reloc_ok;
9090 case R_ARM_GNU_VTINHERIT:
9091 case R_ARM_GNU_VTENTRY:
9092 return bfd_reloc_ok;
9094 case R_ARM_GOTOFF32:
9095 /* Relocation is relative to the start of the
9096 global offset table. */
9098 BFD_ASSERT (sgot != NULL);
9100 return bfd_reloc_notsupported;
9102 /* If we are addressing a Thumb function, we need to adjust the
9103 address by one, so that attempts to call the function pointer will
9104 correctly interpret it as Thumb code. */
9105 if (branch_type == ST_BRANCH_TO_THUMB)
9108 /* Note that sgot->output_offset is not involved in this
9109 calculation. We always want the start of .got. If we
9110 define _GLOBAL_OFFSET_TABLE in a different way, as is
9111 permitted by the ABI, we might have to change this
9113 value -= sgot->output_section->vma;
9114 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9115 contents, rel->r_offset, value,
9119 /* Use global offset table as symbol value. */
9120 BFD_ASSERT (sgot != NULL);
9123 return bfd_reloc_notsupported;
9125 *unresolved_reloc_p = FALSE;
9126 value = sgot->output_section->vma;
9127 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9128 contents, rel->r_offset, value,
9132 case R_ARM_GOT_PREL:
9133 /* Relocation is to the entry for this symbol in the
9134 global offset table. */
9136 return bfd_reloc_notsupported;
9138 if (dynreloc_st_type == STT_GNU_IFUNC
9139 && plt_offset != (bfd_vma) -1
9140 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
9142 /* We have a relocation against a locally-binding STT_GNU_IFUNC
9143 symbol, and the relocation resolves directly to the runtime
9144 target rather than to the .iplt entry. This means that any
9145 .got entry would be the same value as the .igot.plt entry,
9146 so there's no point creating both. */
9147 sgot = globals->root.igotplt;
9148 value = sgot->output_offset + gotplt_offset;
9154 off = h->got.offset;
9155 BFD_ASSERT (off != (bfd_vma) -1);
9158		  /* We have already processed one GOT relocation against
9161 if (globals->root.dynamic_sections_created
9162 && !SYMBOL_REFERENCES_LOCAL (info, h))
9163 *unresolved_reloc_p = FALSE;
9167 Elf_Internal_Rela outrel;
9169 if (!SYMBOL_REFERENCES_LOCAL (info, h))
9171 /* If the symbol doesn't resolve locally in a static
9172 object, we have an undefined reference. If the
9173 symbol doesn't resolve locally in a dynamic object,
9174 it should be resolved by the dynamic linker. */
9175 if (globals->root.dynamic_sections_created)
9177 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
9178 *unresolved_reloc_p = FALSE;
9182 outrel.r_addend = 0;
9186 if (dynreloc_st_type == STT_GNU_IFUNC)
9187 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9188 else if (info->shared)
9189 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9192 outrel.r_addend = dynreloc_value;
9195 /* The GOT entry is initialized to zero by default.
9196 See if we should install a different value. */
9197 if (outrel.r_addend != 0
9198 && (outrel.r_info == 0 || globals->use_rel))
9200 bfd_put_32 (output_bfd, outrel.r_addend,
9201 sgot->contents + off);
9202 outrel.r_addend = 0;
9205 if (outrel.r_info != 0)
9207 outrel.r_offset = (sgot->output_section->vma
9208 + sgot->output_offset
9210 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9214 value = sgot->output_offset + off;
9220 BFD_ASSERT (local_got_offsets != NULL &&
9221 local_got_offsets[r_symndx] != (bfd_vma) -1);
9223 off = local_got_offsets[r_symndx];
9225 /* The offset must always be a multiple of 4. We use the
9226 least significant bit to record whether we have already
9227 generated the necessary reloc. */
9232 if (globals->use_rel)
9233 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
9235 if (info->shared || dynreloc_st_type == STT_GNU_IFUNC)
9237 Elf_Internal_Rela outrel;
9239 outrel.r_addend = addend + dynreloc_value;
9240 outrel.r_offset = (sgot->output_section->vma
9241 + sgot->output_offset
9243 if (dynreloc_st_type == STT_GNU_IFUNC)
9244 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9246 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
9247 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9250 local_got_offsets[r_symndx] |= 1;
9253 value = sgot->output_offset + off;
9255 if (r_type != R_ARM_GOT32)
9256 value += sgot->output_section->vma;
9258 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9259 contents, rel->r_offset, value,
9262 case R_ARM_TLS_LDO32:
9263 value = value - dtpoff_base (info);
9265 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9266 contents, rel->r_offset, value,
9269 case R_ARM_TLS_LDM32:
9276 off = globals->tls_ldm_got.offset;
9282 /* If we don't know the module number, create a relocation
9286 Elf_Internal_Rela outrel;
9288 if (srelgot == NULL)
9291 outrel.r_addend = 0;
9292 outrel.r_offset = (sgot->output_section->vma
9293 + sgot->output_offset + off);
9294 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
9296 if (globals->use_rel)
9297 bfd_put_32 (output_bfd, outrel.r_addend,
9298 sgot->contents + off);
9300 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9303 bfd_put_32 (output_bfd, 1, sgot->contents + off);
9305 globals->tls_ldm_got.offset |= 1;
9308 value = sgot->output_section->vma + sgot->output_offset + off
9309 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
9311 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9312 contents, rel->r_offset, value,
9316 case R_ARM_TLS_CALL:
9317 case R_ARM_THM_TLS_CALL:
9318 case R_ARM_TLS_GD32:
9319 case R_ARM_TLS_IE32:
9320 case R_ARM_TLS_GOTDESC:
9321 case R_ARM_TLS_DESCSEQ:
9322 case R_ARM_THM_TLS_DESCSEQ:
9324 bfd_vma off, offplt;
9328 BFD_ASSERT (sgot != NULL);
9333 dyn = globals->root.dynamic_sections_created;
9334 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
9336 || !SYMBOL_REFERENCES_LOCAL (info, h)))
9338 *unresolved_reloc_p = FALSE;
9341 off = h->got.offset;
9342 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
9343 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
9347 BFD_ASSERT (local_got_offsets != NULL);
9348 off = local_got_offsets[r_symndx];
9349 offplt = local_tlsdesc_gotents[r_symndx];
9350 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
9353	/* Linker relaxation happens from one of the
9354 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
9355 if (ELF32_R_TYPE(rel->r_info) != r_type)
9356 tls_type = GOT_TLS_IE;
9358 BFD_ASSERT (tls_type != GOT_UNKNOWN);
9364 bfd_boolean need_relocs = FALSE;
9365 Elf_Internal_Rela outrel;
9368 /* The GOT entries have not been initialized yet. Do it
9369 now, and emit any relocations. If both an IE GOT and a
9370 GD GOT are necessary, we emit the GD first. */
9372 if ((info->shared || indx != 0)
9374 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9375 || h->root.type != bfd_link_hash_undefweak))
9378 BFD_ASSERT (srelgot != NULL);
9381 if (tls_type & GOT_TLS_GDESC)
9385 /* We should have relaxed, unless this is an undefined
9387 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
9389 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
9390 <= globals->root.sgotplt->size);
9392 outrel.r_addend = 0;
9393 outrel.r_offset = (globals->root.sgotplt->output_section->vma
9394 + globals->root.sgotplt->output_offset
9396 + globals->sgotplt_jump_table_size);
9398 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
9399 sreloc = globals->root.srelplt;
9400 loc = sreloc->contents;
9401 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
9402 BFD_ASSERT (loc + RELOC_SIZE (globals)
9403 <= sreloc->contents + sreloc->size);
9405 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
9407 /* For globals, the first word in the relocation gets
9408 the relocation index and the top bit set, or zero,
9409 if we're binding now. For locals, it gets the
9410 symbol's offset in the tls section. */
9411 bfd_put_32 (output_bfd,
9412 !h ? value - elf_hash_table (info)->tls_sec->vma
9413 : info->flags & DF_BIND_NOW ? 0
9414 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
9415 globals->root.sgotplt->contents + offplt
9416 + globals->sgotplt_jump_table_size);
9418 /* Second word in the relocation is always zero. */
9419 bfd_put_32 (output_bfd, 0,
9420 globals->root.sgotplt->contents + offplt
9421 + globals->sgotplt_jump_table_size + 4);
9423 if (tls_type & GOT_TLS_GD)
9427 outrel.r_addend = 0;
9428 outrel.r_offset = (sgot->output_section->vma
9429 + sgot->output_offset
9431 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
9433 if (globals->use_rel)
9434 bfd_put_32 (output_bfd, outrel.r_addend,
9435 sgot->contents + cur_off);
9437 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9440 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9441 sgot->contents + cur_off + 4);
9444 outrel.r_addend = 0;
9445 outrel.r_info = ELF32_R_INFO (indx,
9446 R_ARM_TLS_DTPOFF32);
9447 outrel.r_offset += 4;
9449 if (globals->use_rel)
9450 bfd_put_32 (output_bfd, outrel.r_addend,
9451 sgot->contents + cur_off + 4);
9453 elf32_arm_add_dynreloc (output_bfd, info,
9459 /* If we are not emitting relocations for a
9460 general dynamic reference, then we must be in a
9461 static link or an executable link with the
9462 symbol binding locally. Mark it as belonging
9463 to module 1, the executable. */
9464 bfd_put_32 (output_bfd, 1,
9465 sgot->contents + cur_off);
9466 bfd_put_32 (output_bfd, value - dtpoff_base (info),
9467 sgot->contents + cur_off + 4);
9473 if (tls_type & GOT_TLS_IE)
9478 outrel.r_addend = value - dtpoff_base (info);
9480 outrel.r_addend = 0;
9481 outrel.r_offset = (sgot->output_section->vma
9482 + sgot->output_offset
9484 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
9486 if (globals->use_rel)
9487 bfd_put_32 (output_bfd, outrel.r_addend,
9488 sgot->contents + cur_off);
9490 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
9493 bfd_put_32 (output_bfd, tpoff (info, value),
9494 sgot->contents + cur_off);
9501 local_got_offsets[r_symndx] |= 1;
9504 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
9506 else if (tls_type & GOT_TLS_GDESC)
9509 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
9510 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
9512 bfd_signed_vma offset;
9513 /* TLS stubs are arm mode. The original symbol is a
9514 data object, so branch_type is bogus. */
9515 branch_type = ST_BRANCH_TO_ARM;
9516 enum elf32_arm_stub_type stub_type
9517 = arm_type_of_stub (info, input_section, rel,
9518 st_type, &branch_type,
9519 (struct elf32_arm_link_hash_entry *)h,
9520 globals->tls_trampoline, globals->root.splt,
9521 input_bfd, sym_name);
9523 if (stub_type != arm_stub_none)
9525 struct elf32_arm_stub_hash_entry *stub_entry
9526 = elf32_arm_get_stub_entry
9527 (input_section, globals->root.splt, 0, rel,
9528 globals, stub_type);
9529 offset = (stub_entry->stub_offset
9530 + stub_entry->stub_sec->output_offset
9531 + stub_entry->stub_sec->output_section->vma);
9534 offset = (globals->root.splt->output_section->vma
9535 + globals->root.splt->output_offset
9536 + globals->tls_trampoline);
9538 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
9542 offset -= (input_section->output_section->vma
9543 + input_section->output_offset
9544 + rel->r_offset + 8);
9548 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
9552 /* Thumb blx encodes the offset in a complicated
9554 unsigned upper_insn, lower_insn;
9557 offset -= (input_section->output_section->vma
9558 + input_section->output_offset
9559 + rel->r_offset + 4);
9561 if (stub_type != arm_stub_none
9562 && arm_stub_is_thumb (stub_type))
9564 lower_insn = 0xd000;
9568 lower_insn = 0xc000;
9569 /* Round up the offset to a word boundary */
9570 offset = (offset + 2) & ~2;
9574 upper_insn = (0xf000
9575 | ((offset >> 12) & 0x3ff)
9577 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
9578 | (((!((offset >> 22) & 1)) ^ neg) << 11)
9579 | ((offset >> 1) & 0x7ff);
9580 bfd_put_16 (input_bfd, upper_insn, hit_data);
9581 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
9582 return bfd_reloc_ok;
9585 /* These relocations needs special care, as besides the fact
9586 they point somewhere in .gotplt, the addend must be
9587 adjusted accordingly depending on the type of instruction
9589 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
9591 unsigned long data, insn;
9594 data = bfd_get_32 (input_bfd, hit_data);
9600 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
9601 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
9603 | bfd_get_16 (input_bfd,
9604 contents + rel->r_offset - data + 2);
9605 if ((insn & 0xf800c000) == 0xf000c000)
9608 else if ((insn & 0xffffff00) == 0x4400)
9613 (*_bfd_error_handler)
9614 (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
9615 input_bfd, input_section,
9616 (unsigned long)rel->r_offset, insn);
9617 return bfd_reloc_notsupported;
9622 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
9627 case 0xfa: /* blx */
9631 case 0xe0: /* add */
9636 (*_bfd_error_handler)
9637 (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
9638 input_bfd, input_section,
9639 (unsigned long)rel->r_offset, insn);
9640 return bfd_reloc_notsupported;
9644 value += ((globals->root.sgotplt->output_section->vma
9645 + globals->root.sgotplt->output_offset + off)
9646 - (input_section->output_section->vma
9647 + input_section->output_offset
9649 + globals->sgotplt_jump_table_size);
9652 value = ((globals->root.sgot->output_section->vma
9653 + globals->root.sgot->output_offset + off)
9654 - (input_section->output_section->vma
9655 + input_section->output_offset + rel->r_offset));
9657 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9658 contents, rel->r_offset, value,
9662 case R_ARM_TLS_LE32:
9663 if (info->shared && !info->pie)
9665 (*_bfd_error_handler)
9666 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
9667 input_bfd, input_section,
9668 (long) rel->r_offset, howto->name);
9669 return bfd_reloc_notsupported;
9672 value = tpoff (info, value);
9674 return _bfd_final_link_relocate (howto, input_bfd, input_section,
9675 contents, rel->r_offset, value,
9679 if (globals->fix_v4bx)
9681 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9683 /* Ensure that we have a BX instruction. */
9684 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
9686 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
9688 /* Branch to veneer. */
9690 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
9691 glue_addr -= input_section->output_section->vma
9692 + input_section->output_offset
9693 + rel->r_offset + 8;
9694 insn = (insn & 0xf0000000) | 0x0a000000
9695 | ((glue_addr >> 2) & 0x00ffffff);
9699 /* Preserve Rm (lowest four bits) and the condition code
9700 (highest four bits). Other bits encode MOV PC,Rm. */
9701 insn = (insn & 0xf000000f) | 0x01a0f000;
9704 bfd_put_32 (input_bfd, insn, hit_data);
9706 return bfd_reloc_ok;
9708 case R_ARM_MOVW_ABS_NC:
9709 case R_ARM_MOVT_ABS:
9710 case R_ARM_MOVW_PREL_NC:
9711 case R_ARM_MOVT_PREL:
9712 /* Until we properly support segment-base-relative addressing then
9713 we assume the segment base to be zero, as for the group relocations.
9714 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
9715 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
9716 case R_ARM_MOVW_BREL_NC:
9717 case R_ARM_MOVW_BREL:
9718 case R_ARM_MOVT_BREL:
9720 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9722 if (globals->use_rel)
9724 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
9725 signed_addend = (addend ^ 0x8000) - 0x8000;
9728 value += signed_addend;
9730 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
9731 value -= (input_section->output_section->vma
9732 + input_section->output_offset + rel->r_offset);
9734 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
9735 return bfd_reloc_overflow;
9737 if (branch_type == ST_BRANCH_TO_THUMB)
9740 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
9741 || r_type == R_ARM_MOVT_BREL)
9745 insn |= value & 0xfff;
9746 insn |= (value & 0xf000) << 4;
9747 bfd_put_32 (input_bfd, insn, hit_data);
9749 return bfd_reloc_ok;
9751 case R_ARM_THM_MOVW_ABS_NC:
9752 case R_ARM_THM_MOVT_ABS:
9753 case R_ARM_THM_MOVW_PREL_NC:
9754 case R_ARM_THM_MOVT_PREL:
9755 /* Until we properly support segment-base-relative addressing then
9756 we assume the segment base to be zero, as for the above relocations.
9757 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
9758 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
9759 as R_ARM_THM_MOVT_ABS. */
9760 case R_ARM_THM_MOVW_BREL_NC:
9761 case R_ARM_THM_MOVW_BREL:
9762 case R_ARM_THM_MOVT_BREL:
9766 insn = bfd_get_16 (input_bfd, hit_data) << 16;
9767 insn |= bfd_get_16 (input_bfd, hit_data + 2);
9769 if (globals->use_rel)
9771 addend = ((insn >> 4) & 0xf000)
9772 | ((insn >> 15) & 0x0800)
9773 | ((insn >> 4) & 0x0700)
9775 signed_addend = (addend ^ 0x8000) - 0x8000;
9778 value += signed_addend;
9780 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
9781 value -= (input_section->output_section->vma
9782 + input_section->output_offset + rel->r_offset);
9784 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
9785 return bfd_reloc_overflow;
9787 if (branch_type == ST_BRANCH_TO_THUMB)
9790 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
9791 || r_type == R_ARM_THM_MOVT_BREL)
9795 insn |= (value & 0xf000) << 4;
9796 insn |= (value & 0x0800) << 15;
9797 insn |= (value & 0x0700) << 4;
9798 insn |= (value & 0x00ff);
9800 bfd_put_16 (input_bfd, insn >> 16, hit_data);
9801 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9803 return bfd_reloc_ok;
9805 case R_ARM_ALU_PC_G0_NC:
9806 case R_ARM_ALU_PC_G1_NC:
9807 case R_ARM_ALU_PC_G0:
9808 case R_ARM_ALU_PC_G1:
9809 case R_ARM_ALU_PC_G2:
9810 case R_ARM_ALU_SB_G0_NC:
9811 case R_ARM_ALU_SB_G1_NC:
9812 case R_ARM_ALU_SB_G0:
9813 case R_ARM_ALU_SB_G1:
9814 case R_ARM_ALU_SB_G2:
9816 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9817 bfd_vma pc = input_section->output_section->vma
9818 + input_section->output_offset + rel->r_offset;
9819 /* sb should be the origin of the *segment* containing the symbol.
9820 It is not clear how to obtain this OS-dependent value, so we
9821 make an arbitrary choice of zero. */
9825 bfd_signed_vma signed_value;
9828 /* Determine which group of bits to select. */
9831 case R_ARM_ALU_PC_G0_NC:
9832 case R_ARM_ALU_PC_G0:
9833 case R_ARM_ALU_SB_G0_NC:
9834 case R_ARM_ALU_SB_G0:
9838 case R_ARM_ALU_PC_G1_NC:
9839 case R_ARM_ALU_PC_G1:
9840 case R_ARM_ALU_SB_G1_NC:
9841 case R_ARM_ALU_SB_G1:
9845 case R_ARM_ALU_PC_G2:
9846 case R_ARM_ALU_SB_G2:
9854 /* If REL, extract the addend from the insn. If RELA, it will
9855 have already been fetched for us. */
9856 if (globals->use_rel)
9859 bfd_vma constant = insn & 0xff;
9860 bfd_vma rotation = (insn & 0xf00) >> 8;
9863 signed_addend = constant;
9866 /* Compensate for the fact that in the instruction, the
9867 rotation is stored in multiples of 2 bits. */
9870 /* Rotate "constant" right by "rotation" bits. */
9871 signed_addend = (constant >> rotation) |
9872 (constant << (8 * sizeof (bfd_vma) - rotation));
9875 /* Determine if the instruction is an ADD or a SUB.
9876 (For REL, this determines the sign of the addend.) */
9877 negative = identify_add_or_sub (insn);
9880 (*_bfd_error_handler)
9881 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
9882 input_bfd, input_section,
9883 (long) rel->r_offset, howto->name);
9884 return bfd_reloc_overflow;
9887 signed_addend *= negative;
9890 /* Compute the value (X) to go in the place. */
9891 if (r_type == R_ARM_ALU_PC_G0_NC
9892 || r_type == R_ARM_ALU_PC_G1_NC
9893 || r_type == R_ARM_ALU_PC_G0
9894 || r_type == R_ARM_ALU_PC_G1
9895 || r_type == R_ARM_ALU_PC_G2)
9897 signed_value = value - pc + signed_addend;
9899 /* Section base relative. */
9900 signed_value = value - sb + signed_addend;
9902 /* If the target symbol is a Thumb function, then set the
9903 Thumb bit in the address. */
9904 if (branch_type == ST_BRANCH_TO_THUMB)
9907 /* Calculate the value of the relevant G_n, in encoded
9908 constant-with-rotation format. */
9909 g_n = calculate_group_reloc_mask (abs (signed_value), group,
9912 /* Check for overflow if required. */
9913 if ((r_type == R_ARM_ALU_PC_G0
9914 || r_type == R_ARM_ALU_PC_G1
9915 || r_type == R_ARM_ALU_PC_G2
9916 || r_type == R_ARM_ALU_SB_G0
9917 || r_type == R_ARM_ALU_SB_G1
9918 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
9920 (*_bfd_error_handler)
9921 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
9922 input_bfd, input_section,
9923 (long) rel->r_offset, abs (signed_value), howto->name);
9924 return bfd_reloc_overflow;
9927 /* Mask out the value and the ADD/SUB part of the opcode; take care
9928 not to destroy the S bit. */
9931 /* Set the opcode according to whether the value to go in the
9932 place is negative. */
9933 if (signed_value < 0)
9938 /* Encode the offset. */
9941 bfd_put_32 (input_bfd, insn, hit_data);
9943 return bfd_reloc_ok;
9945 case R_ARM_LDR_PC_G0:
9946 case R_ARM_LDR_PC_G1:
9947 case R_ARM_LDR_PC_G2:
9948 case R_ARM_LDR_SB_G0:
9949 case R_ARM_LDR_SB_G1:
9950 case R_ARM_LDR_SB_G2:
9952 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
9953 bfd_vma pc = input_section->output_section->vma
9954 + input_section->output_offset + rel->r_offset;
9955 bfd_vma sb = 0; /* See note above. */
9957 bfd_signed_vma signed_value;
9960 /* Determine which groups of bits to calculate. */
9963 case R_ARM_LDR_PC_G0:
9964 case R_ARM_LDR_SB_G0:
9968 case R_ARM_LDR_PC_G1:
9969 case R_ARM_LDR_SB_G1:
9973 case R_ARM_LDR_PC_G2:
9974 case R_ARM_LDR_SB_G2:
9982 /* If REL, extract the addend from the insn. If RELA, it will
9983 have already been fetched for us. */
9984 if (globals->use_rel)
9986 int negative = (insn & (1 << 23)) ? 1 : -1;
9987 signed_addend = negative * (insn & 0xfff);
9990 /* Compute the value (X) to go in the place. */
9991 if (r_type == R_ARM_LDR_PC_G0
9992 || r_type == R_ARM_LDR_PC_G1
9993 || r_type == R_ARM_LDR_PC_G2)
9995 signed_value = value - pc + signed_addend;
9997 /* Section base relative. */
9998 signed_value = value - sb + signed_addend;
10000 /* Calculate the value of the relevant G_{n-1} to obtain
10001 the residual at that stage. */
10002 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10004 /* Check for overflow. */
10005 if (residual >= 0x1000)
10007 (*_bfd_error_handler)
10008 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10009 input_bfd, input_section,
10010 (long) rel->r_offset, abs (signed_value), howto->name);
10011 return bfd_reloc_overflow;
10014 /* Mask out the value and U bit. */
10015 insn &= 0xff7ff000;
10017 /* Set the U bit if the value to go in the place is non-negative. */
10018 if (signed_value >= 0)
10021 /* Encode the offset. */
10024 bfd_put_32 (input_bfd, insn, hit_data);
10026 return bfd_reloc_ok;
10028 case R_ARM_LDRS_PC_G0:
10029 case R_ARM_LDRS_PC_G1:
10030 case R_ARM_LDRS_PC_G2:
10031 case R_ARM_LDRS_SB_G0:
10032 case R_ARM_LDRS_SB_G1:
10033 case R_ARM_LDRS_SB_G2:
10035 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10036 bfd_vma pc = input_section->output_section->vma
10037 + input_section->output_offset + rel->r_offset;
10038 bfd_vma sb = 0; /* See note above. */
10040 bfd_signed_vma signed_value;
10043 /* Determine which groups of bits to calculate. */
10046 case R_ARM_LDRS_PC_G0:
10047 case R_ARM_LDRS_SB_G0:
10051 case R_ARM_LDRS_PC_G1:
10052 case R_ARM_LDRS_SB_G1:
10056 case R_ARM_LDRS_PC_G2:
10057 case R_ARM_LDRS_SB_G2:
10065 /* If REL, extract the addend from the insn. If RELA, it will
10066 have already been fetched for us. */
10067 if (globals->use_rel)
10069 int negative = (insn & (1 << 23)) ? 1 : -1;
10070 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
10073 /* Compute the value (X) to go in the place. */
10074 if (r_type == R_ARM_LDRS_PC_G0
10075 || r_type == R_ARM_LDRS_PC_G1
10076 || r_type == R_ARM_LDRS_PC_G2)
10078 signed_value = value - pc + signed_addend;
10080 /* Section base relative. */
10081 signed_value = value - sb + signed_addend;
10083 /* Calculate the value of the relevant G_{n-1} to obtain
10084 the residual at that stage. */
10085 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10087 /* Check for overflow. */
10088 if (residual >= 0x100)
10090 (*_bfd_error_handler)
10091 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10092 input_bfd, input_section,
10093 (long) rel->r_offset, abs (signed_value), howto->name);
10094 return bfd_reloc_overflow;
10097 /* Mask out the value and U bit. */
10098 insn &= 0xff7ff0f0;
10100 /* Set the U bit if the value to go in the place is non-negative. */
10101 if (signed_value >= 0)
10104 /* Encode the offset. */
10105 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
10107 bfd_put_32 (input_bfd, insn, hit_data);
10109 return bfd_reloc_ok;
10111 case R_ARM_LDC_PC_G0:
10112 case R_ARM_LDC_PC_G1:
10113 case R_ARM_LDC_PC_G2:
10114 case R_ARM_LDC_SB_G0:
10115 case R_ARM_LDC_SB_G1:
10116 case R_ARM_LDC_SB_G2:
10118 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10119 bfd_vma pc = input_section->output_section->vma
10120 + input_section->output_offset + rel->r_offset;
10121 bfd_vma sb = 0; /* See note above. */
10123 bfd_signed_vma signed_value;
10126 /* Determine which groups of bits to calculate. */
10129 case R_ARM_LDC_PC_G0:
10130 case R_ARM_LDC_SB_G0:
10134 case R_ARM_LDC_PC_G1:
10135 case R_ARM_LDC_SB_G1:
10139 case R_ARM_LDC_PC_G2:
10140 case R_ARM_LDC_SB_G2:
10148 /* If REL, extract the addend from the insn. If RELA, it will
10149 have already been fetched for us. */
10150 if (globals->use_rel)
10152 int negative = (insn & (1 << 23)) ? 1 : -1;
10153 signed_addend = negative * ((insn & 0xff) << 2);
10156 /* Compute the value (X) to go in the place. */
10157 if (r_type == R_ARM_LDC_PC_G0
10158 || r_type == R_ARM_LDC_PC_G1
10159 || r_type == R_ARM_LDC_PC_G2)
10161 signed_value = value - pc + signed_addend;
10163 /* Section base relative. */
10164 signed_value = value - sb + signed_addend;
10166 /* Calculate the value of the relevant G_{n-1} to obtain
10167 the residual at that stage. */
10168 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
10170 /* Check for overflow. (The absolute value to go in the place must be
10171 divisible by four and, after having been divided by four, must
10172 fit in eight bits.) */
10173 if ((residual & 0x3) != 0 || residual >= 0x400)
10175 (*_bfd_error_handler)
10176 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
10177 input_bfd, input_section,
10178 (long) rel->r_offset, abs (signed_value), howto->name);
10179 return bfd_reloc_overflow;
10182 /* Mask out the value and U bit. */
10183 insn &= 0xff7fff00;
10185 /* Set the U bit if the value to go in the place is non-negative. */
10186 if (signed_value >= 0)
10189 /* Encode the offset. */
10190 insn |= residual >> 2;
10192 bfd_put_32 (input_bfd, insn, hit_data);
10194 return bfd_reloc_ok;
10197 return bfd_reloc_notsupported;
10201 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
/* Used for REL-style relocatable (ld -r) links: the addend lives in the
   instruction bits themselves, so it is extracted, adjusted by INCREMENT,
   and re-encoded in place.  */
10203 arm_add_to_rel (bfd * abfd,
10204 bfd_byte * address,
10205 reloc_howto_type * howto,
10206 bfd_signed_vma increment)
10208 bfd_signed_vma addend;
/* Thumb branch relocs split their offset across two 16-bit halfwords and
   scale it by two, so they get special handling.  */
10210 if (howto->type == R_ARM_THM_CALL
10211 || howto->type == R_ARM_THM_JUMP24)
10213 int upper_insn, lower_insn;
10216 upper_insn = bfd_get_16 (abfd, address);
10217 lower_insn = bfd_get_16 (abfd, address + 2);
10218 upper = upper_insn & 0x7ff;
10219 lower = lower_insn & 0x7ff;
/* Reassemble the branch offset from the split upper/lower 11-bit fields
   (low bit implicit zero), then apply the increment.  */
10221 addend = (upper << 12) | (lower << 1);
10222 addend += increment;
/* Re-split the adjusted offset back into the two instruction halfwords,
   preserving the opcode bits.  */
10225 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
10226 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
10228 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
10229 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
/* Generic 32-bit case: extract the addend via the howto masks.  */
10235 contents = bfd_get_32 (abfd, address);
10237 /* Get the (signed) value from the instruction. */
10238 addend = contents & howto->src_mask;
/* Sign-extend when the top bit of the source field is set.  */
10239 if (addend & ((howto->src_mask + 1) >> 1))
10241 bfd_signed_vma mask;
10244 mask &= ~ howto->src_mask;
10248 /* Add in the increment, (which is a byte value). */
10249 switch (howto->type)
10252 addend += increment;
/* NOTE(review): howto->size is used here as a shift count to scale the
   extracted addend to bytes before adding -- confirm this matches the
   howto table's size encoding.  */
10259 addend <<= howto->size;
10260 addend += increment;
10262 /* Should we check for overflow here ? */
10264 /* Drop any undesired bits. */
10265 addend >>= howto->rightshift;
/* Merge the re-encoded addend back into the instruction word.  */
10269 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
10271 bfd_put_32 (abfd, contents, address);
/* Predicate: R_TYPE is any ARM TLS relocation, in either the traditional
   or the GNU descriptor dialect.  Used to cross-check relocation types
   against STT_TLS symbol types during relocation.  */
10275 #define IS_ARM_TLS_RELOC(R_TYPE) \
10276 ((R_TYPE) == R_ARM_TLS_GD32 \
10277 || (R_TYPE) == R_ARM_TLS_LDO32 \
10278 || (R_TYPE) == R_ARM_TLS_LDM32 \
10279 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
10280 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
10281 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
10282 || (R_TYPE) == R_ARM_TLS_LE32 \
10283 || (R_TYPE) == R_ARM_TLS_IE32 \
10284 || IS_ARM_TLS_GNU_RELOC (R_TYPE))
10286 /* Specific set of relocations for the gnu tls dialect. */
10287 #define IS_ARM_TLS_GNU_RELOC(R_TYPE) \
10288 ((R_TYPE) == R_ARM_TLS_GOTDESC \
10289 || (R_TYPE) == R_ARM_TLS_CALL \
10290 || (R_TYPE) == R_ARM_THM_TLS_CALL \
10291 || (R_TYPE) == R_ARM_TLS_DESCSEQ \
10292 || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
10294 /* Relocate an ARM ELF section. */
/* Standard ELF backend relocate_section hook: iterate over RELOCS for
   INPUT_SECTION, resolve each relocation against local or global symbols,
   patch CONTENTS, and report problems through the linker callbacks.  */
10297 elf32_arm_relocate_section (bfd * output_bfd,
10298 struct bfd_link_info * info,
10300 asection * input_section,
10301 bfd_byte * contents,
10302 Elf_Internal_Rela * relocs,
10303 Elf_Internal_Sym * local_syms,
10304 asection ** local_sections)
10306 Elf_Internal_Shdr *symtab_hdr;
10307 struct elf_link_hash_entry **sym_hashes;
10308 Elf_Internal_Rela *rel;
10309 Elf_Internal_Rela *relend;
10311 struct elf32_arm_link_hash_table * globals;
10313 globals = elf32_arm_hash_table (info);
10314 if (globals == NULL)
10317 symtab_hdr = & elf_symtab_hdr (input_bfd);
10318 sym_hashes = elf_sym_hashes (input_bfd);
10321 relend = relocs + input_section->reloc_count;
10322 for (; rel < relend; rel++)
10325 reloc_howto_type * howto;
10326 unsigned long r_symndx;
10327 Elf_Internal_Sym * sym;
10329 struct elf_link_hash_entry * h;
10330 bfd_vma relocation;
10331 bfd_reloc_status_type r;
10334 bfd_boolean unresolved_reloc = FALSE;
10335 char *error_message = NULL;
/* Decode the relocation; arm_real_reloc_type presumably maps legacy or
   vendor reloc numbers onto the canonical set -- its body is outside
   this chunk, confirm there.  */
10337 r_symndx = ELF32_R_SYM (rel->r_info);
10338 r_type = ELF32_R_TYPE (rel->r_info);
10339 r_type = arm_real_reloc_type (globals, r_type);
/* Vtable garbage-collection markers require no bits to be patched.  */
10341 if ( r_type == R_ARM_GNU_VTENTRY
10342 || r_type == R_ARM_GNU_VTINHERIT)
10345 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
10346 howto = bfd_reloc.howto;
/* Local symbol: resolve directly from the input symbol table.  */
10352 if (r_symndx < symtab_hdr->sh_info)
10354 sym = local_syms + r_symndx;
10355 sym_type = ELF32_ST_TYPE (sym->st_info);
10356 sec = local_sections[r_symndx];
10358 /* An object file might have a reference to a local
10359 undefined symbol. This is a daft object file, but we
10360 should at least do something about it. V4BX & NONE
10361 relocations do not use the symbol and are explicitly
10362 allowed to use the undefined symbol, so allow those.
10363 Likewise for relocations against STN_UNDEF. */
10364 if (r_type != R_ARM_V4BX
10365 && r_type != R_ARM_NONE
10366 && r_symndx != STN_UNDEF
10367 && bfd_is_und_section (sec)
10368 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
10370 if (!info->callbacks->undefined_symbol
10371 (info, bfd_elf_string_from_elf_section
10372 (input_bfd, symtab_hdr->sh_link, sym->st_name),
10373 input_bfd, input_section,
10374 rel->r_offset, TRUE))
/* REL input: compute the symbol address ourselves.  SEC_MERGE sections
   additionally need the in-place addend re-mapped to where the merged
   data landed in the output.  */
10378 if (globals->use_rel)
10380 relocation = (sec->output_section->vma
10381 + sec->output_offset
10383 if (!info->relocatable
10384 && (sec->flags & SEC_MERGE)
10385 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10388 bfd_vma addend, value;
/* Extract the in-place addend; MOVW/MOVT variants keep it split across
   instruction fields, everything else uses howto->src_mask.  */
10392 case R_ARM_MOVW_ABS_NC:
10393 case R_ARM_MOVT_ABS:
10394 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10395 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
10396 addend = (addend ^ 0x8000) - 0x8000;
10399 case R_ARM_THM_MOVW_ABS_NC:
10400 case R_ARM_THM_MOVT_ABS:
10401 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
10403 value |= bfd_get_16 (input_bfd,
10404 contents + rel->r_offset + 2);
10405 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
10406 | ((value & 0x04000000) >> 15);
10407 addend = (addend ^ 0x8000) - 0x8000;
10411 if (howto->rightshift
10412 || (howto->src_mask & (howto->src_mask + 1)))
10414 (*_bfd_error_handler)
10415 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
10416 input_bfd, input_section,
10417 (long) rel->r_offset, howto->name);
10421 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
10423 /* Get the (signed) value from the instruction. */
10424 addend = value & howto->src_mask;
10425 if (addend & ((howto->src_mask + 1) >> 1))
10427 bfd_signed_vma mask;
10430 mask &= ~ howto->src_mask;
/* Let the generic helper relocate the addend into the merged section,
   then rebase it on the output placement.  */
10438 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
10440 addend += msec->output_section->vma + msec->output_offset;
10442 /* Cases here must match those in the preceding
10443 switch statement. */
10446 case R_ARM_MOVW_ABS_NC:
10447 case R_ARM_MOVT_ABS:
10448 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
10449 | (addend & 0xfff);
10450 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
10453 case R_ARM_THM_MOVW_ABS_NC:
10454 case R_ARM_THM_MOVT_ABS:
10455 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
10456 | (addend & 0xff) | ((addend & 0x0800) << 15);
10457 bfd_put_16 (input_bfd, value >> 16,
10458 contents + rel->r_offset);
10459 bfd_put_16 (input_bfd, value,
10460 contents + rel->r_offset + 2);
10464 value = (value & ~ howto->dst_mask)
10465 | (addend & howto->dst_mask);
10466 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
/* RELA input: the generic helper folds r_addend in for us.  */
10472 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
/* Global symbol: resolve via the ELF linker's standard machinery.  */
10476 bfd_boolean warned;
10478 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
10479 r_symndx, symtab_hdr, sym_hashes,
10480 h, sec, relocation,
10481 unresolved_reloc, warned);
10483 sym_type = h->type;
10486 if (sec != NULL && discarded_section (sec))
10487 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
10488 rel, 1, relend, howto, 0, contents);
10490 if (info->relocatable)
10492 /* This is a relocatable link. We don't have to change
10493 anything, unless the reloc is against a section symbol,
10494 in which case we have to adjust according to where the
10495 section symbol winds up in the output section. */
10496 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
10498 if (globals->use_rel)
10499 arm_add_to_rel (input_bfd, contents + rel->r_offset,
10500 howto, (bfd_signed_vma) sec->output_offset)
10502 rel->r_addend += sec->output_offset;
/* Pick a printable symbol name for diagnostics below.  */
10508 name = h->root.root.string;
10511 name = (bfd_elf_string_from_elf_section
10512 (input_bfd, symtab_hdr->sh_link, sym->st_name));
10513 if (name == NULL || *name == '\0')
10514 name = bfd_section_name (input_bfd, sec);
/* Diagnose TLS relocations applied to non-TLS symbols and vice versa.  */
10517 if (r_symndx != STN_UNDEF
10518 && r_type != R_ARM_NONE
10520 || h->root.type == bfd_link_hash_defined
10521 || h->root.type == bfd_link_hash_defweak)
10522 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
10524 (*_bfd_error_handler)
10525 ((sym_type == STT_TLS
10526 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
10527 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
10530 (long) rel->r_offset,
10535 /* We call elf32_arm_final_link_relocate unless we're completely
10536 done, i.e., the relaxation produced the final output we want,
10537 and we won't let anybody mess with it. Also, we have to do
10538 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
10539 both in relaxed and non-relaxed cases */
10540 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
10541 || (IS_ARM_TLS_GNU_RELOC (r_type)
10542 && !((h ? elf32_arm_hash_entry (h)->tls_type :
10543 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
10546 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
10547 contents, rel, h == NULL);
10548 /* This may have been marked unresolved because it came from
10549 a shared library. But we've just dealt with that. */
10550 unresolved_reloc = 0;
10553 r = bfd_reloc_continue;
10555 if (r == bfd_reloc_continue)
10556 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
10557 input_section, contents, rel,
10558 relocation, info, sec, name, sym_type,
10559 (h ? h->target_internal
10560 : ARM_SYM_BRANCH_TYPE (sym)), h,
10561 &unresolved_reloc, &error_message);
10563 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
10564 because such sections are not SEC_ALLOC and thus ld.so will
10565 not process them. */
10566 if (unresolved_reloc
10567 && !((input_section->flags & SEC_DEBUGGING) != 0
10569 && _bfd_elf_section_offset (output_bfd, info, input_section,
10570 rel->r_offset) != (bfd_vma) -1)
10572 (*_bfd_error_handler)
10573 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
10576 (long) rel->r_offset,
10578 h->root.root.string);
/* Report any relocation failure through the matching linker callback.  */
10582 if (r != bfd_reloc_ok)
10586 case bfd_reloc_overflow:
10587 /* If the overflowing reloc was to an undefined symbol,
10588 we have already printed one error message and there
10589 is no point complaining again. */
10591 h->root.type != bfd_link_hash_undefined)
10592 && (!((*info->callbacks->reloc_overflow)
10593 (info, (h ? &h->root : NULL), name, howto->name,
10594 (bfd_vma) 0, input_bfd, input_section,
10599 case bfd_reloc_undefined:
10600 if (!((*info->callbacks->undefined_symbol)
10601 (info, name, input_bfd, input_section,
10602 rel->r_offset, TRUE)))
10606 case bfd_reloc_outofrange:
10607 error_message = _("out of range");
10610 case bfd_reloc_notsupported:
10611 error_message = _("unsupported relocation");
10614 case bfd_reloc_dangerous:
10615 /* error_message should already be set. */
10619 error_message = _("unknown error");
10620 /* Fall through. */
10623 BFD_ASSERT (error_message != NULL);
10624 if (!((*info->callbacks->reloc_dangerous)
10625 (info, error_message, input_bfd, input_section,
10636 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
10637 adds the edit to the start of the list. (The list must be built in order of
10638 ascending TINDEX: the function's callers are primarily responsible for
10639 maintaining that condition). */
10642 add_unwind_table_edit (arm_unwind_table_edit **head,
10643 arm_unwind_table_edit **tail,
10644 arm_unwind_edit_type type,
10645 asection *linked_section,
10646 unsigned int tindex)
/* Allocated with xmalloc, so allocation failure aborts rather than
   returning NULL.  */
10648 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
10649 xmalloc (sizeof (arm_unwind_table_edit));
10651 new_edit->type = type;
10652 new_edit->linked_section = linked_section;
10653 new_edit->index = tindex;
/* Non-zero TINDEX: append at the tail, keeping ascending order.  */
10657 new_edit->next = NULL;
10660 (*tail)->next = new_edit;
10662 (*tail) = new_edit;
10665 (*head) = new_edit;
/* TINDEX == 0: push onto the front of the list.  */
10669 new_edit->next = *head;
10678 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
10680 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
10682 adjust_exidx_size(asection *exidx_sec, int adjust)
/* Remember the pre-edit size once (rawsize), so the original contents can
   still be addressed later.  NOTE(review): the exact consumer of rawsize
   (presumably elf32_arm_write_section) is outside this chunk -- confirm.  */
10686 if (!exidx_sec->rawsize)
10687 exidx_sec->rawsize = exidx_sec->size;
10689 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
10690 out_sec = exidx_sec->output_section;
10691 /* Adjust size of output section. */
10692 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
10695 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
10697 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
10699 struct _arm_elf_section_data *exidx_arm_data;
10701 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
/* Record the edit for later application; UINT_MAX presumably means "after
   the last entry" (inferred from INSERT_EXIDX_CANTUNWIND_AT_END -- confirm
   against elf32_arm_write_section).  */
10702 add_unwind_table_edit (
10703 &exidx_arm_data->u.exidx.unwind_edit_list,
10704 &exidx_arm_data->u.exidx.unwind_edit_tail,
10705 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
/* Grow the section by one index entry: two 32-bit words = 8 bytes.  */
10707 adjust_exidx_size(exidx_sec, 8);
10710 /* Scan .ARM.exidx tables, and create a list describing edits which should be
10711 made to those tables, such that:
10713 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
10714 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
10715 codes which have been inlined into the index).
10717 If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
10719 The edits are applied when the tables are written
10720 (in elf32_arm_write_section). */
10723 elf32_arm_fix_exidx_coverage (asection **text_section_order,
10724 unsigned int num_text_sections,
10725 struct bfd_link_info *info,
10726 bfd_boolean merge_exidx_entries)
10729 unsigned int last_second_word = 0, i;
10730 asection *last_exidx_sec = NULL;
10731 asection *last_text_sec = NULL;
10732 int last_unwind_type = -1;
10734 /* Walk over all EXIDX sections, and create backlinks from the corresponding
10736 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
10740 for (sec = inp->sections; sec != NULL; sec = sec->next)
10742 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
10743 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
10745 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
10748 if (elf_sec->linked_to)
10750 Elf_Internal_Shdr *linked_hdr
10751 = &elf_section_data (elf_sec->linked_to)->this_hdr;
10752 struct _arm_elf_section_data *linked_sec_arm_data
10753 = get_arm_elf_section_data (linked_hdr->bfd_section);
10755 if (linked_sec_arm_data == NULL)
10758 /* Link this .ARM.exidx section back from the text section it
10760 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
10765 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
10766 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
10767 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
10769 for (i = 0; i < num_text_sections; i++)
10771 asection *sec = text_section_order[i];
10772 asection *exidx_sec;
10773 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
10774 struct _arm_elf_section_data *exidx_arm_data;
10775 bfd_byte *contents = NULL;
10776 int deleted_exidx_bytes = 0;
10778 arm_unwind_table_edit *unwind_edit_head = NULL;
10779 arm_unwind_table_edit *unwind_edit_tail = NULL;
10780 Elf_Internal_Shdr *hdr;
10783 if (arm_data == NULL)
10786 exidx_sec = arm_data->u.text.arm_exidx_sec;
10787 if (exidx_sec == NULL)
10789 /* Section has no unwind data. */
10790 if (last_unwind_type == 0 || !last_exidx_sec)
10793 /* Ignore zero sized sections. */
10794 if (sec->size == 0)
10797 insert_cantunwind_after(last_text_sec, last_exidx_sec);
10798 last_unwind_type = 0;
10802 /* Skip /DISCARD/ sections. */
10803 if (bfd_is_abs_section (exidx_sec->output_section))
10806 hdr = &elf_section_data (exidx_sec)->this_hdr;
10807 if (hdr->sh_type != SHT_ARM_EXIDX)
10810 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
10811 if (exidx_arm_data == NULL)
10814 ibfd = exidx_sec->owner;
10816 if (hdr->contents != NULL)
10817 contents = hdr->contents;
10818 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
/* Each index entry is two words; the second word classifies the entry
   (1 = EXIDX_CANTUNWIND, high bit set = inlined unwind opcodes).  */
10822 for (j = 0; j < hdr->sh_size; j += 8)
10824 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
10828 /* An EXIDX_CANTUNWIND entry. */
10829 if (second_word == 1)
10831 if (last_unwind_type == 0)
10835 /* Inlined unwinding data. Merge if equal to previous. */
10836 else if ((second_word & 0x80000000) != 0)
10838 if (merge_exidx_entries
10839 && last_second_word == second_word && last_unwind_type == 1)
10842 last_second_word = second_word;
10844 /* Normal table entry. In theory we could merge these too,
10845 but duplicate entries are likely to be much less common. */
/* A mergeable duplicate: queue its deletion.  */
10851 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
10852 DELETE_EXIDX_ENTRY, NULL, j / 8);
10854 deleted_exidx_bytes += 8;
10857 last_unwind_type = unwind_type;
10860 /* Free contents if we allocated it ourselves. */
10861 if (contents != hdr->contents)
10864 /* Record edits to be applied later (in elf32_arm_write_section). */
10865 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
10866 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
10868 if (deleted_exidx_bytes > 0)
10869 adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
10871 last_exidx_sec = exidx_sec;
10872 last_text_sec = sec;
10875 /* Add terminating CANTUNWIND entry. */
10876 if (last_exidx_sec && last_unwind_type != 0)
10877 insert_cantunwind_after(last_text_sec, last_exidx_sec);
/* Copy the glue section named NAME from IBFD into its place in the output
   file, first letting elf32_arm_write_section apply any pending edits.
   Sections that are absent or SEC_EXCLUDE'd are silently skipped.  */
10883 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
10884 bfd *ibfd, const char *name)
10886 asection *sec, *osec;
10888 sec = bfd_get_section_by_name (ibfd, name);
10889 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
10892 osec = sec->output_section;
10893 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
10896 if (! bfd_set_section_contents (obfd, osec, sec->contents,
10897 sec->output_offset, sec->size))
/* ARM-specific final-link hook: run the generic ELF final link, then
   post-process stub sections and emit the interworking/erratum glue
   sections owned by the glue BFD.  */
10904 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
10906 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
10907 asection *sec, *osec;
10909 if (globals == NULL)
10912 /* Invoke the regular ELF backend linker to do all the work. */
10913 if (!bfd_elf_final_link (abfd, info))
10916 /* Process stub sections (eg BE8 encoding, ...). */
10917 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
10919 for (i=0; i<htab->top_id; i++)
10921 sec = htab->stub_group[i].stub_sec;
10922 /* Only process it once, in its link_sec slot. */
10923 if (sec && i == htab->stub_group[i].link_sec->id)
10925 osec = sec->output_section;
10926 elf32_arm_write_section (abfd, info, sec, sec->contents);
10927 if (! bfd_set_section_contents (abfd, osec, sec->contents,
10928 sec->output_offset, sec->size))
10933 /* Write out any glue sections now that we have created all the
/* One call per glue flavour: ARM->Thumb, Thumb->ARM, VFP11 erratum
   veneers, and v4 BX veneers.  */
10935 if (globals->bfd_of_glue_owner != NULL)
10937 if (! elf32_arm_output_glue_section (info, abfd,
10938 globals->bfd_of_glue_owner,
10939 ARM2THUMB_GLUE_SECTION_NAME))
10942 if (! elf32_arm_output_glue_section (info, abfd,
10943 globals->bfd_of_glue_owner,
10944 THUMB2ARM_GLUE_SECTION_NAME))
10947 if (! elf32_arm_output_glue_section (info, abfd,
10948 globals->bfd_of_glue_owner,
10949 VFP11_ERRATUM_VENEER_SECTION_NAME))
10952 if (! elf32_arm_output_glue_section (info, abfd,
10953 globals->bfd_of_glue_owner,
10954 ARM_BX_GLUE_SECTION_NAME))
10961 /* Return a best guess for the machine number based on the attributes. */
10963 static unsigned int
10964 bfd_arm_get_mach_from_attributes (bfd * abfd)
10966 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
10970 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
10971 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
10972 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
10974 case TAG_CPU_ARCH_V5TE:
/* v5TE also covers the iWMMXt parts; disambiguate via the Tag_CPU_name
   attribute string.  */
10978 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
10979 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
10983 if (strcmp (name, "IWMMXT2") == 0)
10984 return bfd_mach_arm_iWMMXt2;
10986 if (strcmp (name, "IWMMXT") == 0)
10987 return bfd_mach_arm_iWMMXt;
10990 return bfd_mach_arm_5TE;
10994 return bfd_mach_arm_unknown;
10998 /* Set the right machine number. */
11001 elf32_arm_object_p (bfd *abfd)
/* Preference order: explicit ARM note section, then the Maverick float
   e_flags bit (implies ep9312), then the EABI build attributes.  */
11005 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
11007 if (mach == bfd_mach_arm_unknown)
11009 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
11010 mach = bfd_mach_arm_ep9312;
11012 mach = bfd_arm_get_mach_from_attributes (abfd);
11015 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
11019 /* Function to keep ARM specific flags in the ELF header. */
11022 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
/* If flags were already recorded and differ, warn about interworking
   conflicts -- but only for legacy (pre-EABI) objects.  */
11024 if (elf_flags_init (abfd)
11025 && elf_elfheader (abfd)->e_flags != flags)
11027 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
11029 if (flags & EF_ARM_INTERWORK)
11030 (*_bfd_error_handler)
11031 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
11035 (_("Warning: Clearing the interworking flag of %B due to outside request"),
11041 elf_elfheader (abfd)->e_flags = flags;
11042 elf_flags_init (abfd) = TRUE;
11048 /* Copy backend specific data from one object module to another. */
/* NOTE(review): interior lines (early returns after the compatibility
   checks, braces) are elided in this dump.  */
11051 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
11054 flagword out_flags;
/* Both BFDs must be ARM ELF for a flag copy to make sense.  */
11056 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
11059 in_flags = elf_elfheader (ibfd)->e_flags;
11060 out_flags = elf_elfheader (obfd)->e_flags;
/* For pre-EABI objects whose output flags are already initialised,
   verify ABI compatibility before overwriting them.  */
11062 if (elf_flags_init (obfd)
11063 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
11064 && in_flags != out_flags)
11066 /* Cannot mix APCS26 and APCS32 code. */
11067 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
11070 /* Cannot mix float APCS and non-float APCS code. */
11071 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
11074 /* If the src and dest have different interworking flags
11075 then turn off the interworking bit. */
11076 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
11078 if (out_flags & EF_ARM_INTERWORK)
11080 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
11083 in_flags &= ~EF_ARM_INTERWORK;
11086 /* Likewise for PIC, though don't warn for this case. */
11087 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
11088 in_flags &= ~EF_ARM_PIC;
/* Install the (possibly adjusted) input flags on the output.  */
11091 elf_elfheader (obfd)->e_flags = in_flags;
11092 elf_flags_init (obfd) = TRUE;
11094 /* Also copy the EI_OSABI field. */
11095 elf_elfheader (obfd)->e_ident[EI_OSABI] =
11096 elf_elfheader (ibfd)->e_ident[EI_OSABI];
11098 /* Copy object attributes. */
11099 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11104 /* Values for Tag_ABI_PCS_R9_use. */
/* NOTE(review): the enumerator lists for Tag_ABI_PCS_R9_use and the
   enum keywords/braces are elided in this dump; only the RW_data and
   enum_size enumerators below are visible.  */
11113 /* Values for Tag_ABI_PCS_RW_data. */
11116 AEABI_PCS_RW_data_absolute,
11117 AEABI_PCS_RW_data_PCrel,
11118 AEABI_PCS_RW_data_SBrel,
11119 AEABI_PCS_RW_data_unused
11122 /* Values for Tag_ABI_enum_size. */
11128 AEABI_enum_forced_wide
11131 /* Determine whether an object attribute tag takes an integer, a
/* NOTE(review): the rest of this header comment, the return type and
   one 'else if' arm are elided in this dump.  */
11135 elf32_arm_obj_attrs_arg_type (int tag)
/* Tag_compatibility carries both an integer and a string.  */
11137 if (tag == Tag_compatibility)
11138 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
11139 else if (tag == Tag_nodefaults)
11140 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
11141 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
11142 return ATTR_TYPE_FLAG_STR_VAL;
11144 return ATTR_TYPE_FLAG_INT_VAL;
/* Otherwise: by AEABI convention, odd tags are strings, even are ints.  */
11146 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
11149 /* The ABI defines that Tag_conformance should be emitted first, and that
11150 Tag_nodefaults should be second (if either is defined). This sets those
11151 two positions, and bumps up the position of all the remaining tags to
/* NOTE(review): the return type and the return statements of the last
   two conditionals are elided in this dump.  */
11154 elf32_arm_obj_attrs_order (int num)
11156 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
11157 return Tag_conformance;
11158 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
11159 return Tag_nodefaults;
/* Shift the remaining tags to compensate for the two relocated ones.  */
11160 if ((num - 2) < Tag_nodefaults)
11162 if ((num - 1) < Tag_conformance)
11167 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
/* NOTE(review): return type, braces and return values are elided.
   Tags below 64 (mod 128) are mandatory per the AEABI, so an unknown
   one is a hard error; others only produce a warning.  */
11169 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
11171 if ((tag & 127) < 64)
11174 (_("%B: Unknown mandatory EABI object attribute %d"),
11176 bfd_set_error (bfd_error_bad_value);
11182 (_("Warning: %B: Unknown EABI object attribute %d"),
11188 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
11189 Returns -1 if no architecture could be read. */
/* NOTE(review): return type, braces and the success-path return are
   elided in this dump.  */
11192 get_secondary_compatible_arch (bfd *abfd)
11194 obj_attribute *attr =
11195 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11197 /* Note: the tag and its argument below are uleb128 values, though
11198 currently-defined values fit in one byte for each. */
/* Accept only the well-formed encoding: Tag_CPU_arch, a one-byte
   (high bit clear) value, then a NUL terminator.  */
11200 && attr->s[0] == Tag_CPU_arch
11201 && (attr->s[1] & 128) != 128
11202 && attr->s[2] == 0)
11205 /* This tag is "safely ignorable", so don't complain if it looks funny. */
11209 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
11210 The tag is removed if ARCH is -1. */
/* NOTE(review): return type, the arch == -1 removal branch, and the
   lines writing the arch byte and NUL terminator are elided.  */
11213 set_secondary_compatible_arch (bfd *abfd, int arch)
11215 obj_attribute *attr =
11216 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
11224 /* Note: the tag and its argument below are uleb128 values, though
11225 currently-defined values fit in one byte for each. */
/* Three bytes: tag, value, NUL.  Allocated on the BFD's objalloc, so it
   lives as long as the BFD.  */
11227 attr->s = (char *) bfd_alloc (abfd, 3);
11228 attr->s[0] = Tag_CPU_arch;
11233 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
/* NOTE(review): many interior lines are elided in this dump: the rest
   of this header comment, several merge-table rows and array headers,
   early returns after the error reports, and the closing #undef.  The
   visible code merges two Tag_CPU_arch values using per-architecture
   lookup tables, treating "V4T plus V6_M" as a pseudo-architecture.  */
11237 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
11238 int newtag, int secondary_compat)
11240 #define T(X) TAG_CPU_ARCH_##X
11241 int tagl, tagh, result;
/* Each table below gives, for a fixed "high" tag, the merge result
   indexed by the "low" tag.  Rows for some tags are elided here.  */
11244 T(V6T2), /* PRE_V4. */
11246 T(V6T2), /* V4T. */
11247 T(V6T2), /* V5T. */
11248 T(V6T2), /* V5TE. */
11249 T(V6T2), /* V5TEJ. */
11252 T(V6T2) /* V6T2. */
11256 T(V6K), /* PRE_V4. */
11260 T(V6K), /* V5TE. */
11261 T(V6K), /* V5TEJ. */
11263 T(V6KZ), /* V6KZ. */
11269 T(V7), /* PRE_V4. */
11274 T(V7), /* V5TEJ. */
11287 T(V6K), /* V5TE. */
11288 T(V6K), /* V5TEJ. */
11290 T(V6KZ), /* V6KZ. */
11294 T(V6_M) /* V6_M. */
11296 const int v6s_m[] =
11302 T(V6K), /* V5TE. */
11303 T(V6K), /* V5TEJ. */
11305 T(V6KZ), /* V6KZ. */
11309 T(V6S_M), /* V6_M. */
11310 T(V6S_M) /* V6S_M. */
11312 const int v7e_m[] =
11316 T(V7E_M), /* V4T. */
11317 T(V7E_M), /* V5T. */
11318 T(V7E_M), /* V5TE. */
11319 T(V7E_M), /* V5TEJ. */
11320 T(V7E_M), /* V6. */
11321 T(V7E_M), /* V6KZ. */
11322 T(V7E_M), /* V6T2. */
11323 T(V7E_M), /* V6K. */
11324 T(V7E_M), /* V7. */
11325 T(V7E_M), /* V6_M. */
11326 T(V7E_M), /* V6S_M. */
11327 T(V7E_M) /* V7E_M. */
11329 const int v4t_plus_v6_m[] =
11335 T(V5TE), /* V5TE. */
11336 T(V5TEJ), /* V5TEJ. */
11338 T(V6KZ), /* V6KZ. */
11339 T(V6T2), /* V6T2. */
11342 T(V6_M), /* V6_M. */
11343 T(V6S_M), /* V6S_M. */
11344 T(V7E_M), /* V7E_M. */
11345 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
11347 const int *comb[] =
11355 /* Pseudo-architecture. */
11359 /* Check we've not got a higher architecture than we know about. */
11361 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
11363 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
11367 /* Override old tag if we have a Tag_also_compatible_with on the output. */
11369 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
11370 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
11371 oldtag = T(V4T_PLUS_V6_M);
11373 /* And override the new tag if we have a Tag_also_compatible_with on the
11376 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
11377 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
11378 newtag = T(V4T_PLUS_V6_M);
11380 tagl = (oldtag < newtag) ? oldtag : newtag;
11381 result = tagh = (oldtag > newtag) ? oldtag : newtag;
11383 /* Architectures before V6KZ add features monotonically. */
11384 if (tagh <= TAG_CPU_ARCH_V6KZ)
/* For later architectures, look the pair up in the merge tables.  */
11387 result = comb[tagh - T(V6T2)][tagl];
11389 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
11390 as the canonical version. */
11391 if (result == T(V4T_PLUS_V6_M))
11394 *secondary_compat_out = T(V6_M);
11397 *secondary_compat_out = -1;
/* NOTE(review): reached when the table lookup yields no valid merge;
   the early return following this report is elided.  */
11401 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
11402 ibfd, oldtag, newtag);
11410 /* Query attributes object to see if integer divide instructions may be
11411 present in an object. */
/* NOTE(review): return type, switch header, case labels and most return
   statements are elided in this dump.  */
11413 elf32_arm_attributes_accept_div (const obj_attribute *attr)
11415 int arch = attr[Tag_CPU_arch].i;
11416 int profile = attr[Tag_CPU_arch_profile].i;
11418 switch (attr[Tag_DIV_use].i)
/* Tag_DIV_use == 0: divide is permitted when the base architecture
   provides it (v7-R/v7-M, or v7E-M and later).  */
11421 /* Integer divide allowed if instruction contained in architecture. */
11422 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
11424 else if (arch >= TAG_CPU_ARCH_V7E_M)
11430 /* Integer divide explicitly prohibited. */
11434 /* Unrecognised case - treat as allowing divide everywhere. */
11436 /* Integer divide allowed in ARM state. */
11441 /* Query attributes object to see if integer divide instructions are
11442 forbidden to be in the object. This is not the inverse of
11443 elf32_arm_attributes_accept_div. */
/* Tag_DIV_use == 1 is the AEABI encoding for "divide not intended".  */
11445 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
11447 return attr[Tag_DIV_use].i == 1;
11450 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
11451 are conflicting attributes. */
/* NOTE(review): this function is heavily elided in this dump — the
   switch header of the per-tag loop, many braces, error-handler call
   lines, `result = FALSE` statements and `break`s are missing.  The
   visible structure is: first-object fast path, VFP-args pre-check,
   then a per-tag merge loop, then target-independent merging.  */
11454 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
11456 obj_attribute *in_attr;
11457 obj_attribute *out_attr;
11458 /* Some tags have 0 = don't care, 1 = strong requirement,
11459 2 = weak requirement. */
11460 static const int order_021[3] = {0, 2, 1};
11462 bfd_boolean result = TRUE;
11464 /* Skip the linker stubs file. This preserves previous behavior
11465 of accepting unknown attributes in the first input file - but
11467 if (ibfd->flags & BFD_LINKER_CREATED)
/* First real input object: just copy its attributes wholesale.  */
11470 if (!elf_known_obj_attributes_proc (obfd)[0].i)
11472 /* This is the first object. Copy the attributes. */
11473 _bfd_elf_copy_obj_attributes (ibfd, obfd);
11475 out_attr = elf_known_obj_attributes_proc (obfd);
11477 /* Use the Tag_null value to indicate the attributes have been
11481 /* We do not output objects with Tag_MPextension_use_legacy - we move
11482 the attribute's value to Tag_MPextension_use. */
11483 if (out_attr[Tag_MPextension_use_legacy].i != 0)
11485 if (out_attr[Tag_MPextension_use].i != 0
11486 && out_attr[Tag_MPextension_use_legacy].i
11487 != out_attr[Tag_MPextension_use].i)
11490 (_("Error: %B has both the current and legacy "
11491 "Tag_MPextension_use attributes"), ibfd);
11495 out_attr[Tag_MPextension_use] =
11496 out_attr[Tag_MPextension_use_legacy];
11497 out_attr[Tag_MPextension_use_legacy].type = 0;
11498 out_attr[Tag_MPextension_use_legacy].i = 0;
11504 in_attr = elf_known_obj_attributes_proc (ibfd);
11505 out_attr = elf_known_obj_attributes_proc (obfd);
11506 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
11507 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
11509 /* Ignore mismatches if the object doesn't use floating point. */
11510 if (out_attr[Tag_ABI_FP_number_model].i == 0)
11511 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
11512 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
11515 (_("error: %B uses VFP register arguments, %B does not"),
11516 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
11517 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
/* Per-tag merge loop; each case implements the AEABI merge rule for
   one known build attribute.  */
11522 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
11524 /* Merge this attribute with existing attributes. */
11527 case Tag_CPU_raw_name:
11529 /* These are merged after Tag_CPU_arch. */
11532 case Tag_ABI_optimization_goals:
11533 case Tag_ABI_FP_optimization_goals:
11534 /* Use the first value seen. */
11539 int secondary_compat = -1, secondary_compat_out = -1;
11540 unsigned int saved_out_attr = out_attr[i].i;
11541 static const char *name_table[] = {
11542 /* These aren't real CPU names, but we can't guess
11543 that from the architecture version alone. */
11559 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
11560 secondary_compat = get_secondary_compatible_arch (ibfd);
11561 secondary_compat_out = get_secondary_compatible_arch (obfd);
11562 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
11563 &secondary_compat_out,
11566 set_secondary_compatible_arch (obfd, secondary_compat_out);
11568 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
11569 if (out_attr[i].i == saved_out_attr)
11570 ; /* Leave the names alone. */
11571 else if (out_attr[i].i == in_attr[i].i)
11573 /* The output architecture has been changed to match the
11574 input architecture. Use the input names. */
11575 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
11576 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
11578 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
11579 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
11584 out_attr[Tag_CPU_name].s = NULL;
11585 out_attr[Tag_CPU_raw_name].s = NULL;
11588 /* If we still don't have a value for Tag_CPU_name,
11589 make one up now. Tag_CPU_raw_name remains blank. */
11590 if (out_attr[Tag_CPU_name].s == NULL
11591 && out_attr[i].i < ARRAY_SIZE (name_table))
11592 out_attr[Tag_CPU_name].s =
11593 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
11597 case Tag_ARM_ISA_use:
11598 case Tag_THUMB_ISA_use:
11599 case Tag_WMMX_arch:
11600 case Tag_Advanced_SIMD_arch:
11601 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
11602 case Tag_ABI_FP_rounding:
11603 case Tag_ABI_FP_exceptions:
11604 case Tag_ABI_FP_user_exceptions:
11605 case Tag_ABI_FP_number_model:
11606 case Tag_FP_HP_extension:
11607 case Tag_CPU_unaligned_access:
11609 case Tag_MPextension_use:
11610 /* Use the largest value specified. */
11611 if (in_attr[i].i > out_attr[i].i)
11612 out_attr[i].i = in_attr[i].i;
11615 case Tag_ABI_align_preserved:
11616 case Tag_ABI_PCS_RO_data:
11617 /* Use the smallest value specified. */
11618 if (in_attr[i].i < out_attr[i].i)
11619 out_attr[i].i = in_attr[i].i;
11622 case Tag_ABI_align_needed:
11623 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
11624 && (in_attr[Tag_ABI_align_preserved].i == 0
11625 || out_attr[Tag_ABI_align_preserved].i == 0))
11627 /* This error message should be enabled once all non-conformant
11628 binaries in the toolchain have had the attributes set
11631 (_("error: %B: 8-byte data alignment conflicts with %B"),
11635 /* Fall through. */
11636 case Tag_ABI_FP_denormal:
11637 case Tag_ABI_PCS_GOT_use:
11638 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
11639 value if greater than 2 (for future-proofing). */
11640 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
11641 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
11642 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
11643 out_attr[i].i = in_attr[i].i;
11646 case Tag_Virtualization_use:
11647 /* The virtualization tag effectively stores two bits of
11648 information: the intended use of TrustZone (in bit 0), and the
11649 intended use of Virtualization (in bit 1). */
11650 if (out_attr[i].i == 0)
11651 out_attr[i].i = in_attr[i].i;
11652 else if (in_attr[i].i != 0
11653 && in_attr[i].i != out_attr[i].i)
11655 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
11660 (_("error: %B: unable to merge virtualization attributes "
11668 case Tag_CPU_arch_profile:
11669 if (out_attr[i].i != in_attr[i].i)
11671 /* 0 will merge with anything.
11672 'A' and 'S' merge to 'A'.
11673 'R' and 'S' merge to 'R'.
11674 'M' and 'A|R|S' is an error. */
11675 if (out_attr[i].i == 0
11676 || (out_attr[i].i == 'S'
11677 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
11678 out_attr[i].i = in_attr[i].i;
11679 else if (in_attr[i].i == 0
11680 || (in_attr[i].i == 'S'
11681 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
11682 ; /* Do nothing. */
11686 (_("error: %B: Conflicting architecture profiles %c/%c"),
11688 in_attr[i].i ? in_attr[i].i : '0',
11689 out_attr[i].i ? out_attr[i].i : '0');
11696 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
11697 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
11698 when it's 0. It might mean absence of FP hardware if
11699 Tag_FP_arch is zero, otherwise it is effectively SP + DP. */
11701 static const struct
11705 } vfp_versions[7] =
11719 /* If the output has no requirement about FP hardware,
11720 follow the requirement of the input. */
11721 if (out_attr[i].i == 0)
11723 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
11724 out_attr[i].i = in_attr[i].i;
11725 out_attr[Tag_ABI_HardFP_use].i
11726 = in_attr[Tag_ABI_HardFP_use].i;
11729 /* If the input has no requirement about FP hardware, do
11731 else if (in_attr[i].i == 0)
11733 BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
11737 /* Both the input and the output have nonzero Tag_FP_arch.
11738 So Tag_ABI_HardFP_use is (SP & DP) when it's zero. */
11740 /* If both the input and the output have zero Tag_ABI_HardFP_use,
11742 if (in_attr[Tag_ABI_HardFP_use].i == 0
11743 && out_attr[Tag_ABI_HardFP_use].i == 0)
11745 /* If the input and the output have different Tag_ABI_HardFP_use,
11746 the combination of them is 3 (SP & DP). */
11747 else if (in_attr[Tag_ABI_HardFP_use].i
11748 != out_attr[Tag_ABI_HardFP_use].i)
11749 out_attr[Tag_ABI_HardFP_use].i = 3;
11751 /* Now we can handle Tag_FP_arch. */
11753 /* Values greater than 6 aren't defined, so just pick the
11755 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
11757 out_attr[i] = in_attr[i];
11760 /* The output uses the superset of input features
11761 (ISA version) and registers. */
11762 ver = vfp_versions[in_attr[i].i].ver;
11763 if (ver < vfp_versions[out_attr[i].i].ver)
11764 ver = vfp_versions[out_attr[i].i].ver;
11765 regs = vfp_versions[in_attr[i].i].regs;
11766 if (regs < vfp_versions[out_attr[i].i].regs)
11767 regs = vfp_versions[out_attr[i].i].regs;
11768 /* This assumes all possible supersets are also a valid
11770 for (newval = 6; newval > 0; newval--)
11772 if (regs == vfp_versions[newval].regs
11773 && ver == vfp_versions[newval].ver)
11776 out_attr[i].i = newval;
11779 case Tag_PCS_config:
11780 if (out_attr[i].i == 0)
11781 out_attr[i].i = in_attr[i].i;
11782 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
11784 /* It's sometimes ok to mix different configs, so this is only
11787 (_("Warning: %B: Conflicting platform configuration"), ibfd);
11790 case Tag_ABI_PCS_R9_use:
11791 if (in_attr[i].i != out_attr[i].i
11792 && out_attr[i].i != AEABI_R9_unused
11793 && in_attr[i].i != AEABI_R9_unused)
11796 (_("error: %B: Conflicting use of R9"), ibfd);
11799 if (out_attr[i].i == AEABI_R9_unused)
11800 out_attr[i].i = in_attr[i].i;
11802 case Tag_ABI_PCS_RW_data:
11803 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
11804 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
11805 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
11808 (_("error: %B: SB relative addressing conflicts with use of R9"),
11812 /* Use the smallest value specified. */
11813 if (in_attr[i].i < out_attr[i].i)
11814 out_attr[i].i = in_attr[i].i;
11816 case Tag_ABI_PCS_wchar_t:
11817 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
11818 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
11821 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
11822 ibfd, in_attr[i].i, out_attr[i].i);
11824 else if (in_attr[i].i && !out_attr[i].i)
11825 out_attr[i].i = in_attr[i].i;
11827 case Tag_ABI_enum_size:
11828 if (in_attr[i].i != AEABI_enum_unused)
11830 if (out_attr[i].i == AEABI_enum_unused
11831 || out_attr[i].i == AEABI_enum_forced_wide)
11833 /* The existing object is compatible with anything.
11834 Use whatever requirements the new object has. */
11835 out_attr[i].i = in_attr[i].i;
11837 else if (in_attr[i].i != AEABI_enum_forced_wide
11838 && out_attr[i].i != in_attr[i].i
11839 && !elf_arm_tdata (obfd)->no_enum_size_warning)
11841 static const char *aeabi_enum_names[] =
11842 { "", "variable-size", "32-bit", "" };
11843 const char *in_name =
11844 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
11845 ? aeabi_enum_names[in_attr[i].i]
11847 const char *out_name =
11848 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
11849 ? aeabi_enum_names[out_attr[i].i]
11852 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
11853 ibfd, in_name, out_name);
11857 case Tag_ABI_VFP_args:
11860 case Tag_ABI_WMMX_args:
11861 if (in_attr[i].i != out_attr[i].i)
11864 (_("error: %B uses iWMMXt register arguments, %B does not"),
11869 case Tag_compatibility:
11870 /* Merged in target-independent code. */
11872 case Tag_ABI_HardFP_use:
11873 /* This is handled along with Tag_FP_arch. */
11875 case Tag_ABI_FP_16bit_format:
11876 if (in_attr[i].i != 0 && out_attr[i].i != 0)
11878 if (in_attr[i].i != out_attr[i].i)
11881 (_("error: fp16 format mismatch between %B and %B"),
11886 if (in_attr[i].i != 0)
11887 out_attr[i].i = in_attr[i].i;
11891 /* A value of zero on input means that the divide instruction may
11892 be used if available in the base architecture as specified via
11893 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
11894 the user did not want divide instructions. A value of 2
11895 explicitly means that divide instructions were allowed in ARM
11896 and Thumb state. */
11897 if (in_attr[i].i == out_attr[i].i)
11898 /* Do nothing. */ ;
11899 else if (elf32_arm_attributes_forbid_div (in_attr)
11900 && !elf32_arm_attributes_accept_div (out_attr))
11902 else if (elf32_arm_attributes_forbid_div (out_attr)
11903 && elf32_arm_attributes_accept_div (in_attr))
11904 out_attr[i].i = in_attr[i].i;
11905 else if (in_attr[i].i == 2)
11906 out_attr[i].i = in_attr[i].i;
11909 case Tag_MPextension_use_legacy:
11910 /* We don't output objects with Tag_MPextension_use_legacy - we
11911 move the value to Tag_MPextension_use. */
11912 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
11914 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
/* NOTE(review): the message below contains a duplicated word
   ("has has") — it is a runtime string, so it is left untouched
   here; fix it when the full function can be edited.  */
11917 (_("%B has has both the current and legacy "
11918 "Tag_MPextension_use attributes"),
11924 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
11925 out_attr[Tag_MPextension_use] = in_attr[i];
11929 case Tag_nodefaults:
11930 /* This tag is set if it exists, but the value is unused (and is
11931 typically zero). We don't actually need to do anything here -
11932 the merge happens automatically when the type flags are merged
11935 case Tag_also_compatible_with:
11936 /* Already done in Tag_CPU_arch. */
11938 case Tag_conformance:
11939 /* Keep the attribute if it matches. Throw it away otherwise.
11940 No attribute means no claim to conform. */
11941 if (!in_attr[i].s || !out_attr[i].s
11942 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
11943 out_attr[i].s = NULL;
/* Default case (elided above): unknown tags go to the generic
   unknown-attribute merger.  */
11948 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
11951 /* If out_attr was copied from in_attr then it won't have a type yet. */
11952 if (in_attr[i].type && !out_attr[i].type)
11953 out_attr[i].type = in_attr[i].type;
11956 /* Merge Tag_compatibility attributes and any common GNU ones. */
11957 if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
11960 /* Check for any attributes not known on ARM. */
11961 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
/* NOTE(review): despite the original header comment below, the visible
   body returns TRUE for *compatible* versions (equal, or the v4/v5
   pair); the comment's "incompatible" reads as inverted — confirm
   against callers.  */
11967 /* Return TRUE if the two EABI versions are incompatible. */
11970 elf32_arm_versions_compatible (unsigned iver, unsigned over)
11972 /* v4 and v5 are the same spec before and after it was released,
11973 so allow mixing them. */
11974 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
11975 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
/* Otherwise the versions must match exactly.  */
11978 return (iver == over);
11981 /* Merge backend specific data from an object file to the output
11982 object file when linking. */
11985 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
11987 /* Display the flags field. */
/* NOTE(review): return type, braces, several `else` keywords, `break`s
   and the final return are elided in this dump.  Decodes e_flags into
   human-readable text according to the EABI version in the flags.  */
11990 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
11992 FILE * file = (FILE *) ptr;
11993 unsigned long flags;
11995 BFD_ASSERT (abfd != NULL && ptr != NULL);
11997 /* Print normal ELF private data. */
11998 _bfd_elf_print_private_bfd_data (abfd, ptr);
12000 flags = elf_elfheader (abfd)->e_flags;
12001 /* Ignore init flag - it may not be set, despite the flags field
12002 containing valid data. */
12004 /* xgettext:c-format */
12005 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
12007 switch (EF_ARM_EABI_VERSION (flags))
12009 case EF_ARM_EABI_UNKNOWN:
12010 /* The following flag bits are GNU extensions and not part of the
12011 official ARM ELF extended ABI. Hence they are only decoded if
12012 the EABI version is not set. */
12013 if (flags & EF_ARM_INTERWORK)
12014 fprintf (file, _(" [interworking enabled]"));
12016 if (flags & EF_ARM_APCS_26)
12017 fprintf (file, " [APCS-26]");
12019 fprintf (file, " [APCS-32]");
12021 if (flags & EF_ARM_VFP_FLOAT)
12022 fprintf (file, _(" [VFP float format]"));
12023 else if (flags & EF_ARM_MAVERICK_FLOAT)
12024 fprintf (file, _(" [Maverick float format]"));
12026 fprintf (file, _(" [FPA float format]"));
12028 if (flags & EF_ARM_APCS_FLOAT)
12029 fprintf (file, _(" [floats passed in float registers]"));
12031 if (flags & EF_ARM_PIC)
12032 fprintf (file, _(" [position independent]"));
12034 if (flags & EF_ARM_NEW_ABI)
12035 fprintf (file, _(" [new ABI]"));
12037 if (flags & EF_ARM_OLD_ABI)
12038 fprintf (file, _(" [old ABI]"));
12040 if (flags & EF_ARM_SOFT_FLOAT)
12041 fprintf (file, _(" [software FP]"));
/* Clear the bits we have decoded so leftovers can be reported.  */
12043 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
12044 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
12045 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
12046 | EF_ARM_MAVERICK_FLOAT);
12049 case EF_ARM_EABI_VER1:
12050 fprintf (file, _(" [Version1 EABI]"));
12052 if (flags & EF_ARM_SYMSARESORTED)
12053 fprintf (file, _(" [sorted symbol table]"));
12055 fprintf (file, _(" [unsorted symbol table]"));
12057 flags &= ~ EF_ARM_SYMSARESORTED;
12060 case EF_ARM_EABI_VER2:
12061 fprintf (file, _(" [Version2 EABI]"));
12063 if (flags & EF_ARM_SYMSARESORTED)
12064 fprintf (file, _(" [sorted symbol table]"));
12066 fprintf (file, _(" [unsorted symbol table]"));
12068 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
12069 fprintf (file, _(" [dynamic symbols use segment index]"));
12071 if (flags & EF_ARM_MAPSYMSFIRST)
12072 fprintf (file, _(" [mapping symbols precede others]"));
12074 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
12075 | EF_ARM_MAPSYMSFIRST);
12078 case EF_ARM_EABI_VER3:
12079 fprintf (file, _(" [Version3 EABI]"));
12082 case EF_ARM_EABI_VER4:
12083 fprintf (file, _(" [Version4 EABI]"));
12086 case EF_ARM_EABI_VER5:
12087 fprintf (file, _(" [Version5 EABI]"));
12089 if (flags & EF_ARM_BE8)
12090 fprintf (file, _(" [BE8]"));
12092 if (flags & EF_ARM_LE8)
12093 fprintf (file, _(" [LE8]"));
12095 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
12099 fprintf (file, _(" <EABI version unrecognised>"));
12103 flags &= ~ EF_ARM_EABIMASK;
/* Flags common to all EABI versions.  */
12105 if (flags & EF_ARM_RELEXEC)
12106 fprintf (file, _(" [relocatable executable]"));
12108 if (flags & EF_ARM_HASENTRY)
12109 fprintf (file, _(" [has entry point]"));
12111 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY)12111 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
12114 fprintf (file, _("<Unrecognised flag bits set>"));
12116 fputc ('\n', file);
/* Map ARM-specific symbol types (Thumb function / 16-bit data markers)
   onto the symbol type the generic code should use.
   NOTE(review): return type, switch braces and the default case are
   elided in this dump.  */
12122 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
12124 switch (ELF_ST_TYPE (elf_sym->st_info))
12126 case STT_ARM_TFUNC:
12127 return ELF_ST_TYPE (elf_sym->st_info);
12129 case STT_ARM_16BIT:
12130 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
12131 This allows us to distinguish between data used by Thumb instructions
12132 and non-data (which is probably code) inside Thumb regions of an
12134 if (type != STT_OBJECT && type != STT_TLS)
12135 return ELF_ST_TYPE (elf_sym->st_info);
/* Garbage-collection mark hook: treat vtable relocations specially
   (they do not keep their target section alive by themselves); defer
   everything else to the generic ELF hook.
   NOTE(review): return type, braces and the vtable-case return value
   are elided in this dump.  */
12146 elf32_arm_gc_mark_hook (asection *sec,
12147 struct bfd_link_info *info,
12148 Elf_Internal_Rela *rel,
12149 struct elf_link_hash_entry *h,
12150 Elf_Internal_Sym *sym)
12153 switch (ELF32_R_TYPE (rel->r_info))
12155 case R_ARM_GNU_VTINHERIT:
12156 case R_ARM_GNU_VTENTRY:
12160 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
12163 /* Update the got entry reference counts for the section being removed. */
/* NOTE(review): heavily elided in this dump — the switch header of the
   reloc loop, many case labels, braces, early returns and the final
   return are missing.  The visible structure walks SEC's relocations
   and decrements GOT/PLT/dynamic-reloc reference counts that
   elf32_arm_check_relocs previously incremented.  */
12166 elf32_arm_gc_sweep_hook (bfd * abfd,
12167 struct bfd_link_info * info,
12169 const Elf_Internal_Rela * relocs)
12171 Elf_Internal_Shdr *symtab_hdr;
12172 struct elf_link_hash_entry **sym_hashes;
12173 bfd_signed_vma *local_got_refcounts;
12174 const Elf_Internal_Rela *rel, *relend;
12175 struct elf32_arm_link_hash_table * globals;
/* Nothing to sweep for relocatable links.  */
12177 if (info->relocatable)
12180 globals = elf32_arm_hash_table (info);
12181 if (globals == NULL)
12184 elf_section_data (sec)->local_dynrel = NULL;
12186 symtab_hdr = & elf_symtab_hdr (abfd);
12187 sym_hashes = elf_sym_hashes (abfd);
12188 local_got_refcounts = elf_local_got_refcounts (abfd);
12190 check_use_blx (globals);
12192 relend = relocs + sec->reloc_count;
12193 for (rel = relocs; rel < relend; rel++)
12195 unsigned long r_symndx;
12196 struct elf_link_hash_entry *h = NULL;
12197 struct elf32_arm_link_hash_entry *eh;
12199 bfd_boolean call_reloc_p;
12200 bfd_boolean may_become_dynamic_p;
12201 bfd_boolean may_need_local_target_p;
12202 union gotplt_union *root_plt;
12203 struct arm_plt_info *arm_plt;
12205 r_symndx = ELF32_R_SYM (rel->r_info);
/* Global symbol: resolve indirect/warning links to the real entry.  */
12206 if (r_symndx >= symtab_hdr->sh_info)
12208 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12209 while (h->root.type == bfd_link_hash_indirect
12210 || h->root.type == bfd_link_hash_warning)
12211 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12213 eh = (struct elf32_arm_link_hash_entry *) h;
12215 call_reloc_p = FALSE;
12216 may_become_dynamic_p = FALSE;
12217 may_need_local_target_p = FALSE;
12219 r_type = ELF32_R_TYPE (rel->r_info);
12220 r_type = arm_real_reloc_type (globals, r_type);
/* GOT-using relocations: drop one reference.  */
12224 case R_ARM_GOT_PREL:
12225 case R_ARM_TLS_GD32:
12226 case R_ARM_TLS_IE32:
12229 if (h->got.refcount > 0)
12230 h->got.refcount -= 1;
12232 else if (local_got_refcounts != NULL)
12234 if (local_got_refcounts[r_symndx] > 0)
12235 local_got_refcounts[r_symndx] -= 1;
12239 case R_ARM_TLS_LDM32:
12240 globals->tls_ldm_got.refcount -= 1;
12248 case R_ARM_THM_CALL:
12249 case R_ARM_THM_JUMP24:
12250 case R_ARM_THM_JUMP19:
12251 call_reloc_p = TRUE;
12252 may_need_local_target_p = TRUE;
12256 if (!globals->vxworks_p)
12258 may_need_local_target_p = TRUE;
12261 /* Fall through. */
12263 case R_ARM_ABS32_NOI:
12265 case R_ARM_REL32_NOI:
12266 case R_ARM_MOVW_ABS_NC:
12267 case R_ARM_MOVT_ABS:
12268 case R_ARM_MOVW_PREL_NC:
12269 case R_ARM_MOVT_PREL:
12270 case R_ARM_THM_MOVW_ABS_NC:
12271 case R_ARM_THM_MOVT_ABS:
12272 case R_ARM_THM_MOVW_PREL_NC:
12273 case R_ARM_THM_MOVT_PREL:
12274 /* Should the interworking branches be here also? */
12275 if ((info->shared || globals->root.is_relocatable_executable)
12276 && (sec->flags & SEC_ALLOC) != 0)
12279 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
12281 call_reloc_p = TRUE;
12282 may_need_local_target_p = TRUE;
12285 may_become_dynamic_p = TRUE;
12288 may_need_local_target_p = TRUE;
/* Undo the PLT bookkeeping done at check_relocs time.  */
12295 if (may_need_local_target_p
12296 && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
12298 /* If PLT refcount book-keeping is wrong and too low, we'll
12299 see a zero value (going to -1) for the root PLT reference
12301 if (root_plt->refcount >= 0)
12303 BFD_ASSERT (root_plt->refcount != 0);
12304 root_plt->refcount -= 1;
12307 /* A value of -1 means the symbol has become local, forced
12308 or seeing a hidden definition. Any other negative value
12310 BFD_ASSERT (root_plt->refcount == -1);
12313 arm_plt->noncall_refcount--;
12315 if (r_type == R_ARM_THM_CALL)
12316 arm_plt->maybe_thumb_refcount--;
12318 if (r_type == R_ARM_THM_JUMP24
12319 || r_type == R_ARM_THM_JUMP19)
12320 arm_plt->thumb_refcount--;
/* Remove this section's entry from the dynamic-relocation list.  */
12323 if (may_become_dynamic_p)
12325 struct elf_dyn_relocs **pp;
12326 struct elf_dyn_relocs *p;
12329 pp = &(eh->dyn_relocs);
12332 Elf_Internal_Sym *isym;
12334 isym = bfd_sym_from_r_symndx (&globals->sym_cache,
12338 pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
12342 for (; (p = *pp) != NULL; pp = &p->next)
12345 /* Everything must go for SEC. */
12355 /* Look through the relocs for a section during the first phase. */
/* Scan every relocation in SEC of input bfd ABFD, updating GOT, PLT
   and TLS reference counts on the affected symbols and recording
   which relocations may later have to be copied into the output as
   dynamic relocations.  Returns FALSE on error.
   NOTE(review): this extract is missing some original lines (braces,
   returns), so the control flow shown here is incomplete.  */
12358 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
12359 asection *sec, const Elf_Internal_Rela *relocs)
12361 Elf_Internal_Shdr *symtab_hdr;
12362 struct elf_link_hash_entry **sym_hashes;
12363 const Elf_Internal_Rela *rel;
12364 const Elf_Internal_Rela *rel_end;
12367 struct elf32_arm_link_hash_table *htab;
12368 bfd_boolean call_reloc_p;
12369 bfd_boolean may_become_dynamic_p;
12370 bfd_boolean may_need_local_target_p;
12371 unsigned long nsyms;
/* Nothing needs to be recorded for a relocatable (-r) link.  */
12373 if (info->relocatable)
12376 BFD_ASSERT (is_arm_elf (abfd));
12378 htab = elf32_arm_hash_table (info);
12384 /* Create dynamic sections for relocatable executables so that we can
12385 copy relocations. */
12386 if (htab->root.is_relocatable_executable
12387 && ! htab->root.dynamic_sections_created)
12389 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
12393 if (htab->root.dynobj == NULL)
12394 htab->root.dynobj = abfd;
12395 if (!create_ifunc_sections (info))
12398 dynobj = htab->root.dynobj;
12400 symtab_hdr = & elf_symtab_hdr (abfd);
12401 sym_hashes = elf_sym_hashes (abfd);
12402 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
/* Walk each relocation in the section in turn.  */
12404 rel_end = relocs + sec->reloc_count;
12405 for (rel = relocs; rel < rel_end; rel++)
12407 Elf_Internal_Sym *isym;
12408 struct elf_link_hash_entry *h;
12409 struct elf32_arm_link_hash_entry *eh;
12410 unsigned long r_symndx;
/* Map EABI-mandated relocation numbers onto the howto table used
   internally before classifying.  */
12413 r_symndx = ELF32_R_SYM (rel->r_info);
12414 r_type = ELF32_R_TYPE (rel->r_info);
12415 r_type = arm_real_reloc_type (htab, r_type);
/* Reject out-of-range symbol indices up front.  */
12417 if (r_symndx >= nsyms
12418 /* PR 9934: It is possible to have relocations that do not
12419 refer to symbols, thus it is also possible to have an
12420 object file containing relocations but no symbol table. */
12421 && (r_symndx > STN_UNDEF || nsyms > 0))
12423 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
/* Resolve the symbol index to either a local symbol (ISYM) or a
   global hash entry (H), following indirect/warning links.  */
12432 if (r_symndx < symtab_hdr->sh_info)
12434 /* A local symbol. */
12435 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
12442 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
12443 while (h->root.type == bfd_link_hash_indirect
12444 || h->root.type == bfd_link_hash_warning)
12445 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12449 eh = (struct elf32_arm_link_hash_entry *) h;
/* Per-relocation bookkeeping flags, set by the switch below.  */
12451 call_reloc_p = FALSE;
12452 may_become_dynamic_p = FALSE;
12453 may_need_local_target_p = FALSE;
12455 /* Could be done earlier, if h were already available. */
12456 r_type = elf32_arm_tls_transition (info, r_type, h);
/* Classify the relocation type and decide what bookkeeping is
   required for it.  */
12460 case R_ARM_GOT_PREL:
12461 case R_ARM_TLS_GD32:
12462 case R_ARM_TLS_IE32:
12463 case R_ARM_TLS_GOTDESC:
12464 case R_ARM_TLS_DESCSEQ:
12465 case R_ARM_THM_TLS_DESCSEQ:
12466 case R_ARM_TLS_CALL:
12467 case R_ARM_THM_TLS_CALL:
12468 /* This symbol requires a global offset table entry. */
12470 int tls_type, old_tls_type;
12474 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
12476 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
12478 case R_ARM_TLS_GOTDESC:
12479 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
12480 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
12481 tls_type = GOT_TLS_GDESC; break;
12483 default: tls_type = GOT_NORMAL; break;
12489 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
12493 /* This is a global offset table entry for a local symbol. */
12494 if (!elf32_arm_allocate_local_sym_info (abfd))
12496 elf_local_got_refcounts (abfd)[r_symndx] += 1;
12497 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
12500 /* If a variable is accessed with both tls methods, two
12501 slots may be created. */
12502 if (GOT_TLS_GD_ANY_P (old_tls_type)
12503 && GOT_TLS_GD_ANY_P (tls_type))
12504 tls_type |= old_tls_type;
12506 /* We will already have issued an error message if there
12507 is a TLS/non-TLS mismatch, based on the symbol
12508 type. So just combine any TLS types needed. */
12509 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
12510 && tls_type != GOT_NORMAL)
12511 tls_type |= old_tls_type;
12513 /* If the symbol is accessed in both IE and GDESC
12514 method, we're able to relax. Turn off the GDESC flag,
12515 without messing up with any other kind of tls types
12516 that may be involved */
12517 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
12518 tls_type &= ~GOT_TLS_GDESC;
/* Store the (possibly combined) TLS access type back on the
   symbol, or in the per-bfd local array for local symbols.  */
12520 if (old_tls_type != tls_type)
12523 elf32_arm_hash_entry (h)->tls_type = tls_type;
12525 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
12528 /* Fall through. */
12530 case R_ARM_TLS_LDM32:
12531 if (r_type == R_ARM_TLS_LDM32)
12532 htab->tls_ldm_got.refcount++;
12533 /* Fall through. */
12535 case R_ARM_GOTOFF32:
/* Any GOT-relative reference needs the .got section to exist.  */
12537 if (htab->root.sgot == NULL
12538 && !create_got_section (htab->root.dynobj, info))
12547 case R_ARM_THM_CALL:
12548 case R_ARM_THM_JUMP24:
12549 case R_ARM_THM_JUMP19:
12550 call_reloc_p = TRUE;
12551 may_need_local_target_p = TRUE;
12555 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
12556 ldr __GOTT_INDEX__ offsets. */
12557 if (!htab->vxworks_p)
12559 may_need_local_target_p = TRUE;
12562 /* Fall through. */
12564 case R_ARM_MOVW_ABS_NC:
12565 case R_ARM_MOVT_ABS:
12566 case R_ARM_THM_MOVW_ABS_NC:
12567 case R_ARM_THM_MOVT_ABS:
/* Absolute MOVW/MOVT pairs cannot be represented as dynamic
   relocations, so they are invalid in shared objects.  */
12570 (*_bfd_error_handler)
12571 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
12572 abfd, elf32_arm_howto_table_1[r_type].name,
12573 (h) ? h->root.root.string : "a local symbol");
12574 bfd_set_error (bfd_error_bad_value);
12578 /* Fall through. */
12580 case R_ARM_ABS32_NOI:
12582 case R_ARM_REL32_NOI:
12583 case R_ARM_MOVW_PREL_NC:
12584 case R_ARM_MOVT_PREL:
12585 case R_ARM_THM_MOVW_PREL_NC:
12586 case R_ARM_THM_MOVT_PREL:
12588 /* Should the interworking branches be listed here? */
12589 if ((info->shared || htab->root.is_relocatable_executable)
12590 && (sec->flags & SEC_ALLOC) != 0)
12593 && (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI))
12595 /* In shared libraries and relocatable executables,
12596 we treat local relative references as calls;
12597 see the related SYMBOL_CALLS_LOCAL code in
12598 allocate_dynrelocs. */
12599 call_reloc_p = TRUE;
12600 may_need_local_target_p = TRUE;
12603 /* We are creating a shared library or relocatable
12604 executable, and this is a reloc against a global symbol,
12605 or a non-PC-relative reloc against a local symbol.
12606 We may need to copy the reloc into the output. */
12607 may_become_dynamic_p = TRUE;
12610 may_need_local_target_p = TRUE;
12613 /* This relocation describes the C++ object vtable hierarchy.
12614 Reconstruct it for later use during GC. */
12615 case R_ARM_GNU_VTINHERIT:
12616 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
12620 /* This relocation describes which C++ vtable entries are actually
12621 used. Record for later use during GC. */
12622 case R_ARM_GNU_VTENTRY:
12623 BFD_ASSERT (h != NULL);
12625 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
12633 /* We may need a .plt entry if the function this reloc
12634 refers to is in a different object, regardless of the
12635 symbol's type. We can't tell for sure yet, because
12636 something later might force the symbol local. */
12638 else if (may_need_local_target_p)
12639 /* If this reloc is in a read-only section, we might
12640 need a copy reloc. We can't check reliably at this
12641 stage whether the section is read-only, as input
12642 sections have not yet been mapped to output sections.
12643 Tentatively set the flag for now, and correct in
12644 adjust_dynamic_symbol. */
12645 h->non_got_ref = 1;
/* Count a PLT reference for symbols that may need a locally
   reachable target (global symbols, or local STT_GNU_IFUNCs,
   which get a per-bfd iplt entry instead).  */
12648 if (may_need_local_target_p
12649 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
12651 union gotplt_union *root_plt;
12652 struct arm_plt_info *arm_plt;
12653 struct arm_local_iplt_info *local_iplt;
12657 root_plt = &h->plt;
12658 arm_plt = &eh->plt;
12662 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
12663 if (local_iplt == NULL)
12665 root_plt = &local_iplt->root;
12666 arm_plt = &local_iplt->arm;
12669 /* If the symbol is a function that doesn't bind locally,
12670 this relocation will need a PLT entry. */
12671 root_plt->refcount += 1;
12674 arm_plt->noncall_refcount++;
12676 /* It's too early to use htab->use_blx here, so we have to
12677 record possible blx references separately from
12678 relocs that definitely need a thumb stub. */
12680 if (r_type == R_ARM_THM_CALL)
12681 arm_plt->maybe_thumb_refcount += 1;
12683 if (r_type == R_ARM_THM_JUMP24
12684 || r_type == R_ARM_THM_JUMP19)
12685 arm_plt->thumb_refcount += 1;
/* Record relocations that may have to be emitted dynamically on
   the symbol's (or local symbol's) dyn_relocs list, creating the
   corresponding .rel(a) section on first use.  */
12688 if (may_become_dynamic_p)
12690 struct elf_dyn_relocs *p, **head;
12692 /* Create a reloc section in dynobj. */
12693 if (sreloc == NULL)
12695 sreloc = _bfd_elf_make_dynamic_reloc_section
12696 (sec, dynobj, 2, abfd, ! htab->use_rel);
12698 if (sreloc == NULL)
12701 /* BPABI objects never have dynamic relocations mapped. */
12702 if (htab->symbian_p)
12706 flags = bfd_get_section_flags (dynobj, sreloc);
12707 flags &= ~(SEC_LOAD | SEC_ALLOC);
12708 bfd_set_section_flags (dynobj, sreloc, flags);
12712 /* If this is a global symbol, count the number of
12713 relocations we need for this symbol. */
12715 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
12718 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
/* Start a new per-section record if the last one on the list is
   not for SEC; pc_count tracks PC-relative (REL32) entries.  */
12724 if (p == NULL || p->sec != sec)
12726 bfd_size_type amt = sizeof *p;
12728 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
12738 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
12747 /* Unwinding tables are not referenced directly. This pass marks them as
12748 required if the corresponding code section is marked. */
/* GC hook: after the generic extra-section marking, keep any
   SHT_ARM_EXIDX unwind-table section whose associated code section
   (named by sh_link) survived garbage collection.  */
12751 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
12752 elf_gc_mark_hook_fn gc_mark_hook)
12755 Elf_Internal_Shdr **elf_shdrp;
12758 _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
12760 /* Marking EH data may cause additional code sections to be marked,
12761 requiring multiple passes. */
/* Iterate over every ARM input bfd and its sections.  */
12766 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
12770 if (! is_arm_elf (sub))
12773 elf_shdrp = elf_elfsections (sub);
12774 for (o = sub->sections; o != NULL; o = o->next)
12776 Elf_Internal_Shdr *hdr;
12778 hdr = &elf_section_data (o)->this_hdr;
/* An EXIDX section's sh_link names its code section; mark the
   unwind table when that code section is itself marked.  */
12779 if (hdr->sh_type == SHT_ARM_EXIDX
12781 && hdr->sh_link < elf_numsections (sub)
12783 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
12786 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
12796 /* Treat mapping symbols as special target symbols. */
/* Returns TRUE if SYM's name identifies it as an ARM special symbol
   (any kind accepted by bfd_is_arm_special_symbol_name, e.g. the
   mapping symbols).  ABFD is unused.  */
12799 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
12801 return bfd_is_arm_special_symbol_name (sym->name,
12802 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
12805 /* This is a copy of elf_find_function() from elf.c except that
12806 ARM mapping symbols are ignored when looking for function names
12807 and STT_ARM_TFUNC is considered to be a function type. */
/* Find the function symbol in SECTION covering OFFSET, plus the most
   recently seen file symbol, and store their names through
   FILENAME_PTR / FUNCTIONNAME_PTR when those are non-NULL.  */
12810 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
12811 asection * section,
12812 asymbol ** symbols,
12814 const char ** filename_ptr,
12815 const char ** functionname_ptr)
12817 const char * filename = NULL;
12818 asymbol * func = NULL;
12819 bfd_vma low_func = 0;
/* Single linear scan: remember the last STT_FILE name and the
   highest-valued function symbol that does not exceed OFFSET.  */
12822 for (p = symbols; *p != NULL; p++)
12824 elf_symbol_type *q;
12826 q = (elf_symbol_type *) *p;
12828 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
12833 filename = bfd_asymbol_name (&q->symbol);
12836 case STT_ARM_TFUNC:
12838 /* Skip mapping symbols. */
12839 if ((q->symbol.flags & BSF_LOCAL)
12840 && bfd_is_arm_special_symbol_name (q->symbol.name,
12841 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
12843 /* Fall through. */
/* Track the closest preceding function symbol in SECTION.  */
12844 if (bfd_get_section (&q->symbol) == section
12845 && q->symbol.value >= low_func
12846 && q->symbol.value <= offset)
12848 func = (asymbol *) q;
12849 low_func = q->symbol.value;
12859 *filename_ptr = filename;
12860 if (functionname_ptr)
12861 *functionname_ptr = bfd_asymbol_name (func);
12867 /* Find the nearest line to a particular section and offset, for error
12868 reporting. This code is a duplicate of the code in elf.c, except
12869 that it uses arm_elf_find_function. */
/* Try DWARF2 line info first, then stabs, then fall back to a plain
   symbol-table scan via arm_elf_find_function.  */
12872 elf32_arm_find_nearest_line (bfd * abfd,
12873 asection * section,
12874 asymbol ** symbols,
12876 const char ** filename_ptr,
12877 const char ** functionname_ptr,
12878 unsigned int * line_ptr)
12880 bfd_boolean found = FALSE;
12882 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
12884 if (_bfd_dwarf2_find_nearest_line (abfd, dwarf_debug_sections,
12885 section, symbols, offset,
12886 filename_ptr, functionname_ptr,
12888 & elf_tdata (abfd)->dwarf2_find_line_info))
/* DWARF gave a line but possibly no function name; fill that in
   from the symbol table, keeping any filename DWARF found.  */
12890 if (!*functionname_ptr)
12891 arm_elf_find_function (abfd, section, symbols, offset,
12892 *filename_ptr ? NULL : filename_ptr,
/* No DWARF2 info: try the stabs debugging sections.  */
12898 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
12899 & found, filename_ptr,
12900 functionname_ptr, line_ptr,
12901 & elf_tdata (abfd)->line_info))
12904 if (found && (*functionname_ptr || *line_ptr))
12907 if (symbols == NULL)
/* Last resort: locate the enclosing function symbol only.  */
12910 if (! arm_elf_find_function (abfd, section, symbols, offset,
12911 filename_ptr, functionname_ptr))
/* Report the caller of an inlined function at the current DWARF2
   position, delegating entirely to the generic DWARF2 walker.  */
12919 elf32_arm_find_inliner_info (bfd * abfd,
12920 const char ** filename_ptr,
12921 const char ** functionname_ptr,
12922 unsigned int * line_ptr)
12925 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
12926 functionname_ptr, line_ptr,
12927 & elf_tdata (abfd)->dwarf2_find_line_info);
12931 /* Adjust a symbol defined by a dynamic object and referenced by a
12932 regular object. The current definition is in some section of the
12933 dynamic object, but we're not including those sections. We have to
12934 change the definition to something the rest of the link can
/* Decide, per symbol: keep/discard its PLT entry, inherit a weakdef's
   definition, or reserve .dynbss space plus an R_ARM_COPY reloc.  */
12938 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
12939 struct elf_link_hash_entry * h)
12943 struct elf32_arm_link_hash_entry * eh;
12944 struct elf32_arm_link_hash_table *globals;
12946 globals = elf32_arm_hash_table (info);
12947 if (globals == NULL)
12950 dynobj = elf_hash_table (info)->dynobj;
12952 /* Make sure we know what is going on here. */
12953 BFD_ASSERT (dynobj != NULL
12955 || h->type == STT_GNU_IFUNC
12956 || h->u.weakdef != NULL
12959 && !h->def_regular)));
12961 eh = (struct elf32_arm_link_hash_entry *) h;
12963 /* If this is a function, put it in the procedure linkage table. We
12964 will fill in the contents of the procedure linkage table later,
12965 when we know the address of the .got section. */
12966 if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
12968 /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
12969 symbol binds locally. */
12970 if (h->plt.refcount <= 0
12971 || (h->type != STT_GNU_IFUNC
12972 && (SYMBOL_CALLS_LOCAL (info, h)
12973 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
12974 && h->root.type == bfd_link_hash_undefweak))))
12976 /* This case can occur if we saw a PLT32 reloc in an input
12977 file, but the symbol was never referred to by a dynamic
12978 object, or if all references were garbage collected. In
12979 such a case, we don't actually need to build a procedure
12980 linkage table, and we can just do a PC24 reloc instead. */
12981 h->plt.offset = (bfd_vma) -1;
12982 eh->plt.thumb_refcount = 0;
12983 eh->plt.maybe_thumb_refcount = 0;
12984 eh->plt.noncall_refcount = 0;
12992 /* It's possible that we incorrectly decided a .plt reloc was
12993 needed for an R_ARM_PC24 or similar reloc to a non-function sym
12994 in check_relocs. We can't decide accurately between function
12995 and non-function syms in check-relocs; Objects loaded later in
12996 the link may change h->type. So fix it now. */
12997 h->plt.offset = (bfd_vma) -1;
12998 eh->plt.thumb_refcount = 0;
12999 eh->plt.maybe_thumb_refcount = 0;
13000 eh->plt.noncall_refcount = 0;
13003 /* If this is a weak symbol, and there is a real definition, the
13004 processor independent code will have arranged for us to see the
13005 real definition first, and we can just use the same value. */
13006 if (h->u.weakdef != NULL)
13008 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
13009 || h->u.weakdef->root.type == bfd_link_hash_defweak);
13010 h->root.u.def.section = h->u.weakdef->root.u.def.section;
13011 h->root.u.def.value = h->u.weakdef->root.u.def.value;
13015 /* If there are no non-GOT references, we do not need a copy
13017 if (!h->non_got_ref)
13020 /* This is a reference to a symbol defined by a dynamic object which
13021 is not a function. */
13023 /* If we are creating a shared library, we must presume that the
13024 only references to the symbol are via the global offset table.
13025 For such cases we need not do anything here; the relocations will
13026 be handled correctly by relocate_section. Relocatable executables
13027 can reference data in shared objects directly, so we don't need to
13028 do anything here. */
13029 if (info->shared || globals->root.is_relocatable_executable)
13032 /* We must allocate the symbol in our .dynbss section, which will
13033 become part of the .bss section of the executable. There will be
13034 an entry for this symbol in the .dynsym section. The dynamic
13035 object will contain position independent code, so all references
13036 from the dynamic object to this symbol will go through the global
13037 offset table. The dynamic linker will use the .dynsym entry to
13038 determine the address it must put in the global offset table, so
13039 both the dynamic object and the regular object will refer to the
13040 same memory location for the variable. */
13041 s = bfd_get_section_by_name (dynobj, ".dynbss");
13042 BFD_ASSERT (s != NULL);
13044 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
13045 copy the initial value out of the dynamic object and into the
13046 runtime process image. We need to remember the offset into the
13047 .rel(a).bss section we are going to use. */
13048 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
13052 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
13053 elf32_arm_allocate_dynrelocs (info, srel, 1);
/* Generic code places the symbol in .dynbss for us.  */
13057 return _bfd_elf_adjust_dynamic_copy (h, s);
13060 /* Allocate space in .plt, .got and associated reloc sections for
/* Hash-table traversal callback: for global symbol H, allocate its
   .plt/.got slots and reserve dynamic-relocation space, using the
   reference counts gathered by check_relocs.  INF is the
   bfd_link_info.  Returns FALSE on error, TRUE otherwise.  */
13064 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
13066 struct bfd_link_info *info;
13067 struct elf32_arm_link_hash_table *htab;
13068 struct elf32_arm_link_hash_entry *eh;
13069 struct elf_dyn_relocs *p;
13071 if (h->root.type == bfd_link_hash_indirect)
13074 eh = (struct elf32_arm_link_hash_entry *) h;
13076 info = (struct bfd_link_info *) inf;
13077 htab = elf32_arm_hash_table (info);
/* PLT allocation.  */
13081 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
13082 && h->plt.refcount > 0)
13084 /* Make sure this symbol is output as a dynamic symbol.
13085 Undefined weak syms won't yet be marked as dynamic. */
13086 if (h->dynindx == -1
13087 && !h->forced_local)
13089 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13093 /* If the call in the PLT entry binds locally, the associated
13094 GOT entry should use an R_ARM_IRELATIVE relocation instead of
13095 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
13096 than the .plt section. */
13097 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
13100 if (eh->plt.noncall_refcount == 0
13101 && SYMBOL_REFERENCES_LOCAL (info, h))
13102 /* All non-call references can be resolved directly.
13103 This means that they can (and in some cases, must)
13104 resolve directly to the run-time target, rather than
13105 to the PLT. That in turns means that any .got entry
13106 would be equal to the .igot.plt entry, so there's
13107 no point having both. */
13108 h->got.refcount = 0;
13113 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
13115 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
13117 /* If this symbol is not defined in a regular file, and we are
13118 not generating a shared library, then set the symbol to this
13119 location in the .plt. This is required to make function
13120 pointers compare as equal between the normal executable and
13121 the shared library. */
13123 && !h->def_regular)
13125 h->root.u.def.section = htab->root.splt;
13126 h->root.u.def.value = h->plt.offset;
13128 /* Make sure the function is not marked as Thumb, in case
13129 it is the target of an ABS32 relocation, which will
13130 point to the PLT entry. */
13131 h->target_internal = ST_BRANCH_TO_ARM;
13134 htab->next_tls_desc_index++;
13136 /* VxWorks executables have a second set of relocations for
13137 each PLT entry. They go in a separate relocation section,
13138 which is processed by the kernel loader. */
13139 if (htab->vxworks_p && !info->shared)
13141 /* There is a relocation for the initial PLT entry:
13142 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
13143 if (h->plt.offset == htab->plt_header_size)
13144 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
13146 /* There are two extra relocations for each subsequent
13147 PLT entry: an R_ARM_32 relocation for the GOT entry,
13148 and an R_ARM_32 relocation for the PLT entry. */
13149 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
/* No PLT needed after all: mark the entry unused.  */
13154 h->plt.offset = (bfd_vma) -1;
13160 h->plt.offset = (bfd_vma) -1;
/* GOT allocation.  */
13164 eh = (struct elf32_arm_link_hash_entry *) h;
13165 eh->tlsdesc_got = (bfd_vma) -1;
13167 if (h->got.refcount > 0)
13171 int tls_type = elf32_arm_hash_entry (h)->tls_type;
13174 /* Make sure this symbol is output as a dynamic symbol.
13175 Undefined weak syms won't yet be marked as dynamic. */
13176 if (h->dynindx == -1
13177 && !h->forced_local)
13179 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13183 if (!htab->symbian_p)
13185 s = htab->root.sgot;
13186 h->got.offset = s->size;
13188 if (tls_type == GOT_UNKNOWN)
13191 if (tls_type == GOT_NORMAL)
13192 /* Non-TLS symbols need one GOT slot. */
13196 if (tls_type & GOT_TLS_GDESC)
13198 /* R_ARM_TLS_DESC needs 2 GOT slots. */
13200 = (htab->root.sgotplt->size
13201 - elf32_arm_compute_jump_table_size (htab));
13202 htab->root.sgotplt->size += 8;
13203 h->got.offset = (bfd_vma) -2;
13204 /* plt.got_offset needs to know there's a TLS_DESC
13205 reloc in the middle of .got.plt. */
13206 htab->num_tls_desc++;
13209 if (tls_type & GOT_TLS_GD)
13211 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. If
13212 the symbol is both GD and GDESC, got.offset may
13213 have been overwritten. */
13214 h->got.offset = s->size;
13218 if (tls_type & GOT_TLS_IE)
13219 /* R_ARM_TLS_IE32 needs one GOT slot. */
/* Reserve dynamic relocations for the GOT slots just added.  */
13223 dyn = htab->root.dynamic_sections_created;
13226 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
13228 || !SYMBOL_REFERENCES_LOCAL (info, h)))
13231 if (tls_type != GOT_NORMAL
13232 && (info->shared || indx != 0)
13233 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
13234 || h->root.type != bfd_link_hash_undefweak))
13236 if (tls_type & GOT_TLS_IE)
13237 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13239 if (tls_type & GOT_TLS_GD)
13240 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13242 if (tls_type & GOT_TLS_GDESC)
13244 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
13245 /* GDESC needs a trampoline to jump to. */
13246 htab->tls_trampoline = -1;
13249 /* Only GD needs it. GDESC just emits one relocation per
13251 if ((tls_type & GOT_TLS_GD) && indx != 0)
13252 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13254 else if (!SYMBOL_REFERENCES_LOCAL (info, h))
13256 if (htab->root.dynamic_sections_created)
13257 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
13258 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13260 else if (h->type == STT_GNU_IFUNC
13261 && eh->plt.noncall_refcount == 0)
13262 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
13263 they all resolve dynamically instead. Reserve room for the
13264 GOT entry's R_ARM_IRELATIVE relocation. */
13265 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
13266 else if (info->shared)
13267 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
13268 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13272 h->got.offset = (bfd_vma) -1;
13274 /* Allocate stubs for exported Thumb functions on v4t. */
13275 if (!htab->use_blx && h->dynindx != -1
13277 && h->target_internal == ST_BRANCH_TO_THUMB
13278 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
13280 struct elf_link_hash_entry * th;
13281 struct bfd_link_hash_entry * bh;
13282 struct elf_link_hash_entry * myh;
13286 /* Create a new symbol to register the real location of the function. */
13287 s = h->root.u.def.section;
13288 sprintf (name, "__real_%s", h->root.root.string);
13289 _bfd_generic_link_add_one_symbol (info, s->owner,
13290 name, BSF_GLOBAL, s,
13291 h->root.u.def.value,
13292 NULL, TRUE, FALSE, &bh);
13294 myh = (struct elf_link_hash_entry *) bh;
13295 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
13296 myh->forced_local = 1;
13297 myh->target_internal = ST_BRANCH_TO_THUMB;
13298 eh->export_glue = myh;
13299 th = record_arm_to_thumb_glue (info, h);
13300 /* Point the symbol at the stub. */
13301 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
13302 h->target_internal = ST_BRANCH_TO_ARM;
13303 h->root.u.def.section = th->root.u.def.section;
13304 h->root.u.def.value = th->root.u.def.value & ~1;
13307 if (eh->dyn_relocs == NULL)
13310 /* In the shared -Bsymbolic case, discard space allocated for
13311 dynamic pc-relative relocs against symbols which turn out to be
13312 defined in regular objects. For the normal shared case, discard
13313 space for pc-relative relocs that have become local due to symbol
13314 visibility changes. */
13316 if (info->shared || htab->root.is_relocatable_executable)
13318 /* The only relocs that use pc_count are R_ARM_REL32 and
13319 R_ARM_REL32_NOI, which will appear on something like
13320 ".long foo - .". We want calls to protected symbols to resolve
13321 directly to the function rather than going via the plt. If people
13322 want function pointer comparisons to work as expected then they
13323 should avoid writing assembly like ".long foo - .". */
13324 if (SYMBOL_CALLS_LOCAL (info, h))
13326 struct elf_dyn_relocs **pp;
13328 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13330 p->count -= p->pc_count;
/* VxWorks .tls_vars relocations are handled by the loader, so no
   dynamic-reloc space is needed for them.  */
13339 if (htab->vxworks_p)
13341 struct elf_dyn_relocs **pp;
13343 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
13345 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
13352 /* Also discard relocs on undefined weak syms with non-default
13354 if (eh->dyn_relocs != NULL
13355 && h->root.type == bfd_link_hash_undefweak)
13357 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
13358 eh->dyn_relocs = NULL;
13360 /* Make sure undefined weak symbols are output as a dynamic
13362 else if (h->dynindx == -1
13363 && !h->forced_local)
13365 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13370 else if (htab->root.is_relocatable_executable && h->dynindx == -1
13371 && h->root.type == bfd_link_hash_new)
13373 /* Output absolute symbols so that we can create relocations
13374 against them. For normal symbols we output a relocation
13375 against the section that contains them. */
13376 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13383 /* For the non-shared case, discard space for relocs against
13384 symbols which turn out to need copy relocs or are not
13387 if (!h->non_got_ref
13388 && ((h->def_dynamic
13389 && !h->def_regular)
13390 || (htab->root.dynamic_sections_created
13391 && (h->root.type == bfd_link_hash_undefweak
13392 || h->root.type == bfd_link_hash_undefined))))
13394 /* Make sure this symbol is output as a dynamic symbol.
13395 Undefined weak syms won't yet be marked as dynamic. */
13396 if (h->dynindx == -1
13397 && !h->forced_local)
13399 if (! bfd_elf_link_record_dynamic_symbol (info, h))
13403 /* If that succeeded, we know we'll be keeping all the
13405 if (h->dynindx != -1)
13409 eh->dyn_relocs = NULL;
13414 /* Finally, allocate space. */
13415 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13417 asection *sreloc = elf_section_data (p->sec)->sreloc;
13418 if (h->type == STT_GNU_IFUNC
13419 && eh->plt.noncall_refcount == 0
13420 && SYMBOL_REFERENCES_LOCAL (info, h))
13421 elf32_arm_allocate_irelocs (info, sreloc, p->count);
13423 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
13429 /* Find any dynamic relocs that apply to read-only sections. */
/* Hash-table traversal callback: if H has a dynamic relocation
   against a read-only section, set DF_TEXTREL in INF (the
   bfd_link_info) and stop the traversal by returning FALSE.  */
13432 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
13434 struct elf32_arm_link_hash_entry * eh;
13435 struct elf_dyn_relocs * p;
13437 eh = (struct elf32_arm_link_hash_entry *) h;
13438 for (p = eh->dyn_relocs; p != NULL; p = p->next)
13440 asection *s = p->sec;
13442 if (s != NULL && (s->flags & SEC_READONLY) != 0)
13444 struct bfd_link_info *info = (struct bfd_link_info *) inf;
13446 info->flags |= DF_TEXTREL;
13448 /* Not an error, just cut short the traversal. */
/* Record the linker's --be8-style byteswap-code setting in the ARM
   hash table for later use.  */
13456 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
13459 struct elf32_arm_link_hash_table *globals;
13461 globals = elf32_arm_hash_table (info);
/* Bail out quietly if the hash table is not ours/available.  */
13462 if (globals == NULL)
13465 globals->byteswap_code = byteswap_code;
13468 /* Set the sizes of the dynamic sections. */
13471 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
13472 struct bfd_link_info * info)
13477 bfd_boolean relocs;
13479 struct elf32_arm_link_hash_table *htab;
13481 htab = elf32_arm_hash_table (info);
13485 dynobj = elf_hash_table (info)->dynobj;
13486 BFD_ASSERT (dynobj != NULL);
13487 check_use_blx (htab);
13489 if (elf_hash_table (info)->dynamic_sections_created)
13491 /* Set the contents of the .interp section to the interpreter. */
13492 if (info->executable)
13494 s = bfd_get_section_by_name (dynobj, ".interp");
13495 BFD_ASSERT (s != NULL);
13496 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
13497 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
13501 /* Set up .got offsets for local syms, and space for local dynamic
13503 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
13505 bfd_signed_vma *local_got;
13506 bfd_signed_vma *end_local_got;
13507 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
13508 char *local_tls_type;
13509 bfd_vma *local_tlsdesc_gotent;
13510 bfd_size_type locsymcount;
13511 Elf_Internal_Shdr *symtab_hdr;
13513 bfd_boolean is_vxworks = htab->vxworks_p;
13514 unsigned int symndx;
13516 if (! is_arm_elf (ibfd))
13519 for (s = ibfd->sections; s != NULL; s = s->next)
13521 struct elf_dyn_relocs *p;
13523 for (p = (struct elf_dyn_relocs *)
13524 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
13526 if (!bfd_is_abs_section (p->sec)
13527 && bfd_is_abs_section (p->sec->output_section))
13529 /* Input section has been discarded, either because
13530 it is a copy of a linkonce section or due to
13531 linker script /DISCARD/, so we'll be discarding
13534 else if (is_vxworks
13535 && strcmp (p->sec->output_section->name,
13538 /* Relocations in vxworks .tls_vars sections are
13539 handled specially by the loader. */
13541 else if (p->count != 0)
13543 srel = elf_section_data (p->sec)->sreloc;
13544 elf32_arm_allocate_dynrelocs (info, srel, p->count);
13545 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
13546 info->flags |= DF_TEXTREL;
13551 local_got = elf_local_got_refcounts (ibfd);
13555 symtab_hdr = & elf_symtab_hdr (ibfd);
13556 locsymcount = symtab_hdr->sh_info;
13557 end_local_got = local_got + locsymcount;
13558 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
13559 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
13560 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
13562 s = htab->root.sgot;
13563 srel = htab->root.srelgot;
13564 for (; local_got < end_local_got;
13565 ++local_got, ++local_iplt_ptr, ++local_tls_type,
13566 ++local_tlsdesc_gotent, ++symndx)
13568 *local_tlsdesc_gotent = (bfd_vma) -1;
13569 local_iplt = *local_iplt_ptr;
13570 if (local_iplt != NULL)
13572 struct elf_dyn_relocs *p;
13574 if (local_iplt->root.refcount > 0)
13576 elf32_arm_allocate_plt_entry (info, TRUE,
13579 if (local_iplt->arm.noncall_refcount == 0)
13580 /* All references to the PLT are calls, so all
13581 non-call references can resolve directly to the
13582 run-time target. This means that the .got entry
13583 would be the same as the .igot.plt entry, so there's
13584 no point creating both. */
13589 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
13590 local_iplt->root.offset = (bfd_vma) -1;
13593 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
13597 psrel = elf_section_data (p->sec)->sreloc;
13598 if (local_iplt->arm.noncall_refcount == 0)
13599 elf32_arm_allocate_irelocs (info, psrel, p->count);
13601 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
13604 if (*local_got > 0)
13606 Elf_Internal_Sym *isym;
13608 *local_got = s->size;
13609 if (*local_tls_type & GOT_TLS_GD)
13610 /* TLS_GD relocs need an 8-byte structure in the GOT. */
13612 if (*local_tls_type & GOT_TLS_GDESC)
13614 *local_tlsdesc_gotent = htab->root.sgotplt->size
13615 - elf32_arm_compute_jump_table_size (htab);
13616 htab->root.sgotplt->size += 8;
13617 *local_got = (bfd_vma) -2;
13618 /* plt.got_offset needs to know there's a TLS_DESC
13619 reloc in the middle of .got.plt. */
13620 htab->num_tls_desc++;
13622 if (*local_tls_type & GOT_TLS_IE)
13625 if (*local_tls_type & GOT_NORMAL)
13627 /* If the symbol is both GD and GDESC, *local_got
13628 may have been overwritten. */
13629 *local_got = s->size;
13633 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
13637 /* If all references to an STT_GNU_IFUNC PLT are calls,
13638 then all non-call references, including this GOT entry,
13639 resolve directly to the run-time target. */
13640 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
13641 && (local_iplt == NULL
13642 || local_iplt->arm.noncall_refcount == 0))
13643 elf32_arm_allocate_irelocs (info, srel, 1);
13644 else if ((info->shared && !(*local_tls_type & GOT_TLS_GDESC))
13645 || *local_tls_type & GOT_TLS_GD)
13646 elf32_arm_allocate_dynrelocs (info, srel, 1);
13648 if (info->shared && *local_tls_type & GOT_TLS_GDESC)
13650 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
13651 htab->tls_trampoline = -1;
13655 *local_got = (bfd_vma) -1;
13659 if (htab->tls_ldm_got.refcount > 0)
13661 /* Allocate two GOT entries and one dynamic relocation (if necessary)
13662 for R_ARM_TLS_LDM32 relocations. */
13663 htab->tls_ldm_got.offset = htab->root.sgot->size;
13664 htab->root.sgot->size += 8;
13666 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
13669 htab->tls_ldm_got.offset = -1;
13671 /* Allocate global sym .plt and .got entries, and space for global
13672 sym dynamic relocs. */
13673 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
13675 /* Here we rummage through the found bfds to collect glue information. */
13676 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
13678 if (! is_arm_elf (ibfd))
13681 /* Initialise mapping tables for code/data. */
13682 bfd_elf32_arm_init_maps (ibfd);
13684 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
13685 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
13686 /* xgettext:c-format */
13687 _bfd_error_handler (_("Errors encountered processing file %s"),
13691 /* Allocate space for the glue sections now that we've sized them. */
13692 bfd_elf32_arm_allocate_interworking_sections (info);
13694 /* For every jump slot reserved in the sgotplt, reloc_count is
13695 incremented. However, when we reserve space for TLS descriptors,
13696 it's not incremented, so in order to compute the space reserved
13697 for them, it suffices to multiply the reloc count by the jump
13699 if (htab->root.srelplt)
13700 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);
13702 if (htab->tls_trampoline)
13704 if (htab->root.splt->size == 0)
13705 htab->root.splt->size += htab->plt_header_size;
13707 htab->tls_trampoline = htab->root.splt->size;
13708 htab->root.splt->size += htab->plt_entry_size;
13710 /* If we're not using lazy TLS relocations, don't generate the
13711 PLT and GOT entries they require. */
13712 if (!(info->flags & DF_BIND_NOW))
13714 htab->dt_tlsdesc_got = htab->root.sgot->size;
13715 htab->root.sgot->size += 4;
13717 htab->dt_tlsdesc_plt = htab->root.splt->size;
13718 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
13722 /* The check_relocs and adjust_dynamic_symbol entry points have
13723 determined the sizes of the various dynamic sections. Allocate
13724 memory for them. */
13727 for (s = dynobj->sections; s != NULL; s = s->next)
13731 if ((s->flags & SEC_LINKER_CREATED) == 0)
13734 /* It's OK to base decisions on the section name, because none
13735 of the dynobj section names depend upon the input files. */
13736 name = bfd_get_section_name (dynobj, s);
13738 if (s == htab->root.splt)
13740 /* Remember whether there is a PLT. */
13741 plt = s->size != 0;
13743 else if (CONST_STRNEQ (name, ".rel"))
13747 /* Remember whether there are any reloc sections other
13748 than .rel(a).plt and .rela.plt.unloaded. */
13749 if (s != htab->root.srelplt && s != htab->srelplt2)
13752 /* We use the reloc_count field as a counter if we need
13753 to copy relocs into the output file. */
13754 s->reloc_count = 0;
13757 else if (s != htab->root.sgot
13758 && s != htab->root.sgotplt
13759 && s != htab->root.iplt
13760 && s != htab->root.igotplt
13761 && s != htab->sdynbss)
13763 /* It's not one of our sections, so don't allocate space. */
13769 /* If we don't need this section, strip it from the
13770 output file. This is mostly to handle .rel(a).bss and
13771 .rel(a).plt. We must create both sections in
13772 create_dynamic_sections, because they must be created
13773 before the linker maps input sections to output
13774 sections. The linker does that before
13775 adjust_dynamic_symbol is called, and it is that
13776 function which decides whether anything needs to go
13777 into these sections. */
13778 s->flags |= SEC_EXCLUDE;
13782 if ((s->flags & SEC_HAS_CONTENTS) == 0)
13785 /* Allocate memory for the section contents. */
13786 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
13787 if (s->contents == NULL)
13791 if (elf_hash_table (info)->dynamic_sections_created)
13793 /* Add some entries to the .dynamic section. We fill in the
13794 values later, in elf32_arm_finish_dynamic_sections, but we
13795 must add the entries now so that we get the correct size for
13796 the .dynamic section. The DT_DEBUG entry is filled in by the
13797 dynamic linker and used by the debugger. */
13798 #define add_dynamic_entry(TAG, VAL) \
13799 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
13801 if (info->executable)
13803 if (!add_dynamic_entry (DT_DEBUG, 0))
13809 if ( !add_dynamic_entry (DT_PLTGOT, 0)
13810 || !add_dynamic_entry (DT_PLTRELSZ, 0)
13811 || !add_dynamic_entry (DT_PLTREL,
13812 htab->use_rel ? DT_REL : DT_RELA)
13813 || !add_dynamic_entry (DT_JMPREL, 0))
13816 if (htab->dt_tlsdesc_plt &&
13817 (!add_dynamic_entry (DT_TLSDESC_PLT,0)
13818 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
13826 if (!add_dynamic_entry (DT_REL, 0)
13827 || !add_dynamic_entry (DT_RELSZ, 0)
13828 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
13833 if (!add_dynamic_entry (DT_RELA, 0)
13834 || !add_dynamic_entry (DT_RELASZ, 0)
13835 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
13840 /* If any dynamic relocs apply to a read-only section,
13841 then we need a DT_TEXTREL entry. */
13842 if ((info->flags & DF_TEXTREL) == 0)
13843 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
13846 if ((info->flags & DF_TEXTREL) != 0)
13848 if (!add_dynamic_entry (DT_TEXTREL, 0))
13851 if (htab->vxworks_p
13852 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
13855 #undef add_dynamic_entry
13860 /* Size sections even though they're not dynamic. We use it to setup
13861 _TLS_MODULE_BASE_, if needed. */
/* Define _TLS_MODULE_BASE_ as a hidden STT_TLS symbol at the start of
   the TLS segment, if a TLS segment exists and we are doing a final
   (non-relocatable) link.  NOTE(review): this listing is elided — the
   embedded line numbers jump (13865 -> 13869 etc.), so braces, the
   early returns and the function tail are missing here.  */
13864 elf32_arm_always_size_sections (bfd *output_bfd,
13865 struct bfd_link_info *info)
/* Nothing to do for ld -r.  */
13869 if (info->relocatable)
13872 tls_sec = elf_hash_table (info)->tls_sec;
13876 struct elf_link_hash_entry *tlsbase;
/* Look up (and create if needed) the hash-table entry for the
   module-base symbol used by TLS descriptor relocations.  */
13878 tlsbase = elf_link_hash_lookup
13879 (elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
13883 struct bfd_link_hash_entry *bh = NULL;
13884 const struct elf_backend_data *bed
13885 = get_elf_backend_data (output_bfd);
/* Define the symbol at offset 0 of the TLS section.  */
13887 if (!(_bfd_generic_link_add_one_symbol
13888 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
13889 tls_sec, 0, NULL, FALSE,
13890 bed->collect, &bh)))
/* NOTE(review): STT_TLS is assigned before tlsbase is re-pointed at
   bh, so the type lands on the entry returned by the earlier lookup,
   not on the newly defined one.  Upstream binutils performs the
   bh-cast first — verify this ordering against the full source.  */
13893 tlsbase->type = STT_TLS;
13894 tlsbase = (struct elf_link_hash_entry *)bh;
13895 tlsbase->def_regular = 1;
13896 tlsbase->other = STV_HIDDEN;
/* Keep the symbol out of the dynamic symbol table.  */
13897 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
13903 /* Finish up dynamic symbol handling. We set the contents of various
13904 dynamic sections here. */
/* Fill in PLT/GOT/copy-reloc data for the dynamic symbol H and adjust
   its output symbol SYM.  NOTE(review): elided listing — gaps in the
   embedded numbering hide braces, returns and some statements.  */
13907 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
13908 struct bfd_link_info * info,
13909 struct elf_link_hash_entry * h,
13910 Elf_Internal_Sym * sym)
13912 struct elf32_arm_link_hash_table *htab;
13913 struct elf32_arm_link_hash_entry *eh;
13915 htab = elf32_arm_hash_table (info);
13919 eh = (struct elf32_arm_link_hash_entry *) h;
/* The symbol has a PLT entry: emit its contents.  */
13921 if (h->plt.offset != (bfd_vma) -1)
13925 BFD_ASSERT (h->dynindx != -1);
13926 elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
13930 if (!h->def_regular)
13932 /* Mark the symbol as undefined, rather than as defined in
13933 the .plt section. Leave the value alone. */
13934 sym->st_shndx = SHN_UNDEF;
13935 /* If the symbol is weak, we do need to clear the value.
13936 Otherwise, the PLT entry would provide a definition for
13937 the symbol even if the symbol wasn't defined anywhere,
13938 and so the symbol would never be NULL. */
13939 if (!h->ref_regular_nonweak)
13942 else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
13944 /* At least one non-call relocation references this .iplt entry,
13945 so the .iplt entry is the function's canonical address. */
13946 sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
13947 sym->st_target_internal = ST_BRANCH_TO_ARM;
13948 sym->st_shndx = (_bfd_elf_section_from_bfd_section
13949 (output_bfd, htab->root.iplt->output_section));
/* Point the symbol at its .iplt slot (output VMA).  */
13950 sym->st_value = (h->plt.offset
13951 + htab->root.iplt->output_section->vma
13952 + htab->root.iplt->output_offset);
/* NOTE(review): the `if (h->needs_copy)` guard and the lookup of the
   .dynbss section `s` are elided above this point.  */
13959 Elf_Internal_Rela rel;
13961 /* This symbol needs a copy reloc. Set it up. */
13962 BFD_ASSERT (h->dynindx != -1
13963 && (h->root.type == bfd_link_hash_defined
13964 || h->root.type == bfd_link_hash_defweak));
13967 BFD_ASSERT (s != NULL);
13970 rel.r_offset = (h->root.u.def.value
13971 + h->root.u.def.section->output_section->vma
13972 + h->root.u.def.section->output_offset);
13973 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
13974 elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
13977 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
13978 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
13979 to the ".got" section. */
13980 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
13981 || (!htab->vxworks_p && h == htab->root.hgot))
13982 sym->st_shndx = SHN_ABS;
/* Copy COUNT 32-bit instructions from TEMPLATE into CONTENTS, applying
   the --fix-v4bx transformation on the way.  NOTE(review): the return
   type line and closing brace are elided from this listing.  */
13988 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
13990 const unsigned long *template, unsigned count)
13994 for (ix = 0; ix != count; ix++)
13996 unsigned long insn = template[ix];
13998 /* Emit mov pc,rx if bx is not permitted. */
13999 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
14000 insn = (insn & 0xf000000f) | 0x01a0f000;
/* put_arm_insn honours big-endian code (BE8) swapping.  */
14001 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
14005 /* Finish up the dynamic sections. */
/* Finish up the dynamic sections: patch .dynamic entries whose values
   are only known now, emit the PLT header and TLS-descriptor
   trampolines, fix VxWorks .rel(a).plt.unloaded relocs, and fill the
   reserved GOT slots.  NOTE(review): heavily elided listing — the
   switch's `case DT_*:` labels, many braces and early returns are
   missing (embedded numbering jumps).  Comments below only describe
   what the visible lines do.  */
14008 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
14013 struct elf32_arm_link_hash_table *htab;
14015 htab = elf32_arm_hash_table (info);
14019 dynobj = elf_hash_table (info)->dynobj;
/* On ARM the reserved GOT slots live at the start of .got.plt.  */
14021 sgot = htab->root.sgotplt;
14022 /* A broken linker script might have discarded the dynamic sections.
14023 Catch this here so that we do not seg-fault later on. */
14024 if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
14026 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
14028 if (elf_hash_table (info)->dynamic_sections_created)
14031 Elf32_External_Dyn *dyncon, *dynconend;
14033 splt = htab->root.splt;
14034 BFD_ASSERT (splt != NULL && sdyn != NULL);
14035 BFD_ASSERT (htab->symbian_p || sgot != NULL);
/* Walk every entry of the .dynamic section and patch it in place.  */
14037 dyncon = (Elf32_External_Dyn *) sdyn->contents;
14038 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
14040 for (; dyncon < dynconend; dyncon++)
14042 Elf_Internal_Dyn dyn;
14046 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
/* Give VxWorks first crack at its own dynamic tags.  */
14053 if (htab->vxworks_p
14054 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
14055 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
/* NOTE(review): the case labels (DT_HASH, DT_STRTAB, DT_SYMTAB,
   DT_VERSYM, DT_VERDEF, DT_VERNEED, ...) preceding each of these
   `name = ...; goto get_vma_if_bpabi;` arms are elided.  */
14060 goto get_vma_if_bpabi;
14063 goto get_vma_if_bpabi;
14066 goto get_vma_if_bpabi;
14068 name = ".gnu.version";
14069 goto get_vma_if_bpabi;
14071 name = ".gnu.version_d";
14072 goto get_vma_if_bpabi;
14074 name = ".gnu.version_r";
14075 goto get_vma_if_bpabi;
/* DT_JMPREL (label elided): point at .rel(a).plt.  */
14081 name = RELOC_SECTION (htab, ".plt");
14083 s = bfd_get_section_by_name (output_bfd, name);
14084 BFD_ASSERT (s != NULL);
14085 if (!htab->symbian_p)
14086 dyn.d_un.d_ptr = s->vma;
14088 /* In the BPABI, tags in the PT_DYNAMIC section point
14089 at the file offset, not the memory address, for the
14090 convenience of the post linker. */
14091 dyn.d_un.d_ptr = s->filepos;
14092 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
/* DT_PLTRELSZ (label elided): size of the PLT reloc section.  */
14096 if (htab->symbian_p)
14101 s = htab->root.srelplt;
14102 BFD_ASSERT (s != NULL);
14103 dyn.d_un.d_val = s->size;
14104 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
/* DT_RELSZ / DT_RELASZ (label elided).  */
14109 if (!htab->symbian_p)
14111 /* My reading of the SVR4 ABI indicates that the
14112 procedure linkage table relocs (DT_JMPREL) should be
14113 included in the overall relocs (DT_REL). This is
14114 what Solaris does. However, UnixWare can not handle
14115 that case. Therefore, we override the DT_RELSZ entry
14116 here to make it not include the JMPREL relocs. Since
14117 the linker script arranges for .rel(a).plt to follow all
14118 other relocation sections, we don't have to worry
14119 about changing the DT_REL entry. */
14120 s = htab->root.srelplt;
14122 dyn.d_un.d_val -= s->size;
14123 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14126 /* Fall through. */
14130 /* In the BPABI, the DT_REL tag must point at the file
14131 offset, not the VMA, of the first relocation
14132 section. So, we use code similar to that in
14133 elflink.c, but do not check for SHF_ALLOC on the
14134 relcoation section, since relocations sections are
14135 never allocated under the BPABI. The comments above
14136 about Unixware notwithstanding, we include all of the
14137 relocations here. */
14138 if (htab->symbian_p)
/* Scan all output sections of the matching reloc type and
   either sum their sizes (DT_RELSZ/DT_RELASZ) or record the
   lowest file offset (DT_REL/DT_RELA).  */
14141 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
14142 ? SHT_REL : SHT_RELA);
14143 dyn.d_un.d_val = 0;
14144 for (i = 1; i < elf_numsections (output_bfd); i++)
14146 Elf_Internal_Shdr *hdr
14147 = elf_elfsections (output_bfd)[i];
14148 if (hdr->sh_type == type)
14150 if (dyn.d_tag == DT_RELSZ
14151 || dyn.d_tag == DT_RELASZ)
14152 dyn.d_un.d_val += hdr->sh_size;
14153 else if ((ufile_ptr) hdr->sh_offset
14154 <= dyn.d_un.d_val - 1)
14155 dyn.d_un.d_val = hdr->sh_offset;
14158 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14162 case DT_TLSDESC_PLT:
/* Address of the lazy TLS-descriptor trampoline in .plt.  */
14163 s = htab->root.splt;
14164 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14165 + htab->dt_tlsdesc_plt);
14166 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14169 case DT_TLSDESC_GOT:
/* GOT slot used by the lazy TLS-descriptor trampoline.  */
14170 s = htab->root.sgot;
14171 dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
14172 + htab->dt_tlsdesc_got);
14173 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14176 /* Set the bottom bit of DT_INIT/FINI if the
14177 corresponding function is Thumb. */
14179 name = info->init_function;
14182 name = info->fini_function;
14184 /* If it wasn't set by elf_bfd_final_link
14185 then there is nothing to adjust. */
14186 if (dyn.d_un.d_val != 0)
14188 struct elf_link_hash_entry * eh;
14190 eh = elf_link_hash_lookup (elf_hash_table (info), name,
14191 FALSE, FALSE, TRUE);
14192 if (eh != NULL && eh->target_internal == ST_BRANCH_TO_THUMB)
14194 dyn.d_un.d_val |= 1;
14195 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
14202 /* Fill in the first entry in the procedure linkage table. */
14203 if (splt->size > 0 && htab->plt_header_size)
14205 const bfd_vma *plt0_entry;
14206 bfd_vma got_address, plt_address, got_displacement;
14208 /* Calculate the addresses of the GOT and PLT. */
14209 got_address = sgot->output_section->vma + sgot->output_offset;
14210 plt_address = splt->output_section->vma + splt->output_offset;
14212 if (htab->vxworks_p)
14214 /* The VxWorks GOT is relocated by the dynamic linker.
14215 Therefore, we must emit relocations rather than simply
14216 computing the values now. */
14217 Elf_Internal_Rela rel;
14219 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
14220 put_arm_insn (htab, output_bfd, plt0_entry[0],
14221 splt->contents + 0);
14222 put_arm_insn (htab, output_bfd, plt0_entry[1],
14223 splt->contents + 4);
14224 put_arm_insn (htab, output_bfd, plt0_entry[2],
14225 splt->contents + 8);
14226 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
14228 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
14229 rel.r_offset = plt_address + 12;
14230 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14232 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
14233 htab->srelplt2->contents);
14235 else if (htab->nacl_p)
/* NaCl PLT0: materialise the GOT displacement with movw/movt.  */
14239 got_displacement = got_address + 8 - (plt_address + 16);
14241 put_arm_insn (htab, output_bfd,
14242 elf32_arm_nacl_plt0_entry[0]
14243 | arm_movw_immediate (got_displacement),
14244 splt->contents + 0);
14245 put_arm_insn (htab, output_bfd,
14246 elf32_arm_nacl_plt0_entry[1]
14247 | arm_movt_immediate (got_displacement),
14248 splt->contents + 4);
14249 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
14250 put_arm_insn (htab, output_bfd,
14251 elf32_arm_nacl_plt0_entry[i],
14252 splt->contents + (i * 4));
/* Generic ELF PLT0 (else-branch; brace elided).  */
14256 got_displacement = got_address - (plt_address + 16);
14258 plt0_entry = elf32_arm_plt0_entry;
14259 put_arm_insn (htab, output_bfd, plt0_entry[0],
14260 splt->contents + 0);
14261 put_arm_insn (htab, output_bfd, plt0_entry[1],
14262 splt->contents + 4);
14263 put_arm_insn (htab, output_bfd, plt0_entry[2],
14264 splt->contents + 8);
14265 put_arm_insn (htab, output_bfd, plt0_entry[3],
14266 splt->contents + 12);
14268 #ifdef FOUR_WORD_PLT
14269 /* The displacement value goes in the otherwise-unused
14270 last word of the second entry. */
14271 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
14273 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
14278 /* UnixWare sets the entsize of .plt to 4, although that doesn't
14279 really seem like the right value. */
14280 if (splt->output_section->owner == output_bfd)
14281 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
/* Emit the lazy TLS-descriptor trampoline and its two PC-relative
   operand words.  */
14283 if (htab->dt_tlsdesc_plt)
14285 bfd_vma got_address
14286 = sgot->output_section->vma + sgot->output_offset;
14287 bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
14288 + htab->root.sgot->output_offset);
14289 bfd_vma plt_address
14290 = splt->output_section->vma + splt->output_offset;
14292 arm_put_trampoline (htab, output_bfd,
14293 splt->contents + htab->dt_tlsdesc_plt,
14294 dl_tlsdesc_lazy_trampoline, 6);
14296 bfd_put_32 (output_bfd,
14297 gotplt_address + htab->dt_tlsdesc_got
14298 - (plt_address + htab->dt_tlsdesc_plt)
14299 - dl_tlsdesc_lazy_trampoline[6],
14300 splt->contents + htab->dt_tlsdesc_plt + 24);
14301 bfd_put_32 (output_bfd,
14302 got_address - (plt_address + htab->dt_tlsdesc_plt)
14303 - dl_tlsdesc_lazy_trampoline[7],
14304 splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
14307 if (htab->tls_trampoline)
14309 arm_put_trampoline (htab, output_bfd,
14310 splt->contents + htab->tls_trampoline,
14311 tls_trampoline, 3);
14312 #ifdef FOUR_WORD_PLT
/* Pad the trampoline out to a full four-word PLT slot.  */
14313 bfd_put_32 (output_bfd, 0x00000000,
14314 splt->contents + htab->tls_trampoline + 12);
14318 if (htab->vxworks_p && !info->shared && htab->root.splt->size > 0)
14320 /* Correct the .rel(a).plt.unloaded relocations. They will have
14321 incorrect symbol indexes. */
/* Two relocs per PLT entry: rewrite the first against _G_O_T_ and
   the second against the PLT symbol.  */
14325 num_plts = ((htab->root.splt->size - htab->plt_header_size)
14326 / htab->plt_entry_size);
14327 p = htab->srelplt2->contents + RELOC_SIZE (htab);
14329 for (; num_plts; num_plts--)
14331 Elf_Internal_Rela rel;
14333 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14334 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
14335 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14336 p += RELOC_SIZE (htab);
14338 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
14339 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
14340 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
14341 p += RELOC_SIZE (htab);
14346 /* Fill in the first three entries in the global offset table. */
14349 if (sgot->size > 0)
/* GOT[0] holds the address of .dynamic (0 when there is none);
   GOT[1] and GOT[2] are reserved for the dynamic linker.  */
14352 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
14354 bfd_put_32 (output_bfd,
14355 sdyn->output_section->vma + sdyn->output_offset,
14357 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
14358 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
14361 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
/* Tweak the ELF file header after it has been laid out: select the
   OSABI byte, set the ABI version, and flag BE8 (byte-swapped code)
   images.  NOTE(review): elided listing; braces and tail missing.  */
14368 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
14370 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
14371 struct elf32_arm_link_hash_table *globals;
14373 i_ehdrp = elf_elfheader (abfd);
/* Pre-EABI objects use the old ARM OSABI value; EABI objects use 0.  */
14375 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
14376 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
14378 i_ehdrp->e_ident[EI_OSABI] = 0;
14379 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
14383 globals = elf32_arm_hash_table (link_info);
/* Mark the image as BE8 when code has been byte-swapped for it.  */
14384 if (globals != NULL && globals->byteswap_code)
14385 i_ehdrp->e_flags |= EF_ARM_BE8;
/* Classify a dynamic relocation for the dynamic linker's benefit
   (RELATIVE relocs can be processed in bulk, PLT relocs lazily,
   COPY relocs last).  NOTE(review): the `case R_ARM_COPY:` and
   `default:` labels before the last two returns are elided.  */
14389 static enum elf_reloc_type_class
14390 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
14392 switch ((int) ELF32_R_TYPE (rela->r_info))
14394 case R_ARM_RELATIVE:
14395 return reloc_class_relative;
14396 case R_ARM_JUMP_SLOT:
14397 return reloc_class_plt;
14399 return reloc_class_copy;
14401 return reloc_class_normal;
/* Last-minute processing before the output file is written: refresh
   the ARM mapping-symbol note section, if present.  */
14406 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
14408 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
14411 /* Return TRUE if this is an unwinding table entry. */
/* Return TRUE if NAME is an EHABI unwind-index section
   (.ARM.exidx* or the linkonce variant).  */
14414 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
14416 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
14417 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
14421 /* Set the type and flags for an ARM section. We do this by
14422 the section name, which is a hack, but ought to work. */
/* Give unwind-index sections their ARM-specific section type and the
   SHF_LINK_ORDER flag (they must be sorted with the code they cover).
   NOTE(review): elided listing; braces/return missing.  */
14425 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
14429 name = bfd_get_section_name (abfd, sec);
14431 if (is_arm_elf_unwind_section_name (abfd, name))
14433 hdr->sh_type = SHT_ARM_EXIDX;
14434 hdr->sh_flags |= SHF_LINK_ORDER;
14439 /* Handle an ARM specific section when reading an object file. This is
14440 called when bfd_section_from_shdr finds a section with an unknown
/* Accept ARM-specific section types (EXIDX, PREEMPTMAP, ATTRIBUTES)
   when reading an object file and build a BFD section for them.  */
14444 elf32_arm_section_from_shdr (bfd *abfd,
14445 Elf_Internal_Shdr * hdr,
14449 /* There ought to be a place to keep ELF backend specific flags, but
14450 at the moment there isn't one. We just keep track of the
14451 sections by their name, instead. Fortunately, the ABI gives
14452 names for all the ARM specific sections, so we will probably get
14454 switch (hdr->sh_type)
14456 case SHT_ARM_EXIDX:
14457 case SHT_ARM_PREEMPTMAP:
14458 case SHT_ARM_ATTRIBUTES:
/* NOTE(review): the `break;`/`default: return FALSE;` arms between
   the case labels and this call are elided from the listing.  */
14465 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
/* Return the ARM-specific per-section data for SEC, or (in the elided
   else-branch) NULL when SEC is absent or not owned by an ARM ELF
   bfd.  */
14471 static _arm_elf_section_data *
14472 get_arm_elf_section_data (asection * sec)
14474 if (sec && sec->owner && is_arm_elf (sec->owner))
14475 return elf32_arm_section_data (sec);
14483 struct bfd_link_info *info;
14486 int (*func) (void *, const char *, Elf_Internal_Sym *,
14487 asection *, struct elf_link_hash_entry *);
14488 } output_arch_syminfo;
14490 enum map_symbol_type
14498 /* Output a single mapping symbol. */
/* Emit one mapping symbol ($a/$t/$d) at OFFSET in OSI->sec, recording
   it in the section map and forwarding it to the output callback.
   Returns TRUE on success.  */
14501 elf32_arm_output_map_sym (output_arch_syminfo *osi,
14502 enum map_symbol_type type,
/* Names are indexed by map_symbol_type: ARM, Thumb, data.  */
14505 static const char *names[3] = {"$a", "$t", "$d"};
14506 Elf_Internal_Sym sym;
14508 sym.st_value = osi->sec->output_section->vma
14509 + osi->sec->output_offset
14513 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
14514 sym.st_shndx = osi->sec_shndx;
14515 sym.st_target_internal = 0;
/* names[type][1] is the single class character 'a'/'t'/'d'.  */
14516 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
14517 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
14520 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
14521 IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt. */
/* Emit mapping symbols for one PLT entry (ROOT_PLT/ARM_PLT), either in
   .iplt (IS_IPLT_ENTRY_P) or .plt.  The offsets differ per target PLT
   layout (SymbianOS, VxWorks, NaCl, generic).  NOTE(review): elided
   listing — `return FALSE;` statements after most failed emissions and
   the trailing `return TRUE;` are missing here.  */
14524 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
14525 bfd_boolean is_iplt_entry_p,
14526 union gotplt_union *root_plt,
14527 struct arm_plt_info *arm_plt)
14529 struct elf32_arm_link_hash_table *htab;
14530 bfd_vma addr, plt_header_size;
/* No PLT entry was allocated for this symbol.  */
14532 if (root_plt->offset == (bfd_vma) -1)
14535 htab = elf32_arm_hash_table (osi->info);
14539 if (is_iplt_entry_p)
14541 osi->sec = htab->root.iplt;
14542 plt_header_size = 0;
14546 osi->sec = htab->root.splt;
14547 plt_header_size = htab->plt_header_size;
14549 osi->sec_shndx = (_bfd_elf_section_from_bfd_section
14550 (osi->info->output_bfd, osi->sec->output_section));
/* Bit 0 of the offset encodes a Thumb entry; mask it off.  */
14552 addr = root_plt->offset & -2;
14553 if (htab->symbian_p)
14555 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14557 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
14560 else if (htab->vxworks_p)
14562 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14564 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
14566 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
14568 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
14571 else if (htab->nacl_p)
14573 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
/* Generic PLT (else-branch; brace elided).  */
14578 bfd_boolean thumb_stub_p;
14580 thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
/* The Thumb thunk, when present, sits 4 bytes before the entry.  */
14583 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
14586 #ifdef FOUR_WORD_PLT
14587 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14589 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
14592 /* A three-word PLT with no Thumb thunk contains only Arm code,
14593 so only need to output a mapping symbol for the first PLT entry and
14594 entries with thumb thunks. */
14595 if (thumb_stub_p || addr == plt_header_size)
14597 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
14606 /* Output mapping symbols for PLT entries associated with H. */
/* elf_link_hash_traverse callback: emit mapping symbols for the PLT
   entries of global symbol H.  INF is the output_arch_syminfo.  */
14609 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
14611 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
14612 struct elf32_arm_link_hash_entry *eh;
/* Indirect symbols have no PLT of their own.  */
14614 if (h->root.type == bfd_link_hash_indirect)
14617 if (h->root.type == bfd_link_hash_warning)
14618 /* When warning symbols are created, they **replace** the "real"
14619 entry in the hash table, thus we never get to see the real
14620 symbol in a hash traversal. So look at it now. */
14621 h = (struct elf_link_hash_entry *) h->root.u.i.link;
14623 eh = (struct elf32_arm_link_hash_entry *) h;
/* A symbol that resolves locally uses the .iplt, not the .plt.  */
14624 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
14625 &h->plt, &eh->plt);
14628 /* Output a single local symbol for a generated stub. */
/* Emit a local STT_FUNC symbol NAME of SIZE bytes at OFFSET in
   OSI->sec, labelling a linker-generated stub.  Returns TRUE on
   success.  */
14631 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
14632 bfd_vma offset, bfd_vma size)
14634 Elf_Internal_Sym sym;
14636 sym.st_value = osi->sec->output_section->vma
14637 + osi->sec->output_offset
14639 sym.st_size = size;
14641 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14642 sym.st_shndx = osi->sec_shndx;
14643 sym.st_target_internal = 0;
14644 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
/* bfd_hash_traverse callback: for one long-branch stub, emit its named
   stub symbol and then $a/$t/$d mapping symbols at each transition in
   the stub's instruction template.  NOTE(review): elided listing —
   several returns, the per-insn `size` accumulation in the final
   switch and the closing `return TRUE;` are missing here.  */
14648 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
14651 struct elf32_arm_stub_hash_entry *stub_entry;
14652 asection *stub_sec;
14655 output_arch_syminfo *osi;
14656 const insn_sequence *template_sequence;
14657 enum stub_insn_type prev_type;
14660 enum map_symbol_type sym_type;
14662 /* Massage our args to the form they really have. */
14663 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
14664 osi = (output_arch_syminfo *) in_arg;
14666 stub_sec = stub_entry->stub_sec;
14668 /* Ensure this stub is attached to the current section being
14670 if (stub_sec != osi->sec)
14673 addr = (bfd_vma) stub_entry->stub_offset;
14674 stub_name = stub_entry->output_name;
/* The first template insn decides whether the stub symbol is an ARM
   address or a Thumb address (low bit set).  */
14676 template_sequence = stub_entry->stub_template;
14677 switch (template_sequence[0].type)
14680 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
14685 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
14686 stub_entry->stub_size))
/* Walk the template and emit a mapping symbol wherever the
   instruction class changes.  */
14694 prev_type = DATA_TYPE;
14696 for (i = 0; i < stub_entry->stub_template_size; i++)
14698 switch (template_sequence[i].type)
14701 sym_type = ARM_MAP_ARM;
14706 sym_type = ARM_MAP_THUMB;
14710 sym_type = ARM_MAP_DATA;
14718 if (template_sequence[i].type != prev_type)
14720 prev_type = template_sequence[i].type;
14721 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
/* NOTE(review): the body of this switch (advancing `size` by the
   encoded width of each template insn) is elided.  */
14725 switch (template_sequence[i].type)
14749 /* Output mapping symbols for linker generated sections,
14750 and for those data-only sections that do not have a
/* Output mapping symbols ($a/$t/$d) for everything the linker itself
   generated: data-only input sections lacking a map, the three glue
   sections, long-branch stub sections, the PLT/iplt, and the TLS
   trampolines.  NOTE(review): elided listing throughout — braces,
   several returns and some loop conditions are missing here.  */
14754 elf32_arm_output_arch_local_syms (bfd *output_bfd,
14755 struct bfd_link_info *info,
14757 int (*func) (void *, const char *,
14758 Elf_Internal_Sym *,
14760 struct elf_link_hash_entry *))
14762 output_arch_syminfo osi;
14763 struct elf32_arm_link_hash_table *htab;
14765 bfd_size_type size;
14768 htab = elf32_arm_hash_table (info);
14772 check_use_blx (htab);
14774 osi.flaginfo = flaginfo;
14778 /* Add a $d mapping symbol to data-only sections that
14779 don't have any mapping symbol. This may result in (harmless) redundant
14780 mapping symbols. */
14781 for (input_bfd = info->input_bfds;
14783 input_bfd = input_bfd->link_next)
14785 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
14786 for (osi.sec = input_bfd->sections;
14788 osi.sec = osi.sec->next)
/* Only allocated, non-code, content-bearing, unmapped,
   non-empty, non-excluded sections qualify.  */
14790 if (osi.sec->output_section != NULL
14791 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
14793 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
14794 == SEC_HAS_CONTENTS
14795 && get_arm_elf_section_data (osi.sec) != NULL
14796 && get_arm_elf_section_data (osi.sec)->mapcount == 0
14797 && osi.sec->size > 0
14798 && (osi.sec->flags & SEC_EXCLUDE) == 0)
14800 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14801 (output_bfd, osi.sec->output_section);
14802 if (osi.sec_shndx != (int)SHN_BAD)
14803 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
14808 /* ARM->Thumb glue. */
14809 if (htab->arm_glue_size > 0)
14811 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
14812 ARM2THUMB_GLUE_SECTION_NAME);
14814 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14815 (output_bfd, osi.sec->output_section);
/* Pick the per-veneer size matching the glue flavour in use.  */
14816 if (info->shared || htab->root.is_relocatable_executable
14817 || htab->pic_veneer)
14818 size = ARM2THUMB_PIC_GLUE_SIZE;
14819 else if (htab->use_blx)
14820 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
14822 size = ARM2THUMB_STATIC_GLUE_SIZE;
14824 for (offset = 0; offset < htab->arm_glue_size; offset += size)
/* Each veneer: ARM code, then a trailing literal word.  */
14826 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
14827 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
14831 /* Thumb->ARM glue. */
14832 if (htab->thumb_glue_size > 0)
14834 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
14835 THUMB2ARM_GLUE_SECTION_NAME);
14837 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14838 (output_bfd, osi.sec->output_section);
14839 size = THUMB2ARM_GLUE_SIZE;
14841 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
/* Each veneer: Thumb entry, ARM continuation 4 bytes in.  */
14843 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
14844 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
14848 /* ARMv4 BX veneers. */
14849 if (htab->bx_glue_size > 0)
14851 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
14852 ARM_BX_GLUE_SECTION_NAME);
14854 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14855 (output_bfd, osi.sec->output_section);
14857 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
14860 /* Long calls stubs. */
14861 if (htab->stub_bfd && htab->stub_bfd->sections)
14863 asection* stub_sec;
14865 for (stub_sec = htab->stub_bfd->sections;
14867 stub_sec = stub_sec->next)
14869 /* Ignore non-stub sections. */
14870 if (!strstr (stub_sec->name, STUB_SUFFIX))
14873 osi.sec = stub_sec;
14875 osi.sec_shndx = _bfd_elf_section_from_bfd_section
14876 (output_bfd, osi.sec->output_section);
14878 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
14882 /* Finally, output mapping symbols for the PLT. */
14883 if (htab->root.splt && htab->root.splt->size > 0)
14885 osi.sec = htab->root.splt;
14886 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
14887 (output_bfd, osi.sec->output_section));
14889 /* Output mapping symbols for the plt header. SymbianOS does not have a
14891 if (htab->vxworks_p)
14893 /* VxWorks shared libraries have no PLT header. */
/* NOTE(review): the !info->shared guard for this branch is
   elided from the listing.  */
14896 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14898 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
14902 else if (htab->nacl_p)
14904 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14907 else if (!htab->symbian_p)
14909 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
14911 #ifndef FOUR_WORD_PLT
/* The five-word PLT header ends with a literal word at 16.  */
14912 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
/* Per-entry mapping symbols for global and local-ifunc PLTs.  */
14917 if ((htab->root.splt && htab->root.splt->size > 0)
14918 || (htab->root.iplt && htab->root.iplt->size > 0))
14920 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
14921 for (input_bfd = info->input_bfds;
14923 input_bfd = input_bfd->link_next)
14925 struct arm_local_iplt_info **local_iplt;
14926 unsigned int i, num_syms;
14928 local_iplt = elf32_arm_local_iplt (input_bfd);
14929 if (local_iplt != NULL)
14931 num_syms = elf_symtab_hdr (input_bfd).sh_info;
14932 for (i = 0; i < num_syms; i++)
14933 if (local_iplt[i] != NULL
14934 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
14935 &local_iplt[i]->root,
14936 &local_iplt[i]->arm))
14941 if (htab->dt_tlsdesc_plt != 0)
14943 /* Mapping symbols for the lazy tls trampoline. */
14944 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
14947 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
14948 htab->dt_tlsdesc_plt + 24))
14951 if (htab->tls_trampoline != 0)
14953 /* Mapping symbols for the tls trampoline. */
14954 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
14956 #ifdef FOUR_WORD_PLT
14957 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
14958 htab->tls_trampoline + 12))
14966 /* Allocate target specific section data. */
14969 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
14971 if (!sec->used_by_bfd)
14973 _arm_elf_section_data *sdata;
14974 bfd_size_type amt = sizeof (*sdata);
14976 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
14979 sec->used_by_bfd = sdata;
14982 return _bfd_elf_new_section_hook (abfd, sec);
14986 /* Used to order a list of mapping symbols by address. */
14989 elf32_arm_compare_mapping (const void * a, const void * b)
14991 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
14992 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
14994 if (amap->vma > bmap->vma)
14996 else if (amap->vma < bmap->vma)
14998 else if (amap->type > bmap->type)
14999 /* Ensure results do not depend on the host qsort for objects with
15000 multiple mapping symbols at the same address by sorting on type
15003 else if (amap->type < bmap->type)
15009 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
15011 static unsigned long
15012 offset_prel31 (unsigned long addr, bfd_vma offset)
15014 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
15017 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
15021 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
15023 unsigned long first_word = bfd_get_32 (output_bfd, from);
15024 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
15026 /* High bit of first word is supposed to be zero. */
15027 if ((first_word & 0x80000000ul) == 0)
15028 first_word = offset_prel31 (first_word, offset);
15030 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
15031 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
15032 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
15033 second_word = offset_prel31 (second_word, offset);
15035 bfd_put_32 (output_bfd, first_word, to);
15036 bfd_put_32 (output_bfd, second_word, to + 4);
15039 /* Data for make_branch_to_a8_stub(). */
15041 struct a8_branch_to_stub_data
15043 asection *writing_section;
15044 bfd_byte *contents;
15048 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
15049 places for a particular section. */
/* NOTE(review): this extraction has gaps between the embedded line
   numbers -- the function's return type, its IN_ARG parameter, braces,
   several returns, the j1/j2 computation and the switch's default and
   closing code are missing.  Comments below annotate only what is
   visible; do not rely on this text compiling.  */
15052 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
15055 struct elf32_arm_stub_hash_entry *stub_entry;
15056 struct a8_branch_to_stub_data *data;
15057 bfd_byte *contents;
15058 unsigned long branch_insn;
15059 bfd_vma veneered_insn_loc, veneer_entry_loc;
15060 bfd_signed_vma branch_offset;
15062 unsigned int target;
/* Called per stub hash entry; IN_ARG carries the section being
   patched and its contents (struct a8_branch_to_stub_data).  */
15064 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
15065 data = (struct a8_branch_to_stub_data *) in_arg;
/* Only patch stubs that target the section being written and whose
   type is one of the Cortex-A8 veneer kinds.  */
15067 if (stub_entry->target_section != data->writing_section
15068 || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
15071 contents = data->contents;
/* Absolute address of the instruction being replaced...  */
15073 veneered_insn_loc = stub_entry->target_section->output_section->vma
15074 + stub_entry->target_section->output_offset
15075 + stub_entry->target_value;
/* ...and of the veneer it will branch to.  */
15077 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
15078 + stub_entry->stub_sec->output_offset
15079 + stub_entry->stub_offset;
/* For a BLX veneer the branch target is word-aligned.  */
15081 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
15082 veneered_insn_loc &= ~3u;
/* Branch offset is relative to PC = insn address + 4.  */
15084 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
15086 abfd = stub_entry->target_section->owner;
15087 target = stub_entry->target_value;
15089 /* We attempt to avoid this condition by setting stubs_always_after_branch
15090 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
15091 This check is just to be on the safe side... */
15092 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
15094 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
15095 "allocated in unsafe location"), abfd);
/* Build the replacement branch; the encoding skeleton depends on the
   kind of veneer being branched to.  */
15099 switch (stub_entry->stub_type)
15101 case arm_stub_a8_veneer_b:
15102 case arm_stub_a8_veneer_b_cond:
15103 branch_insn = 0xf0009000;
15106 case arm_stub_a8_veneer_blx:
15107 branch_insn = 0xf000e800;
15110 case arm_stub_a8_veneer_bl:
15112 unsigned int i1, j1, i2, j2, s;
15114 branch_insn = 0xf000d000;
/* Offsets beyond +/-16MB cannot be encoded in this branch form.  */
15117 if (branch_offset < -16777216 || branch_offset > 16777214)
15119 /* There's not much we can do apart from complain if this
15121 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
15122 "of range (input file too large)"), abfd);
15126 /* i1 = not(j1 eor s), so:
15128 j1 = (not i1) eor s. */
/* Scatter the branch offset into the instruction's bit fields.  */
15130 branch_insn |= (branch_offset >> 1) & 0x7ff;
15131 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
15132 i2 = (branch_offset >> 22) & 1;
15133 i1 = (branch_offset >> 23) & 1;
15134 s = (branch_offset >> 24) & 1;
/* NOTE(review): the lines deriving j1/j2 from i1/i2/s (original lines
   15135-15136) are missing from this extraction.  */
15137 branch_insn |= j2 << 11;
15138 branch_insn |= j1 << 13;
15139 branch_insn |= s << 26;
/* Write the two halfwords of the new branch over the old insn.  */
15148 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
15149 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
15154 /* Do code byteswapping. Return FALSE afterwards so that the section is
15155 written out as normal. */
/* NOTE(review): this extraction has gaps -- the function's return type,
   braces, several early returns, some local declarations (e.g. the
   loop's ptr/end/tmp variables) and the final free/return are missing
   between the embedded line numbers.  Comments annotate what is
   visible only.  */
15158 elf32_arm_write_section (bfd *output_bfd,
15159 struct bfd_link_info *link_info,
15161 bfd_byte *contents)
15163 unsigned int mapcount, errcount;
15164 _arm_elf_section_data *arm_data;
15165 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
15166 elf32_arm_section_map *map;
15167 elf32_vfp11_erratum_list *errnode;
/* Output VMA of the section, used to turn absolute erratum/exidx
   addresses into offsets within CONTENTS.  */
15170 bfd_vma offset = sec->output_section->vma + sec->output_offset;
15174 if (globals == NULL)
15177 /* If this section has not been allocated an _arm_elf_section_data
15178 structure then we cannot record anything. */
15179 arm_data = get_arm_elf_section_data (sec);
15180 if (arm_data == NULL)
15183 mapcount = arm_data->mapcount;
15184 map = arm_data->map;
15185 errcount = arm_data->erratumcount;
/* Apply VFP11 denorm-erratum fix-ups recorded on this section: patch
   the scanned instruction into a branch to its veneer, and fill in the
   veneer itself.  */
15189 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
15191 for (errnode = arm_data->erratumlist; errnode != 0;
15192 errnode = errnode->next)
15194 bfd_vma target = errnode->vma - offset;
15196 switch (errnode->type)
15198 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
15200 bfd_vma branch_to_veneer;
15201 /* Original condition code of instruction, plus bit mask for
15202 ARM B instruction. */
15203 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
15206 /* The instruction is before the label. */
15209 /* Above offset included in -4 below. */
15210 branch_to_veneer = errnode->u.b.veneer->vma
15211 - errnode->vma - 4;
15213 if ((signed) branch_to_veneer < -(1 << 25)
15214 || (signed) branch_to_veneer >= (1 << 25))
15215 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
15216 "range"), output_bfd);
/* Store the branch byte-by-byte; ENDIANFLIP xors the index so the
   bytes land correctly on a big-endian output.  */
15218 insn |= (branch_to_veneer >> 2) & 0xffffff;
15219 contents[endianflip ^ target] = insn & 0xff;
15220 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
15221 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
15222 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
15226 case VFP11_ERRATUM_ARM_VENEER:
15228 bfd_vma branch_from_veneer;
15231 /* Take size of veneer into account. */
15232 branch_from_veneer = errnode->u.v.branch->vma
15233 - errnode->vma - 12;
15235 if ((signed) branch_from_veneer < -(1 << 25)
15236 || (signed) branch_from_veneer >= (1 << 25))
15237 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
15238 "range"), output_bfd);
15240 /* Original instruction. */
15241 insn = errnode->u.v.branch->u.b.vfp_insn;
15242 contents[endianflip ^ target] = insn & 0xff;
15243 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
15244 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
15245 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
15247 /* Branch back to insn after original insn. */
15248 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
15249 contents[endianflip ^ (target + 4)] = insn & 0xff;
15250 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
15251 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
15252 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
/* For .ARM.exidx sections, re-emit the table applying the recorded
   edit list: entries may be deleted (merged) or EXIDX_CANTUNWIND
   terminators inserted, with prel31 offsets adjusted to match.  */
15262 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
15264 arm_unwind_table_edit *edit_node
15265 = arm_data->u.exidx.unwind_edit_list;
15266 /* Now, sec->size is the size of the section we will write. The original
15267 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
15268 markers) was sec->rawsize. (This isn't the case if we perform no
15269 edits, then rawsize will be zero and we should use size). */
15270 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
15271 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
15272 unsigned int in_index, out_index;
15273 bfd_vma add_to_offsets = 0;
15275 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
15279 unsigned int edit_index = edit_node->index;
/* Copy entries that precede the next edit verbatim (offsets
   shifted by ADD_TO_OFFSETS).  */
15281 if (in_index < edit_index && in_index * 8 < input_size)
15283 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15284 contents + in_index * 8, add_to_offsets)
15288 else if (in_index == edit_index
15289 || (in_index * 8 >= input_size
15290 && edit_index == UINT_MAX))
15292 switch (edit_node->type)
15294 case DELETE_EXIDX_ENTRY:
15296 add_to_offsets += 8;
15299 case INSERT_EXIDX_CANTUNWIND_AT_END:
15301 asection *text_sec = edit_node->linked_section;
15302 bfd_vma text_offset = text_sec->output_section->vma
15303 + text_sec->output_offset
15305 bfd_vma exidx_offset = offset + out_index * 8;
15306 unsigned long prel31_offset;
15308 /* Note: this is meant to be equivalent to an
15309 R_ARM_PREL31 relocation. These synthetic
15310 EXIDX_CANTUNWIND markers are not relocated by the
15311 usual BFD method. */
15312 prel31_offset = (text_offset - exidx_offset)
15315 /* First address we can't unwind. */
15316 bfd_put_32 (output_bfd, prel31_offset,
15317 &edited_contents[out_index * 8]);
15319 /* Code for EXIDX_CANTUNWIND. */
15320 bfd_put_32 (output_bfd, 0x1,
15321 &edited_contents[out_index * 8 + 4]);
15324 add_to_offsets -= 8;
15329 edit_node = edit_node->next;
15334 /* No more edits, copy remaining entries verbatim. */
15335 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
15336 contents + in_index * 8, add_to_offsets);
/* Write the edited table directly and skip the normal path.  */
15342 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
15343 bfd_set_section_contents (output_bfd, sec->output_section,
15345 (file_ptr) sec->output_offset, sec->size);
15350 /* Fix code to point to Cortex-A8 erratum stubs. */
15351 if (globals->fix_cortex_a8)
15353 struct a8_branch_to_stub_data data;
15355 data.writing_section = sec;
15356 data.contents = contents;
15358 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
/* BE8 support: byte-swap the code regions in place, using the sorted
   mapping symbols to find where code (words/halfwords) versus data
   lies.  */
15365 if (globals->byteswap_code)
15367 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
15370 for (i = 0; i < mapcount; i++)
15372 if (i == mapcount - 1)
15375 end = map[i + 1].vma;
15377 switch (map[i].type)
15380 /* Byte swap code words. */
15381 while (ptr + 3 < end)
15383 tmp = contents[ptr];
15384 contents[ptr] = contents[ptr + 3];
15385 contents[ptr + 3] = tmp;
15386 tmp = contents[ptr + 1];
15387 contents[ptr + 1] = contents[ptr + 2];
15388 contents[ptr + 2] = tmp;
15394 /* Byte swap code halfwords. */
15395 while (ptr + 1 < end)
15397 tmp = contents[ptr];
15398 contents[ptr] = contents[ptr + 1];
15399 contents[ptr + 1] = tmp;
15405 /* Leave data alone. */
/* Mark the mapping-symbol data as consumed.  NOTE(review): the free of
   arm_data->map and the final return (original lines after 15415) are
   missing from this extraction.  */
15413 arm_data->mapcount = -1;
15414 arm_data->mapsize = 0;
15415 arm_data->map = NULL;
15420 /* Mangle thumb function symbols as we read them in. */
15423 elf32_arm_swap_symbol_in (bfd * abfd,
15426 Elf_Internal_Sym *dst)
15428 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
15431 /* New EABI objects mark thumb function symbols by setting the low bit of
15433 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
15434 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
15436 if (dst->st_value & 1)
15438 dst->st_value &= ~(bfd_vma) 1;
15439 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15442 dst->st_target_internal = ST_BRANCH_TO_ARM;
15444 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
15446 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
15447 dst->st_target_internal = ST_BRANCH_TO_THUMB;
15449 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
15450 dst->st_target_internal = ST_BRANCH_LONG;
15452 dst->st_target_internal = ST_BRANCH_UNKNOWN;
15458 /* Mangle thumb function symbols as we write them out. */
15461 elf32_arm_swap_symbol_out (bfd *abfd,
15462 const Elf_Internal_Sym *src,
15466 Elf_Internal_Sym newsym;
15468 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
15469 of the address set, as per the new EABI. We do this unconditionally
15470 because objcopy does not set the elf header flags until after
15471 it writes out the symbol table. */
15472 if (src->st_target_internal == ST_BRANCH_TO_THUMB)
15475 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
15476 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
15477 if (newsym.st_shndx != SHN_UNDEF)
15479 /* Do this only for defined symbols. At link type, the static
15480 linker will simulate the work of dynamic linker of resolving
15481 symbols and will carry over the thumbness of found symbols to
15482 the output symbol table. It's not clear how it happens, but
15483 the thumbness of undefined symbols can well be different at
15484 runtime, and writing '1' for them will be confusing for users
15485 and possibly for dynamic linker itself.
15487 newsym.st_value |= 1;
15492 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
15495 /* Add the PT_ARM_EXIDX program header. */
15498 elf32_arm_modify_segment_map (bfd *abfd,
15499 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15501 struct elf_segment_map *m;
15504 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15505 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15507 /* If there is already a PT_ARM_EXIDX header, then we do not
15508 want to add another one. This situation arises when running
15509 "strip"; the input binary already has the header. */
15510 m = elf_tdata (abfd)->segment_map;
15511 while (m && m->p_type != PT_ARM_EXIDX)
15515 m = (struct elf_segment_map *)
15516 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
15519 m->p_type = PT_ARM_EXIDX;
15521 m->sections[0] = sec;
15523 m->next = elf_tdata (abfd)->segment_map;
15524 elf_tdata (abfd)->segment_map = m;
15531 /* We may add a PT_ARM_EXIDX program header. */
15534 elf32_arm_additional_program_headers (bfd *abfd,
15535 struct bfd_link_info *info ATTRIBUTE_UNUSED)
15539 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
15540 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
15546 /* Hook called by the linker routine which adds symbols from an object
15550 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
15551 Elf_Internal_Sym *sym, const char **namep,
15552 flagword *flagsp, asection **secp, bfd_vma *valp)
15554 if ((abfd->flags & DYNAMIC) == 0
15555 && (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
15556 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE))
15557 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE;
15559 if (elf32_arm_hash_table (info)->vxworks_p
15560 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
15561 flagsp, secp, valp))
15567 /* We use this to override swap_symbol_in and swap_symbol_out. */
15568 const struct elf_size_info elf32_arm_size_info =
15570 sizeof (Elf32_External_Ehdr),
15571 sizeof (Elf32_External_Phdr),
15572 sizeof (Elf32_External_Shdr),
15573 sizeof (Elf32_External_Rel),
15574 sizeof (Elf32_External_Rela),
15575 sizeof (Elf32_External_Sym),
15576 sizeof (Elf32_External_Dyn),
15577 sizeof (Elf_External_Note),
15581 ELFCLASS32, EV_CURRENT,
15582 bfd_elf32_write_out_phdrs,
15583 bfd_elf32_write_shdrs_and_ehdr,
15584 bfd_elf32_checksum_contents,
15585 bfd_elf32_write_relocs,
15586 elf32_arm_swap_symbol_in,
15587 elf32_arm_swap_symbol_out,
15588 bfd_elf32_slurp_reloc_table,
15589 bfd_elf32_slurp_symbol_table,
15590 bfd_elf32_swap_dyn_in,
15591 bfd_elf32_swap_dyn_out,
15592 bfd_elf32_swap_reloc_in,
15593 bfd_elf32_swap_reloc_out,
15594 bfd_elf32_swap_reloca_in,
15595 bfd_elf32_swap_reloca_out
15598 #define ELF_ARCH bfd_arch_arm
15599 #define ELF_TARGET_ID ARM_ELF_DATA
15600 #define ELF_MACHINE_CODE EM_ARM
15601 #ifdef __QNXTARGET__
15602 #define ELF_MAXPAGESIZE 0x1000
15604 #define ELF_MAXPAGESIZE 0x8000
15606 #define ELF_MINPAGESIZE 0x1000
15607 #define ELF_COMMONPAGESIZE 0x1000
15609 #define bfd_elf32_mkobject elf32_arm_mkobject
15611 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
15612 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
15613 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
15614 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
15615 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
15616 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
15617 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
15618 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
15619 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
15620 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
15621 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
15622 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
15623 #define bfd_elf32_bfd_final_link elf32_arm_final_link
15625 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
15626 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
15627 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
15628 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
15629 #define elf_backend_check_relocs elf32_arm_check_relocs
15630 #define elf_backend_relocate_section elf32_arm_relocate_section
15631 #define elf_backend_write_section elf32_arm_write_section
15632 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
15633 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
15634 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
15635 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
15636 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
15637 #define elf_backend_always_size_sections elf32_arm_always_size_sections
15638 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
15639 #define elf_backend_post_process_headers elf32_arm_post_process_headers
15640 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
15641 #define elf_backend_object_p elf32_arm_object_p
15642 #define elf_backend_fake_sections elf32_arm_fake_sections
15643 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
15644 #define elf_backend_final_write_processing elf32_arm_final_write_processing
15645 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
15646 #define elf_backend_size_info elf32_arm_size_info
15647 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
15648 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
15649 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
15650 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
15651 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
15653 #define elf_backend_can_refcount 1
15654 #define elf_backend_can_gc_sections 1
15655 #define elf_backend_plt_readonly 1
15656 #define elf_backend_want_got_plt 1
15657 #define elf_backend_want_plt_sym 0
15658 #define elf_backend_may_use_rel_p 1
15659 #define elf_backend_may_use_rela_p 0
15660 #define elf_backend_default_use_rela_p 0
15662 #define elf_backend_got_header_size 12
15664 #undef elf_backend_obj_attrs_vendor
15665 #define elf_backend_obj_attrs_vendor "aeabi"
15666 #undef elf_backend_obj_attrs_section
15667 #define elf_backend_obj_attrs_section ".ARM.attributes"
15668 #undef elf_backend_obj_attrs_arg_type
15669 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
15670 #undef elf_backend_obj_attrs_section_type
15671 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
15672 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
15673 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
15675 #include "elf32-target.h"
15677 /* Native Client targets. */
15679 #undef TARGET_LITTLE_SYM
15680 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_nacl_vec
15681 #undef TARGET_LITTLE_NAME
15682 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
15683 #undef TARGET_BIG_SYM
15684 #define TARGET_BIG_SYM bfd_elf32_bigarm_nacl_vec
15685 #undef TARGET_BIG_NAME
15686 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
15688 /* Like elf32_arm_link_hash_table_create -- but overrides
15689 appropriately for NaCl. */
15691 static struct bfd_link_hash_table *
15692 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
15694 struct bfd_link_hash_table *ret;
15696 ret = elf32_arm_link_hash_table_create (abfd);
15699 struct elf32_arm_link_hash_table *htab
15700 = (struct elf32_arm_link_hash_table *) ret;
15704 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
15705 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
15710 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
15711 really need to use elf32_arm_modify_segment_map. But we do it
15712 anyway just to reduce gratuitous differences with the stock ARM backend. */
15715 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
15717 return (elf32_arm_modify_segment_map (abfd, info)
15718 && nacl_modify_segment_map (abfd, info));
15722 #define elf32_bed elf32_arm_nacl_bed
15723 #undef bfd_elf32_bfd_link_hash_table_create
15724 #define bfd_elf32_bfd_link_hash_table_create \
15725 elf32_arm_nacl_link_hash_table_create
15726 #undef elf_backend_plt_alignment
15727 #define elf_backend_plt_alignment 4
15728 #undef elf_backend_modify_segment_map
15729 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
15730 #undef elf_backend_modify_program_headers
15731 #define elf_backend_modify_program_headers nacl_modify_program_headers
15733 #undef ELF_MAXPAGESIZE
15734 #define ELF_MAXPAGESIZE 0x10000
15736 #include "elf32-target.h"
15738 /* Reset to defaults. */
15739 #undef elf_backend_plt_alignment
15740 #undef elf_backend_modify_segment_map
15741 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
15742 #undef elf_backend_modify_program_headers
15744 /* VxWorks Targets. */
15746 #undef TARGET_LITTLE_SYM
15747 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
15748 #undef TARGET_LITTLE_NAME
15749 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
15750 #undef TARGET_BIG_SYM
15751 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
15752 #undef TARGET_BIG_NAME
15753 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
15755 /* Like elf32_arm_link_hash_table_create -- but overrides
15756 appropriately for VxWorks. */
15758 static struct bfd_link_hash_table *
15759 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
15761 struct bfd_link_hash_table *ret;
15763 ret = elf32_arm_link_hash_table_create (abfd);
15766 struct elf32_arm_link_hash_table *htab
15767 = (struct elf32_arm_link_hash_table *) ret;
15769 htab->vxworks_p = 1;
15775 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
15777 elf32_arm_final_write_processing (abfd, linker);
15778 elf_vxworks_final_write_processing (abfd, linker);
15782 #define elf32_bed elf32_arm_vxworks_bed
15784 #undef bfd_elf32_bfd_link_hash_table_create
15785 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
15786 #undef elf_backend_final_write_processing
15787 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
15788 #undef elf_backend_emit_relocs
15789 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
15791 #undef elf_backend_may_use_rel_p
15792 #define elf_backend_may_use_rel_p 0
15793 #undef elf_backend_may_use_rela_p
15794 #define elf_backend_may_use_rela_p 1
15795 #undef elf_backend_default_use_rela_p
15796 #define elf_backend_default_use_rela_p 1
15797 #undef elf_backend_want_plt_sym
15798 #define elf_backend_want_plt_sym 1
15799 #undef ELF_MAXPAGESIZE
15800 #define ELF_MAXPAGESIZE 0x1000
15802 #include "elf32-target.h"
15805 /* Merge backend specific data from an object file to the output
15806 object file when linking. */
/* NOTE(review): this extraction has gaps -- the return type, braces,
   many "return TRUE/FALSE" lines, the in_flags/sec declarations and
   the ibfd/obfd argument lines of several _bfd_error_handler calls are
   missing between the embedded line numbers.  */
15809 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
15811 flagword out_flags;
15813 bfd_boolean flags_compatible = TRUE;
15816 /* Check if we have the same endianness. */
15817 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
15820 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
/* Merge the EABI build attributes first; a hard conflict there fails
   the whole merge.  */
15823 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
15826 /* The input BFD must have had its flags initialised. */
15827 /* The following seems bogus to me -- The flags are initialized in
15828 the assembler but I don't think an elf_flags_init field is
15829 written into the object. */
15830 /* BFD_ASSERT (elf_flags_init (ibfd)); */
15832 in_flags = elf_elfheader (ibfd)->e_flags;
15833 out_flags = elf_elfheader (obfd)->e_flags;
15835 /* In theory there is no reason why we couldn't handle this. However
15836 in practice it isn't even close to working and there is no real
15837 reason to want it. */
15838 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
15839 && !(ibfd->flags & DYNAMIC)
15840 && (in_flags & EF_ARM_BE8))
15842 _bfd_error_handler (_("error: %B is already in final BE8 format"),
/* First contributor: adopt its flags wholesale (unless it is the
   default-architecture empty case described below).  */
15847 if (!elf_flags_init (obfd))
15849 /* If the input is the default architecture and had the default
15850 flags then do not bother setting the flags for the output
15851 architecture, instead allow future merges to do this. If no
15852 future merges ever set these flags then they will retain their
15853 uninitialised values, which surprise surprise, correspond
15854 to the default values. */
15855 if (bfd_get_arch_info (ibfd)->the_default
15856 && elf_elfheader (ibfd)->e_flags == 0)
15859 elf_flags_init (obfd) = TRUE;
15860 elf_elfheader (obfd)->e_flags = in_flags;
15862 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
15863 && bfd_get_arch_info (obfd)->the_default)
15864 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
15869 /* Determine what should happen if the input ARM architecture
15870 does not match the output ARM architecture. */
15871 if (! bfd_arm_merge_machines (ibfd, obfd))
15874 /* Identical flags must be compatible. */
15875 if (in_flags == out_flags)
15878 /* Check to see if the input BFD actually contains any sections. If
15879 not, its flags may not have been initialised either, but it
15880 cannot actually cause any incompatiblity. Do not short-circuit
15881 dynamic objects; their section list may be emptied by
15882 elf_link_add_object_symbols.
15884 Also check to see if there are no code sections in the input.
15885 In this case there is no need to check for code specific flags.
15886 XXX - do we need to worry about floating-point format compatability
15887 in data sections ? */
15888 if (!(ibfd->flags & DYNAMIC))
15890 bfd_boolean null_input_bfd = TRUE;
15891 bfd_boolean only_data_sections = TRUE;
15893 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
15895 /* Ignore synthetic glue sections. */
15896 if (strcmp (sec->name, ".glue_7")
15897 && strcmp (sec->name, ".glue_7t"))
15899 if ((bfd_get_section_flags (ibfd, sec)
15900 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
15901 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
15902 only_data_sections = FALSE;
15904 null_input_bfd = FALSE;
15909 if (null_input_bfd || only_data_sections)
15913 /* Complain about various flag mismatches. */
15914 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
15915 EF_ARM_EABI_VERSION (out_flags)))
15918 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
15920 (in_flags & EF_ARM_EABIMASK) >> 24,
15921 (out_flags & EF_ARM_EABIMASK) >> 24);
15925 /* Not sure what needs to be checked for EABI versions >= 1. */
15926 /* VxWorks libraries do not use these flags. */
/* The legacy (pre-EABI) flag checks below apply only when the input
   has no recognised EABI version and neither side is VxWorks.  */
15927 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
15928 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
15929 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
15931 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
15934 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
15936 in_flags & EF_ARM_APCS_26 ? 26 : 32,
15937 out_flags & EF_ARM_APCS_26 ? 26 : 32);
15938 flags_compatible = FALSE;
15941 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
15943 if (in_flags & EF_ARM_APCS_FLOAT)
15945 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
15949 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
15952 flags_compatible = FALSE;
15955 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
15957 if (in_flags & EF_ARM_VFP_FLOAT)
15959 (_("error: %B uses VFP instructions, whereas %B does not"),
15963 (_("error: %B uses FPA instructions, whereas %B does not"),
15966 flags_compatible = FALSE;
15969 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
15971 if (in_flags & EF_ARM_MAVERICK_FLOAT)
15973 (_("error: %B uses Maverick instructions, whereas %B does not"),
15977 (_("error: %B does not use Maverick instructions, whereas %B does"),
15980 flags_compatible = FALSE;
15983 #ifdef EF_ARM_SOFT_FLOAT
15984 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
15986 /* We can allow interworking between code that is VFP format
15987 layout, and uses either soft float or integer regs for
15988 passing floating point arguments and results. We already
15989 know that the APCS_FLOAT flags match; similarly for VFP
15991 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
15992 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
15994 if (in_flags & EF_ARM_SOFT_FLOAT)
15996 (_("error: %B uses software FP, whereas %B uses hardware FP"),
16000 (_("error: %B uses hardware FP, whereas %B uses software FP"),
16003 flags_compatible = FALSE;
16008 /* Interworking mismatch is only a warning. */
16009 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
16011 if (in_flags & EF_ARM_INTERWORK)
16014 (_("Warning: %B supports interworking, whereas %B does not"),
16020 (_("Warning: %B does not support interworking, whereas %B does"),
/* TRUE only if none of the hard errors above fired.  */
16026 return flags_compatible;
16030 /* Symbian OS Targets. */
16032 #undef TARGET_LITTLE_SYM
16033 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
16034 #undef TARGET_LITTLE_NAME
16035 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
16036 #undef TARGET_BIG_SYM
16037 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
16038 #undef TARGET_BIG_NAME
16039 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
16041 /* Like elf32_arm_link_hash_table_create -- but overrides
16042 appropriately for Symbian OS. */
16044 static struct bfd_link_hash_table *
16045 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
16047 struct bfd_link_hash_table *ret;
/* Create the generic ARM hash table first, then specialize its
   Symbian-specific fields below.  NOTE(review): the NULL check on RET
   before the specialization is not visible in this listing — confirm
   the customization only runs when creation succeeded.  */
16049 ret = elf32_arm_link_hash_table_create (abfd);
16052 struct elf32_arm_link_hash_table *htab
16053 = (struct elf32_arm_link_hash_table *)ret;
16054 /* There is no PLT header for Symbian OS. */
16055 htab->plt_header_size = 0;
16056 /* The PLT entries are each one instruction and one word. */
16057 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
16058 htab->symbian_p = 1;
16059 /* Symbian uses armv5t or above, so use_blx is always true. */
/* The Symbian post-linker relocates executables, so flag the output
   as a relocatable executable for the generic linker code.  */
16061 htab->root.is_relocatable_executable = 1;
/* Special-section table for Symbian OS: overrides the default ELF
   section flags so the dynamic-linking sections are neither loaded
   nor writable (the post-linker consumes and discards them).  */
16066 static const struct bfd_elf_special_section
16067 elf32_arm_symbian_special_sections[] =
16069 /* In a BPABI executable, the dynamic linking sections do not go in
16070 the loadable read-only segment. The post-linker may wish to
16071 refer to these sections, but they are not part of the final
16073 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
16074 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
16075 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
16076 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
16077 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
16078 /* These sections do not need to be writable as the SymbianOS
16079 postlinker will arrange things so that no dynamic relocation is
16081 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
16082 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
16083 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
/* All-zero sentinel terminating the table.  */
16084 { NULL, 0, 0, 0, 0 }
/* Symbian-specific begin_write_processing hook: strip D_PAGED from
   ABFD (BPABI files are never directly mapped by an OS), then chain
   to the generic ARM hook.  */
16088 elf32_arm_symbian_begin_write_processing (bfd *abfd,
16089 struct bfd_link_info *link_info)
16091 /* BPABI objects are never loaded directly by an OS kernel; they are
16092 processed by a postlinker first, into an OS-specific format. If
16093 the D_PAGED bit is set on the file, BFD will align segments on
16094 page boundaries, so that an OS can directly map the file. With
16095 BPABI objects, that just results in wasted space. In addition,
16096 because we clear the D_PAGED bit, map_sections_to_segments will
16097 recognize that the program headers should not be mapped into any
16098 loadable segment. */
16099 abfd->flags &= ~D_PAGED;
16100 elf32_arm_begin_write_processing (abfd, link_info);
/* Symbian-specific modify_segment_map hook: ensure a PT_DYNAMIC
   segment exists when ABFD has a .dynamic section, then defer to the
   generic ARM hook for everything else.  */
16104 elf32_arm_symbian_modify_segment_map (bfd *abfd,
16105 struct bfd_link_info *info)
16107 struct elf_segment_map *m;
16110 /* BPABI shared libraries and executables should have a PT_DYNAMIC
16111 segment. However, because the .dynamic section is not marked
16112 with SEC_LOAD, the generic ELF code will not create such a
16114 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
/* Scan the existing segment map for a PT_DYNAMIC entry so we do not
   create a duplicate.  NOTE(review): the guards around this search
   (e.g. the check that DYNSEC is non-NULL) are elided from this
   listing — confirm against the full source.  */
16117 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
16118 if (m->p_type == PT_DYNAMIC)
/* No PT_DYNAMIC found: build one and prepend it to the map.  */
16123 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
16124 m->next = elf_tdata (abfd)->segment_map;
16125 elf_tdata (abfd)->segment_map = m;
16129 /* Also call the generic arm routine. */
16130 return elf32_arm_modify_segment_map (abfd, info);
16133 /* Return address for Ith PLT stub in section PLT, for relocation REL
16134 or (bfd_vma) -1 if it should not be included. */
16137 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
16138 const arelent *rel ATTRIBUTE_UNUSED)
/* Each Symbian PLT entry is ARRAY_SIZE (elf32_arm_symbian_plt_entry)
   4-byte words (matching plt_entry_size set in the Symbian hash-table
   constructor), so entry I starts at that fixed stride from the
   section base.  */
16140 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
16145 #define elf32_bed elf32_arm_symbian_bed
16147 /* The dynamic sections are not allocated on SymbianOS; the postlinker
16148 will process them and then discard them. */
16149 #undef ELF_DYNAMIC_SEC_FLAGS
16150 #define ELF_DYNAMIC_SEC_FLAGS \
16151 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
16153 #undef elf_backend_emit_relocs
16155 #undef bfd_elf32_bfd_link_hash_table_create
16156 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
16157 #undef elf_backend_special_sections
16158 #define elf_backend_special_sections elf32_arm_symbian_special_sections
16159 #undef elf_backend_begin_write_processing
16160 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
16161 #undef elf_backend_final_write_processing
16162 #define elf_backend_final_write_processing elf32_arm_final_write_processing
16164 #undef elf_backend_modify_segment_map
16165 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
16167 /* There is no .got section for BPABI objects, and hence no header. */
16168 #undef elf_backend_got_header_size
16169 #define elf_backend_got_header_size 0
16171 /* Similarly, there is no .got.plt section. */
16172 #undef elf_backend_want_got_plt
16173 #define elf_backend_want_got_plt 0
16175 #undef elf_backend_plt_sym_val
16176 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
16178 #undef elf_backend_may_use_rel_p
16179 #define elf_backend_may_use_rel_p 1
16180 #undef elf_backend_may_use_rela_p
16181 #define elf_backend_may_use_rela_p 0
16182 #undef elf_backend_default_use_rela_p
16183 #define elf_backend_default_use_rela_p 0
16184 #undef elf_backend_want_plt_sym
16185 #define elf_backend_want_plt_sym 0
16186 #undef ELF_MAXPAGESIZE
16187 #define ELF_MAXPAGESIZE 0x8000
16189 #include "elf32-target.h"