1 /* 32-bit ELF support for ARM
2 Copyright 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
3 2008, 2009, 2010 Free Software Foundation, Inc.
5 This file is part of BFD, the Binary File Descriptor library.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
26 #include "libiberty.h"
29 #include "elf-vxworks.h"
32 /* Return the relocation section associated with NAME.  HTAB is the
33    bfd's elf32_arm_link_hash_table.  */
34 #define RELOC_SECTION(HTAB, NAME) \
35 ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37 /* Return the size of a relocation entry.  HTAB is the bfd's
38    elf32_arm_link_hash_table.  */
39 #define RELOC_SIZE(HTAB) \
40   ((HTAB)->use_rel \
41    ? sizeof (Elf32_External_Rel) \
42    : sizeof (Elf32_External_Rela))
44 /* Return the function used to swap relocations in.  HTAB is the bfd's
45    elf32_arm_link_hash_table.  */
46 #define SWAP_RELOC_IN(HTAB) \
47   ((HTAB)->use_rel \
48    ? bfd_elf32_swap_reloc_in \
49    : bfd_elf32_swap_reloca_in)
51 /* Return the function used to swap relocations out.  HTAB is the bfd's
52    elf32_arm_link_hash_table.  */
53 #define SWAP_RELOC_OUT(HTAB) \
54   ((HTAB)->use_rel \
55    ? bfd_elf32_swap_reloc_out \
56    : bfd_elf32_swap_reloca_out)
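/* Illustrative example (not part of the original source): on a target
   that uses REL relocations, where use_rel is set in the hash table, a
   hypothetical lookup such as

     sec = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".text"));

   searches for ".rel.text", RELOC_SIZE (htab) evaluates to
   sizeof (Elf32_External_Rel), and SWAP_RELOC_OUT (htab) yields
   bfd_elf32_swap_reloc_out.  On a RELA target the same code resolves to
   ".rela.text", Elf32_External_Rela and bfd_elf32_swap_reloca_out.  */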
58 #define elf_info_to_howto 0
59 #define elf_info_to_howto_rel elf32_arm_info_to_howto
61 #define ARM_ELF_ABI_VERSION 0
62 #define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
64 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
65 struct bfd_link_info *link_info, asection *sec, bfd_byte *contents);
69 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
70    R_ARM_PC24 as an index into this table, and to find the R_ARM_PC24 HOWTO entry there.  */
73 static reloc_howto_type elf32_arm_howto_table_1[] =
76 HOWTO (R_ARM_NONE, /* type */
78 0, /* size (0 = byte, 1 = short, 2 = long) */
80 FALSE, /* pc_relative */
82 complain_overflow_dont,/* complain_on_overflow */
83 bfd_elf_generic_reloc, /* special_function */
84 "R_ARM_NONE", /* name */
85 FALSE, /* partial_inplace */
88 FALSE), /* pcrel_offset */
90 HOWTO (R_ARM_PC24, /* type */
92 2, /* size (0 = byte, 1 = short, 2 = long) */
94 TRUE, /* pc_relative */
96 complain_overflow_signed,/* complain_on_overflow */
97 bfd_elf_generic_reloc, /* special_function */
98 "R_ARM_PC24", /* name */
99 FALSE, /* partial_inplace */
100 0x00ffffff, /* src_mask */
101 0x00ffffff, /* dst_mask */
102 TRUE), /* pcrel_offset */
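/* Illustrative note (not part of the original source): for an
   instruction relocation such as R_ARM_PC24 above, the 0x00ffffff
   dst_mask means that only the 24-bit branch-offset field of the
   instruction word is rewritten; the condition code and opcode bits
   outside the mask are preserved when the relocation is applied.  */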
104 /* 32 bit absolute */
105 HOWTO (R_ARM_ABS32, /* type */
107 2, /* size (0 = byte, 1 = short, 2 = long) */
109 FALSE, /* pc_relative */
111 complain_overflow_bitfield,/* complain_on_overflow */
112 bfd_elf_generic_reloc, /* special_function */
113 "R_ARM_ABS32", /* name */
114 FALSE, /* partial_inplace */
115 0xffffffff, /* src_mask */
116 0xffffffff, /* dst_mask */
117 FALSE), /* pcrel_offset */
119 /* standard 32bit pc-relative reloc */
120 HOWTO (R_ARM_REL32, /* type */
122 2, /* size (0 = byte, 1 = short, 2 = long) */
124 TRUE, /* pc_relative */
126 complain_overflow_bitfield,/* complain_on_overflow */
127 bfd_elf_generic_reloc, /* special_function */
128 "R_ARM_REL32", /* name */
129 FALSE, /* partial_inplace */
130 0xffffffff, /* src_mask */
131 0xffffffff, /* dst_mask */
132 TRUE), /* pcrel_offset */
134 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
135 HOWTO (R_ARM_LDR_PC_G0, /* type */
137 0, /* size (0 = byte, 1 = short, 2 = long) */
139 TRUE, /* pc_relative */
141 complain_overflow_dont,/* complain_on_overflow */
142 bfd_elf_generic_reloc, /* special_function */
143 "R_ARM_LDR_PC_G0", /* name */
144 FALSE, /* partial_inplace */
145 0xffffffff, /* src_mask */
146 0xffffffff, /* dst_mask */
147 TRUE), /* pcrel_offset */
149 /* 16 bit absolute */
150 HOWTO (R_ARM_ABS16, /* type */
152 1, /* size (0 = byte, 1 = short, 2 = long) */
154 FALSE, /* pc_relative */
156 complain_overflow_bitfield,/* complain_on_overflow */
157 bfd_elf_generic_reloc, /* special_function */
158 "R_ARM_ABS16", /* name */
159 FALSE, /* partial_inplace */
160 0x0000ffff, /* src_mask */
161 0x0000ffff, /* dst_mask */
162 FALSE), /* pcrel_offset */
164 /* 12 bit absolute */
165 HOWTO (R_ARM_ABS12, /* type */
167 2, /* size (0 = byte, 1 = short, 2 = long) */
169 FALSE, /* pc_relative */
171 complain_overflow_bitfield,/* complain_on_overflow */
172 bfd_elf_generic_reloc, /* special_function */
173 "R_ARM_ABS12", /* name */
174 FALSE, /* partial_inplace */
175 0x00000fff, /* src_mask */
176 0x00000fff, /* dst_mask */
177 FALSE), /* pcrel_offset */
179 HOWTO (R_ARM_THM_ABS5, /* type */
181 1, /* size (0 = byte, 1 = short, 2 = long) */
183 FALSE, /* pc_relative */
185 complain_overflow_bitfield,/* complain_on_overflow */
186 bfd_elf_generic_reloc, /* special_function */
187 "R_ARM_THM_ABS5", /* name */
188 FALSE, /* partial_inplace */
189 0x000007e0, /* src_mask */
190 0x000007e0, /* dst_mask */
191 FALSE), /* pcrel_offset */
194 HOWTO (R_ARM_ABS8, /* type */
196 0, /* size (0 = byte, 1 = short, 2 = long) */
198 FALSE, /* pc_relative */
200 complain_overflow_bitfield,/* complain_on_overflow */
201 bfd_elf_generic_reloc, /* special_function */
202 "R_ARM_ABS8", /* name */
203 FALSE, /* partial_inplace */
204 0x000000ff, /* src_mask */
205 0x000000ff, /* dst_mask */
206 FALSE), /* pcrel_offset */
208 HOWTO (R_ARM_SBREL32, /* type */
210 2, /* size (0 = byte, 1 = short, 2 = long) */
212 FALSE, /* pc_relative */
214 complain_overflow_dont,/* complain_on_overflow */
215 bfd_elf_generic_reloc, /* special_function */
216 "R_ARM_SBREL32", /* name */
217 FALSE, /* partial_inplace */
218 0xffffffff, /* src_mask */
219 0xffffffff, /* dst_mask */
220 FALSE), /* pcrel_offset */
222 HOWTO (R_ARM_THM_CALL, /* type */
224 2, /* size (0 = byte, 1 = short, 2 = long) */
226 TRUE, /* pc_relative */
228 complain_overflow_signed,/* complain_on_overflow */
229 bfd_elf_generic_reloc, /* special_function */
230 "R_ARM_THM_CALL", /* name */
231 FALSE, /* partial_inplace */
232 0x07ff07ff, /* src_mask */
233 0x07ff07ff, /* dst_mask */
234 TRUE), /* pcrel_offset */
236 HOWTO (R_ARM_THM_PC8, /* type */
238 1, /* size (0 = byte, 1 = short, 2 = long) */
240 TRUE, /* pc_relative */
242 complain_overflow_signed,/* complain_on_overflow */
243 bfd_elf_generic_reloc, /* special_function */
244 "R_ARM_THM_PC8", /* name */
245 FALSE, /* partial_inplace */
246 0x000000ff, /* src_mask */
247 0x000000ff, /* dst_mask */
248 TRUE), /* pcrel_offset */
250 HOWTO (R_ARM_BREL_ADJ, /* type */
252 1, /* size (0 = byte, 1 = short, 2 = long) */
254 FALSE, /* pc_relative */
256 complain_overflow_signed,/* complain_on_overflow */
257 bfd_elf_generic_reloc, /* special_function */
258 "R_ARM_BREL_ADJ", /* name */
259 FALSE, /* partial_inplace */
260 0xffffffff, /* src_mask */
261 0xffffffff, /* dst_mask */
262 FALSE), /* pcrel_offset */
264 HOWTO (R_ARM_SWI24, /* type */
266 0, /* size (0 = byte, 1 = short, 2 = long) */
268 FALSE, /* pc_relative */
270 complain_overflow_signed,/* complain_on_overflow */
271 bfd_elf_generic_reloc, /* special_function */
272 "R_ARM_SWI24", /* name */
273 FALSE, /* partial_inplace */
274 0x00000000, /* src_mask */
275 0x00000000, /* dst_mask */
276 FALSE), /* pcrel_offset */
278 HOWTO (R_ARM_THM_SWI8, /* type */
280 0, /* size (0 = byte, 1 = short, 2 = long) */
282 FALSE, /* pc_relative */
284 complain_overflow_signed,/* complain_on_overflow */
285 bfd_elf_generic_reloc, /* special_function */
286 "R_ARM_SWI8", /* name */
287 FALSE, /* partial_inplace */
288 0x00000000, /* src_mask */
289 0x00000000, /* dst_mask */
290 FALSE), /* pcrel_offset */
292 /* BLX instruction for the ARM. */
293 HOWTO (R_ARM_XPC25, /* type */
295 2, /* size (0 = byte, 1 = short, 2 = long) */
297 TRUE, /* pc_relative */
299 complain_overflow_signed,/* complain_on_overflow */
300 bfd_elf_generic_reloc, /* special_function */
301 "R_ARM_XPC25", /* name */
302 FALSE, /* partial_inplace */
303 0x00ffffff, /* src_mask */
304 0x00ffffff, /* dst_mask */
305 TRUE), /* pcrel_offset */
307 /* BLX instruction for the Thumb. */
308 HOWTO (R_ARM_THM_XPC22, /* type */
310 2, /* size (0 = byte, 1 = short, 2 = long) */
312 TRUE, /* pc_relative */
314 complain_overflow_signed,/* complain_on_overflow */
315 bfd_elf_generic_reloc, /* special_function */
316 "R_ARM_THM_XPC22", /* name */
317 FALSE, /* partial_inplace */
318 0x07ff07ff, /* src_mask */
319 0x07ff07ff, /* dst_mask */
320 TRUE), /* pcrel_offset */
322 /* Dynamic TLS relocations. */
324 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
326 2, /* size (0 = byte, 1 = short, 2 = long) */
328 FALSE, /* pc_relative */
330 complain_overflow_bitfield,/* complain_on_overflow */
331 bfd_elf_generic_reloc, /* special_function */
332 "R_ARM_TLS_DTPMOD32", /* name */
333 TRUE, /* partial_inplace */
334 0xffffffff, /* src_mask */
335 0xffffffff, /* dst_mask */
336 FALSE), /* pcrel_offset */
338 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
340 2, /* size (0 = byte, 1 = short, 2 = long) */
342 FALSE, /* pc_relative */
344 complain_overflow_bitfield,/* complain_on_overflow */
345 bfd_elf_generic_reloc, /* special_function */
346 "R_ARM_TLS_DTPOFF32", /* name */
347 TRUE, /* partial_inplace */
348 0xffffffff, /* src_mask */
349 0xffffffff, /* dst_mask */
350 FALSE), /* pcrel_offset */
352 HOWTO (R_ARM_TLS_TPOFF32, /* type */
354 2, /* size (0 = byte, 1 = short, 2 = long) */
356 FALSE, /* pc_relative */
358 complain_overflow_bitfield,/* complain_on_overflow */
359 bfd_elf_generic_reloc, /* special_function */
360 "R_ARM_TLS_TPOFF32", /* name */
361 TRUE, /* partial_inplace */
362 0xffffffff, /* src_mask */
363 0xffffffff, /* dst_mask */
364 FALSE), /* pcrel_offset */
366 /* Relocs used in ARM Linux */
368 HOWTO (R_ARM_COPY, /* type */
370 2, /* size (0 = byte, 1 = short, 2 = long) */
372 FALSE, /* pc_relative */
374 complain_overflow_bitfield,/* complain_on_overflow */
375 bfd_elf_generic_reloc, /* special_function */
376 "R_ARM_COPY", /* name */
377 TRUE, /* partial_inplace */
378 0xffffffff, /* src_mask */
379 0xffffffff, /* dst_mask */
380 FALSE), /* pcrel_offset */
382 HOWTO (R_ARM_GLOB_DAT, /* type */
384 2, /* size (0 = byte, 1 = short, 2 = long) */
386 FALSE, /* pc_relative */
388 complain_overflow_bitfield,/* complain_on_overflow */
389 bfd_elf_generic_reloc, /* special_function */
390 "R_ARM_GLOB_DAT", /* name */
391 TRUE, /* partial_inplace */
392 0xffffffff, /* src_mask */
393 0xffffffff, /* dst_mask */
394 FALSE), /* pcrel_offset */
396 HOWTO (R_ARM_JUMP_SLOT, /* type */
398 2, /* size (0 = byte, 1 = short, 2 = long) */
400 FALSE, /* pc_relative */
402 complain_overflow_bitfield,/* complain_on_overflow */
403 bfd_elf_generic_reloc, /* special_function */
404 "R_ARM_JUMP_SLOT", /* name */
405 TRUE, /* partial_inplace */
406 0xffffffff, /* src_mask */
407 0xffffffff, /* dst_mask */
408 FALSE), /* pcrel_offset */
410 HOWTO (R_ARM_RELATIVE, /* type */
412 2, /* size (0 = byte, 1 = short, 2 = long) */
414 FALSE, /* pc_relative */
416 complain_overflow_bitfield,/* complain_on_overflow */
417 bfd_elf_generic_reloc, /* special_function */
418 "R_ARM_RELATIVE", /* name */
419 TRUE, /* partial_inplace */
420 0xffffffff, /* src_mask */
421 0xffffffff, /* dst_mask */
422 FALSE), /* pcrel_offset */
424 HOWTO (R_ARM_GOTOFF32, /* type */
426 2, /* size (0 = byte, 1 = short, 2 = long) */
428 FALSE, /* pc_relative */
430 complain_overflow_bitfield,/* complain_on_overflow */
431 bfd_elf_generic_reloc, /* special_function */
432 "R_ARM_GOTOFF32", /* name */
433 TRUE, /* partial_inplace */
434 0xffffffff, /* src_mask */
435 0xffffffff, /* dst_mask */
436 FALSE), /* pcrel_offset */
438 HOWTO (R_ARM_GOTPC, /* type */
440 2, /* size (0 = byte, 1 = short, 2 = long) */
442 TRUE, /* pc_relative */
444 complain_overflow_bitfield,/* complain_on_overflow */
445 bfd_elf_generic_reloc, /* special_function */
446 "R_ARM_GOTPC", /* name */
447 TRUE, /* partial_inplace */
448 0xffffffff, /* src_mask */
449 0xffffffff, /* dst_mask */
450 TRUE), /* pcrel_offset */
452 HOWTO (R_ARM_GOT32, /* type */
454 2, /* size (0 = byte, 1 = short, 2 = long) */
456 FALSE, /* pc_relative */
458 complain_overflow_bitfield,/* complain_on_overflow */
459 bfd_elf_generic_reloc, /* special_function */
460 "R_ARM_GOT32", /* name */
461 TRUE, /* partial_inplace */
462 0xffffffff, /* src_mask */
463 0xffffffff, /* dst_mask */
464 FALSE), /* pcrel_offset */
466 HOWTO (R_ARM_PLT32, /* type */
468 2, /* size (0 = byte, 1 = short, 2 = long) */
470 TRUE, /* pc_relative */
472 complain_overflow_bitfield,/* complain_on_overflow */
473 bfd_elf_generic_reloc, /* special_function */
474 "R_ARM_PLT32", /* name */
475 FALSE, /* partial_inplace */
476 0x00ffffff, /* src_mask */
477 0x00ffffff, /* dst_mask */
478 TRUE), /* pcrel_offset */
480 HOWTO (R_ARM_CALL, /* type */
482 2, /* size (0 = byte, 1 = short, 2 = long) */
484 TRUE, /* pc_relative */
486 complain_overflow_signed,/* complain_on_overflow */
487 bfd_elf_generic_reloc, /* special_function */
488 "R_ARM_CALL", /* name */
489 FALSE, /* partial_inplace */
490 0x00ffffff, /* src_mask */
491 0x00ffffff, /* dst_mask */
492 TRUE), /* pcrel_offset */
494 HOWTO (R_ARM_JUMP24, /* type */
496 2, /* size (0 = byte, 1 = short, 2 = long) */
498 TRUE, /* pc_relative */
500 complain_overflow_signed,/* complain_on_overflow */
501 bfd_elf_generic_reloc, /* special_function */
502 "R_ARM_JUMP24", /* name */
503 FALSE, /* partial_inplace */
504 0x00ffffff, /* src_mask */
505 0x00ffffff, /* dst_mask */
506 TRUE), /* pcrel_offset */
508 HOWTO (R_ARM_THM_JUMP24, /* type */
510 2, /* size (0 = byte, 1 = short, 2 = long) */
512 TRUE, /* pc_relative */
514 complain_overflow_signed,/* complain_on_overflow */
515 bfd_elf_generic_reloc, /* special_function */
516 "R_ARM_THM_JUMP24", /* name */
517 FALSE, /* partial_inplace */
518 0x07ff2fff, /* src_mask */
519 0x07ff2fff, /* dst_mask */
520 TRUE), /* pcrel_offset */
522 HOWTO (R_ARM_BASE_ABS, /* type */
524 2, /* size (0 = byte, 1 = short, 2 = long) */
526 FALSE, /* pc_relative */
528 complain_overflow_dont,/* complain_on_overflow */
529 bfd_elf_generic_reloc, /* special_function */
530 "R_ARM_BASE_ABS", /* name */
531 FALSE, /* partial_inplace */
532 0xffffffff, /* src_mask */
533 0xffffffff, /* dst_mask */
534 FALSE), /* pcrel_offset */
536 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
538 2, /* size (0 = byte, 1 = short, 2 = long) */
540 TRUE, /* pc_relative */
542 complain_overflow_dont,/* complain_on_overflow */
543 bfd_elf_generic_reloc, /* special_function */
544 "R_ARM_ALU_PCREL_7_0", /* name */
545 FALSE, /* partial_inplace */
546 0x00000fff, /* src_mask */
547 0x00000fff, /* dst_mask */
548 TRUE), /* pcrel_offset */
550 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
552 2, /* size (0 = byte, 1 = short, 2 = long) */
554 TRUE, /* pc_relative */
556 complain_overflow_dont,/* complain_on_overflow */
557 bfd_elf_generic_reloc, /* special_function */
558 "R_ARM_ALU_PCREL_15_8",/* name */
559 FALSE, /* partial_inplace */
560 0x00000fff, /* src_mask */
561 0x00000fff, /* dst_mask */
562 TRUE), /* pcrel_offset */
564 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
566 2, /* size (0 = byte, 1 = short, 2 = long) */
568 TRUE, /* pc_relative */
570 complain_overflow_dont,/* complain_on_overflow */
571 bfd_elf_generic_reloc, /* special_function */
572 "R_ARM_ALU_PCREL_23_15",/* name */
573 FALSE, /* partial_inplace */
574 0x00000fff, /* src_mask */
575 0x00000fff, /* dst_mask */
576 TRUE), /* pcrel_offset */
578 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
580 2, /* size (0 = byte, 1 = short, 2 = long) */
582 FALSE, /* pc_relative */
584 complain_overflow_dont,/* complain_on_overflow */
585 bfd_elf_generic_reloc, /* special_function */
586 "R_ARM_LDR_SBREL_11_0",/* name */
587 FALSE, /* partial_inplace */
588 0x00000fff, /* src_mask */
589 0x00000fff, /* dst_mask */
590 FALSE), /* pcrel_offset */
592 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
594 2, /* size (0 = byte, 1 = short, 2 = long) */
596 FALSE, /* pc_relative */
598 complain_overflow_dont,/* complain_on_overflow */
599 bfd_elf_generic_reloc, /* special_function */
600 "R_ARM_ALU_SBREL_19_12",/* name */
601 FALSE, /* partial_inplace */
602 0x000ff000, /* src_mask */
603 0x000ff000, /* dst_mask */
604 FALSE), /* pcrel_offset */
606 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
608 2, /* size (0 = byte, 1 = short, 2 = long) */
610 FALSE, /* pc_relative */
612 complain_overflow_dont,/* complain_on_overflow */
613 bfd_elf_generic_reloc, /* special_function */
614 "R_ARM_ALU_SBREL_27_20",/* name */
615 FALSE, /* partial_inplace */
616 0x0ff00000, /* src_mask */
617 0x0ff00000, /* dst_mask */
618 FALSE), /* pcrel_offset */
620 HOWTO (R_ARM_TARGET1, /* type */
622 2, /* size (0 = byte, 1 = short, 2 = long) */
624 FALSE, /* pc_relative */
626 complain_overflow_dont,/* complain_on_overflow */
627 bfd_elf_generic_reloc, /* special_function */
628 "R_ARM_TARGET1", /* name */
629 FALSE, /* partial_inplace */
630 0xffffffff, /* src_mask */
631 0xffffffff, /* dst_mask */
632 FALSE), /* pcrel_offset */
634 HOWTO (R_ARM_ROSEGREL32, /* type */
636 2, /* size (0 = byte, 1 = short, 2 = long) */
638 FALSE, /* pc_relative */
640 complain_overflow_dont,/* complain_on_overflow */
641 bfd_elf_generic_reloc, /* special_function */
642 "R_ARM_ROSEGREL32", /* name */
643 FALSE, /* partial_inplace */
644 0xffffffff, /* src_mask */
645 0xffffffff, /* dst_mask */
646 FALSE), /* pcrel_offset */
648 HOWTO (R_ARM_V4BX, /* type */
650 2, /* size (0 = byte, 1 = short, 2 = long) */
652 FALSE, /* pc_relative */
654 complain_overflow_dont,/* complain_on_overflow */
655 bfd_elf_generic_reloc, /* special_function */
656 "R_ARM_V4BX", /* name */
657 FALSE, /* partial_inplace */
658 0xffffffff, /* src_mask */
659 0xffffffff, /* dst_mask */
660 FALSE), /* pcrel_offset */
662 HOWTO (R_ARM_TARGET2, /* type */
664 2, /* size (0 = byte, 1 = short, 2 = long) */
666 FALSE, /* pc_relative */
668 complain_overflow_signed,/* complain_on_overflow */
669 bfd_elf_generic_reloc, /* special_function */
670 "R_ARM_TARGET2", /* name */
671 FALSE, /* partial_inplace */
672 0xffffffff, /* src_mask */
673 0xffffffff, /* dst_mask */
674 TRUE), /* pcrel_offset */
676 HOWTO (R_ARM_PREL31, /* type */
678 2, /* size (0 = byte, 1 = short, 2 = long) */
680 TRUE, /* pc_relative */
682 complain_overflow_signed,/* complain_on_overflow */
683 bfd_elf_generic_reloc, /* special_function */
684 "R_ARM_PREL31", /* name */
685 FALSE, /* partial_inplace */
686 0x7fffffff, /* src_mask */
687 0x7fffffff, /* dst_mask */
688 TRUE), /* pcrel_offset */
690 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
692 2, /* size (0 = byte, 1 = short, 2 = long) */
694 FALSE, /* pc_relative */
696 complain_overflow_dont,/* complain_on_overflow */
697 bfd_elf_generic_reloc, /* special_function */
698 "R_ARM_MOVW_ABS_NC", /* name */
699 FALSE, /* partial_inplace */
700 0x000f0fff, /* src_mask */
701 0x000f0fff, /* dst_mask */
702 FALSE), /* pcrel_offset */
704 HOWTO (R_ARM_MOVT_ABS, /* type */
706 2, /* size (0 = byte, 1 = short, 2 = long) */
708 FALSE, /* pc_relative */
710 complain_overflow_bitfield,/* complain_on_overflow */
711 bfd_elf_generic_reloc, /* special_function */
712 "R_ARM_MOVT_ABS", /* name */
713 FALSE, /* partial_inplace */
714 0x000f0fff, /* src_mask */
715 0x000f0fff, /* dst_mask */
716 FALSE), /* pcrel_offset */
718 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
720 2, /* size (0 = byte, 1 = short, 2 = long) */
722 TRUE, /* pc_relative */
724 complain_overflow_dont,/* complain_on_overflow */
725 bfd_elf_generic_reloc, /* special_function */
726 "R_ARM_MOVW_PREL_NC", /* name */
727 FALSE, /* partial_inplace */
728 0x000f0fff, /* src_mask */
729 0x000f0fff, /* dst_mask */
730 TRUE), /* pcrel_offset */
732 HOWTO (R_ARM_MOVT_PREL, /* type */
734 2, /* size (0 = byte, 1 = short, 2 = long) */
736 TRUE, /* pc_relative */
738 complain_overflow_bitfield,/* complain_on_overflow */
739 bfd_elf_generic_reloc, /* special_function */
740 "R_ARM_MOVT_PREL", /* name */
741 FALSE, /* partial_inplace */
742 0x000f0fff, /* src_mask */
743 0x000f0fff, /* dst_mask */
744 TRUE), /* pcrel_offset */
746 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
748 2, /* size (0 = byte, 1 = short, 2 = long) */
750 FALSE, /* pc_relative */
752 complain_overflow_dont,/* complain_on_overflow */
753 bfd_elf_generic_reloc, /* special_function */
754 "R_ARM_THM_MOVW_ABS_NC",/* name */
755 FALSE, /* partial_inplace */
756 0x040f70ff, /* src_mask */
757 0x040f70ff, /* dst_mask */
758 FALSE), /* pcrel_offset */
760 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
762 2, /* size (0 = byte, 1 = short, 2 = long) */
764 FALSE, /* pc_relative */
766 complain_overflow_bitfield,/* complain_on_overflow */
767 bfd_elf_generic_reloc, /* special_function */
768 "R_ARM_THM_MOVT_ABS", /* name */
769 FALSE, /* partial_inplace */
770 0x040f70ff, /* src_mask */
771 0x040f70ff, /* dst_mask */
772 FALSE), /* pcrel_offset */
774 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
776 2, /* size (0 = byte, 1 = short, 2 = long) */
778 TRUE, /* pc_relative */
780 complain_overflow_dont,/* complain_on_overflow */
781 bfd_elf_generic_reloc, /* special_function */
782 "R_ARM_THM_MOVW_PREL_NC",/* name */
783 FALSE, /* partial_inplace */
784 0x040f70ff, /* src_mask */
785 0x040f70ff, /* dst_mask */
786 TRUE), /* pcrel_offset */
788 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
790 2, /* size (0 = byte, 1 = short, 2 = long) */
792 TRUE, /* pc_relative */
794 complain_overflow_bitfield,/* complain_on_overflow */
795 bfd_elf_generic_reloc, /* special_function */
796 "R_ARM_THM_MOVT_PREL", /* name */
797 FALSE, /* partial_inplace */
798 0x040f70ff, /* src_mask */
799 0x040f70ff, /* dst_mask */
800 TRUE), /* pcrel_offset */
802 HOWTO (R_ARM_THM_JUMP19, /* type */
804 2, /* size (0 = byte, 1 = short, 2 = long) */
806 TRUE, /* pc_relative */
808 complain_overflow_signed,/* complain_on_overflow */
809 bfd_elf_generic_reloc, /* special_function */
810 "R_ARM_THM_JUMP19", /* name */
811 FALSE, /* partial_inplace */
812 0x043f2fff, /* src_mask */
813 0x043f2fff, /* dst_mask */
814 TRUE), /* pcrel_offset */
816 HOWTO (R_ARM_THM_JUMP6, /* type */
818 1, /* size (0 = byte, 1 = short, 2 = long) */
820 TRUE, /* pc_relative */
822 complain_overflow_unsigned,/* complain_on_overflow */
823 bfd_elf_generic_reloc, /* special_function */
824 "R_ARM_THM_JUMP6", /* name */
825 FALSE, /* partial_inplace */
826 0x02f8, /* src_mask */
827 0x02f8, /* dst_mask */
828 TRUE), /* pcrel_offset */
830 /* These are declared as 13-bit signed relocations because we can
831 address -4095 .. 4095 (base) by altering ADDW to SUBW or vice versa.  */
833 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
835 2, /* size (0 = byte, 1 = short, 2 = long) */
837 TRUE, /* pc_relative */
839 complain_overflow_dont,/* complain_on_overflow */
840 bfd_elf_generic_reloc, /* special_function */
841 "R_ARM_THM_ALU_PREL_11_0",/* name */
842 FALSE, /* partial_inplace */
843 0xffffffff, /* src_mask */
844 0xffffffff, /* dst_mask */
845 TRUE), /* pcrel_offset */
847 HOWTO (R_ARM_THM_PC12, /* type */
849 2, /* size (0 = byte, 1 = short, 2 = long) */
851 TRUE, /* pc_relative */
853 complain_overflow_dont,/* complain_on_overflow */
854 bfd_elf_generic_reloc, /* special_function */
855 "R_ARM_THM_PC12", /* name */
856 FALSE, /* partial_inplace */
857 0xffffffff, /* src_mask */
858 0xffffffff, /* dst_mask */
859 TRUE), /* pcrel_offset */
861 HOWTO (R_ARM_ABS32_NOI, /* type */
863 2, /* size (0 = byte, 1 = short, 2 = long) */
865 FALSE, /* pc_relative */
867 complain_overflow_dont,/* complain_on_overflow */
868 bfd_elf_generic_reloc, /* special_function */
869 "R_ARM_ABS32_NOI", /* name */
870 FALSE, /* partial_inplace */
871 0xffffffff, /* src_mask */
872 0xffffffff, /* dst_mask */
873 FALSE), /* pcrel_offset */
875 HOWTO (R_ARM_REL32_NOI, /* type */
877 2, /* size (0 = byte, 1 = short, 2 = long) */
879 TRUE, /* pc_relative */
881 complain_overflow_dont,/* complain_on_overflow */
882 bfd_elf_generic_reloc, /* special_function */
883 "R_ARM_REL32_NOI", /* name */
884 FALSE, /* partial_inplace */
885 0xffffffff, /* src_mask */
886 0xffffffff, /* dst_mask */
887 FALSE), /* pcrel_offset */
889 /* Group relocations. */
891 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
893 2, /* size (0 = byte, 1 = short, 2 = long) */
895 TRUE, /* pc_relative */
897 complain_overflow_dont,/* complain_on_overflow */
898 bfd_elf_generic_reloc, /* special_function */
899 "R_ARM_ALU_PC_G0_NC", /* name */
900 FALSE, /* partial_inplace */
901 0xffffffff, /* src_mask */
902 0xffffffff, /* dst_mask */
903 TRUE), /* pcrel_offset */
905 HOWTO (R_ARM_ALU_PC_G0, /* type */
907 2, /* size (0 = byte, 1 = short, 2 = long) */
909 TRUE, /* pc_relative */
911 complain_overflow_dont,/* complain_on_overflow */
912 bfd_elf_generic_reloc, /* special_function */
913 "R_ARM_ALU_PC_G0", /* name */
914 FALSE, /* partial_inplace */
915 0xffffffff, /* src_mask */
916 0xffffffff, /* dst_mask */
917 TRUE), /* pcrel_offset */
919 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
921 2, /* size (0 = byte, 1 = short, 2 = long) */
923 TRUE, /* pc_relative */
925 complain_overflow_dont,/* complain_on_overflow */
926 bfd_elf_generic_reloc, /* special_function */
927 "R_ARM_ALU_PC_G1_NC", /* name */
928 FALSE, /* partial_inplace */
929 0xffffffff, /* src_mask */
930 0xffffffff, /* dst_mask */
931 TRUE), /* pcrel_offset */
933 HOWTO (R_ARM_ALU_PC_G1, /* type */
935 2, /* size (0 = byte, 1 = short, 2 = long) */
937 TRUE, /* pc_relative */
939 complain_overflow_dont,/* complain_on_overflow */
940 bfd_elf_generic_reloc, /* special_function */
941 "R_ARM_ALU_PC_G1", /* name */
942 FALSE, /* partial_inplace */
943 0xffffffff, /* src_mask */
944 0xffffffff, /* dst_mask */
945 TRUE), /* pcrel_offset */
947 HOWTO (R_ARM_ALU_PC_G2, /* type */
949 2, /* size (0 = byte, 1 = short, 2 = long) */
951 TRUE, /* pc_relative */
953 complain_overflow_dont,/* complain_on_overflow */
954 bfd_elf_generic_reloc, /* special_function */
955 "R_ARM_ALU_PC_G2", /* name */
956 FALSE, /* partial_inplace */
957 0xffffffff, /* src_mask */
958 0xffffffff, /* dst_mask */
959 TRUE), /* pcrel_offset */
961 HOWTO (R_ARM_LDR_PC_G1, /* type */
963 2, /* size (0 = byte, 1 = short, 2 = long) */
965 TRUE, /* pc_relative */
967 complain_overflow_dont,/* complain_on_overflow */
968 bfd_elf_generic_reloc, /* special_function */
969 "R_ARM_LDR_PC_G1", /* name */
970 FALSE, /* partial_inplace */
971 0xffffffff, /* src_mask */
972 0xffffffff, /* dst_mask */
973 TRUE), /* pcrel_offset */
975 HOWTO (R_ARM_LDR_PC_G2, /* type */
977 2, /* size (0 = byte, 1 = short, 2 = long) */
979 TRUE, /* pc_relative */
981 complain_overflow_dont,/* complain_on_overflow */
982 bfd_elf_generic_reloc, /* special_function */
983 "R_ARM_LDR_PC_G2", /* name */
984 FALSE, /* partial_inplace */
985 0xffffffff, /* src_mask */
986 0xffffffff, /* dst_mask */
987 TRUE), /* pcrel_offset */
989 HOWTO (R_ARM_LDRS_PC_G0, /* type */
991 2, /* size (0 = byte, 1 = short, 2 = long) */
993 TRUE, /* pc_relative */
995 complain_overflow_dont,/* complain_on_overflow */
996 bfd_elf_generic_reloc, /* special_function */
997 "R_ARM_LDRS_PC_G0", /* name */
998 FALSE, /* partial_inplace */
999 0xffffffff, /* src_mask */
1000 0xffffffff, /* dst_mask */
1001 TRUE), /* pcrel_offset */
1003 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1005 2, /* size (0 = byte, 1 = short, 2 = long) */
1007 TRUE, /* pc_relative */
1009 complain_overflow_dont,/* complain_on_overflow */
1010 bfd_elf_generic_reloc, /* special_function */
1011 "R_ARM_LDRS_PC_G1", /* name */
1012 FALSE, /* partial_inplace */
1013 0xffffffff, /* src_mask */
1014 0xffffffff, /* dst_mask */
1015 TRUE), /* pcrel_offset */
1017 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1019 2, /* size (0 = byte, 1 = short, 2 = long) */
1021 TRUE, /* pc_relative */
1023 complain_overflow_dont,/* complain_on_overflow */
1024 bfd_elf_generic_reloc, /* special_function */
1025 "R_ARM_LDRS_PC_G2", /* name */
1026 FALSE, /* partial_inplace */
1027 0xffffffff, /* src_mask */
1028 0xffffffff, /* dst_mask */
1029 TRUE), /* pcrel_offset */
1031 HOWTO (R_ARM_LDC_PC_G0, /* type */
1033 2, /* size (0 = byte, 1 = short, 2 = long) */
1035 TRUE, /* pc_relative */
1037 complain_overflow_dont,/* complain_on_overflow */
1038 bfd_elf_generic_reloc, /* special_function */
1039 "R_ARM_LDC_PC_G0", /* name */
1040 FALSE, /* partial_inplace */
1041 0xffffffff, /* src_mask */
1042 0xffffffff, /* dst_mask */
1043 TRUE), /* pcrel_offset */
1045 HOWTO (R_ARM_LDC_PC_G1, /* type */
1047 2, /* size (0 = byte, 1 = short, 2 = long) */
1049 TRUE, /* pc_relative */
1051 complain_overflow_dont,/* complain_on_overflow */
1052 bfd_elf_generic_reloc, /* special_function */
1053 "R_ARM_LDC_PC_G1", /* name */
1054 FALSE, /* partial_inplace */
1055 0xffffffff, /* src_mask */
1056 0xffffffff, /* dst_mask */
1057 TRUE), /* pcrel_offset */
1059 HOWTO (R_ARM_LDC_PC_G2, /* type */
1061 2, /* size (0 = byte, 1 = short, 2 = long) */
1063 TRUE, /* pc_relative */
1065 complain_overflow_dont,/* complain_on_overflow */
1066 bfd_elf_generic_reloc, /* special_function */
1067 "R_ARM_LDC_PC_G2", /* name */
1068 FALSE, /* partial_inplace */
1069 0xffffffff, /* src_mask */
1070 0xffffffff, /* dst_mask */
1071 TRUE), /* pcrel_offset */
1073 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1075 2, /* size (0 = byte, 1 = short, 2 = long) */
1077 TRUE, /* pc_relative */
1079 complain_overflow_dont,/* complain_on_overflow */
1080 bfd_elf_generic_reloc, /* special_function */
1081 "R_ARM_ALU_SB_G0_NC", /* name */
1082 FALSE, /* partial_inplace */
1083 0xffffffff, /* src_mask */
1084 0xffffffff, /* dst_mask */
1085 TRUE), /* pcrel_offset */
1087 HOWTO (R_ARM_ALU_SB_G0, /* type */
1089 2, /* size (0 = byte, 1 = short, 2 = long) */
1091 TRUE, /* pc_relative */
1093 complain_overflow_dont,/* complain_on_overflow */
1094 bfd_elf_generic_reloc, /* special_function */
1095 "R_ARM_ALU_SB_G0", /* name */
1096 FALSE, /* partial_inplace */
1097 0xffffffff, /* src_mask */
1098 0xffffffff, /* dst_mask */
1099 TRUE), /* pcrel_offset */
1101 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1103 2, /* size (0 = byte, 1 = short, 2 = long) */
1105 TRUE, /* pc_relative */
1107 complain_overflow_dont,/* complain_on_overflow */
1108 bfd_elf_generic_reloc, /* special_function */
1109 "R_ARM_ALU_SB_G1_NC", /* name */
1110 FALSE, /* partial_inplace */
1111 0xffffffff, /* src_mask */
1112 0xffffffff, /* dst_mask */
1113 TRUE), /* pcrel_offset */
1115 HOWTO (R_ARM_ALU_SB_G1, /* type */
1117 2, /* size (0 = byte, 1 = short, 2 = long) */
1119 TRUE, /* pc_relative */
1121 complain_overflow_dont,/* complain_on_overflow */
1122 bfd_elf_generic_reloc, /* special_function */
1123 "R_ARM_ALU_SB_G1", /* name */
1124 FALSE, /* partial_inplace */
1125 0xffffffff, /* src_mask */
1126 0xffffffff, /* dst_mask */
1127 TRUE), /* pcrel_offset */
1129 HOWTO (R_ARM_ALU_SB_G2, /* type */
1131 2, /* size (0 = byte, 1 = short, 2 = long) */
1133 TRUE, /* pc_relative */
1135 complain_overflow_dont,/* complain_on_overflow */
1136 bfd_elf_generic_reloc, /* special_function */
1137 "R_ARM_ALU_SB_G2", /* name */
1138 FALSE, /* partial_inplace */
1139 0xffffffff, /* src_mask */
1140 0xffffffff, /* dst_mask */
1141 TRUE), /* pcrel_offset */
1143 HOWTO (R_ARM_LDR_SB_G0, /* type */
1145 2, /* size (0 = byte, 1 = short, 2 = long) */
1147 TRUE, /* pc_relative */
1149 complain_overflow_dont,/* complain_on_overflow */
1150 bfd_elf_generic_reloc, /* special_function */
1151 "R_ARM_LDR_SB_G0", /* name */
1152 FALSE, /* partial_inplace */
1153 0xffffffff, /* src_mask */
1154 0xffffffff, /* dst_mask */
1155 TRUE), /* pcrel_offset */
1157 HOWTO (R_ARM_LDR_SB_G1, /* type */
1159 2, /* size (0 = byte, 1 = short, 2 = long) */
1161 TRUE, /* pc_relative */
1163 complain_overflow_dont,/* complain_on_overflow */
1164 bfd_elf_generic_reloc, /* special_function */
1165 "R_ARM_LDR_SB_G1", /* name */
1166 FALSE, /* partial_inplace */
1167 0xffffffff, /* src_mask */
1168 0xffffffff, /* dst_mask */
1169 TRUE), /* pcrel_offset */
1171 HOWTO (R_ARM_LDR_SB_G2, /* type */
1173 2, /* size (0 = byte, 1 = short, 2 = long) */
1175 TRUE, /* pc_relative */
1177 complain_overflow_dont,/* complain_on_overflow */
1178 bfd_elf_generic_reloc, /* special_function */
1179 "R_ARM_LDR_SB_G2", /* name */
1180 FALSE, /* partial_inplace */
1181 0xffffffff, /* src_mask */
1182 0xffffffff, /* dst_mask */
1183 TRUE), /* pcrel_offset */
1185 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1187 2, /* size (0 = byte, 1 = short, 2 = long) */
1189 TRUE, /* pc_relative */
1191 complain_overflow_dont,/* complain_on_overflow */
1192 bfd_elf_generic_reloc, /* special_function */
1193 "R_ARM_LDRS_SB_G0", /* name */
1194 FALSE, /* partial_inplace */
1195 0xffffffff, /* src_mask */
1196 0xffffffff, /* dst_mask */
1197 TRUE), /* pcrel_offset */
1199 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1201 2, /* size (0 = byte, 1 = short, 2 = long) */
1203 TRUE, /* pc_relative */
1205 complain_overflow_dont,/* complain_on_overflow */
1206 bfd_elf_generic_reloc, /* special_function */
1207 "R_ARM_LDRS_SB_G1", /* name */
1208 FALSE, /* partial_inplace */
1209 0xffffffff, /* src_mask */
1210 0xffffffff, /* dst_mask */
1211 TRUE), /* pcrel_offset */
1213 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1215 2, /* size (0 = byte, 1 = short, 2 = long) */
1217 TRUE, /* pc_relative */
1219 complain_overflow_dont,/* complain_on_overflow */
1220 bfd_elf_generic_reloc, /* special_function */
1221 "R_ARM_LDRS_SB_G2", /* name */
1222 FALSE, /* partial_inplace */
1223 0xffffffff, /* src_mask */
1224 0xffffffff, /* dst_mask */
1225 TRUE), /* pcrel_offset */
1227 HOWTO (R_ARM_LDC_SB_G0, /* type */
1229 2, /* size (0 = byte, 1 = short, 2 = long) */
1231 TRUE, /* pc_relative */
1233 complain_overflow_dont,/* complain_on_overflow */
1234 bfd_elf_generic_reloc, /* special_function */
1235 "R_ARM_LDC_SB_G0", /* name */
1236 FALSE, /* partial_inplace */
1237 0xffffffff, /* src_mask */
1238 0xffffffff, /* dst_mask */
1239 TRUE), /* pcrel_offset */
1241 HOWTO (R_ARM_LDC_SB_G1, /* type */
1243 2, /* size (0 = byte, 1 = short, 2 = long) */
1245 TRUE, /* pc_relative */
1247 complain_overflow_dont,/* complain_on_overflow */
1248 bfd_elf_generic_reloc, /* special_function */
1249 "R_ARM_LDC_SB_G1", /* name */
1250 FALSE, /* partial_inplace */
1251 0xffffffff, /* src_mask */
1252 0xffffffff, /* dst_mask */
1253 TRUE), /* pcrel_offset */
1255 HOWTO (R_ARM_LDC_SB_G2, /* type */
1257 2, /* size (0 = byte, 1 = short, 2 = long) */
1259 TRUE, /* pc_relative */
1261 complain_overflow_dont,/* complain_on_overflow */
1262 bfd_elf_generic_reloc, /* special_function */
1263 "R_ARM_LDC_SB_G2", /* name */
1264 FALSE, /* partial_inplace */
1265 0xffffffff, /* src_mask */
1266 0xffffffff, /* dst_mask */
1267 TRUE), /* pcrel_offset */
1269 /* End of group relocations. */
1271 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1273 2, /* size (0 = byte, 1 = short, 2 = long) */
1275 FALSE, /* pc_relative */
1277 complain_overflow_dont,/* complain_on_overflow */
1278 bfd_elf_generic_reloc, /* special_function */
1279 "R_ARM_MOVW_BREL_NC", /* name */
1280 FALSE, /* partial_inplace */
1281 0x0000ffff, /* src_mask */
1282 0x0000ffff, /* dst_mask */
1283 FALSE), /* pcrel_offset */
1285 HOWTO (R_ARM_MOVT_BREL, /* type */
1287 2, /* size (0 = byte, 1 = short, 2 = long) */
1289 FALSE, /* pc_relative */
1291 complain_overflow_bitfield,/* complain_on_overflow */
1292 bfd_elf_generic_reloc, /* special_function */
1293 "R_ARM_MOVT_BREL", /* name */
1294 FALSE, /* partial_inplace */
1295 0x0000ffff, /* src_mask */
1296 0x0000ffff, /* dst_mask */
1297 FALSE), /* pcrel_offset */
1299 HOWTO (R_ARM_MOVW_BREL, /* type */
1301 2, /* size (0 = byte, 1 = short, 2 = long) */
1303 FALSE, /* pc_relative */
1305 complain_overflow_dont,/* complain_on_overflow */
1306 bfd_elf_generic_reloc, /* special_function */
1307 "R_ARM_MOVW_BREL", /* name */
1308 FALSE, /* partial_inplace */
1309 0x0000ffff, /* src_mask */
1310 0x0000ffff, /* dst_mask */
1311 FALSE), /* pcrel_offset */
1313 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1315 2, /* size (0 = byte, 1 = short, 2 = long) */
1317 FALSE, /* pc_relative */
1319 complain_overflow_dont,/* complain_on_overflow */
1320 bfd_elf_generic_reloc, /* special_function */
1321 "R_ARM_THM_MOVW_BREL_NC",/* name */
1322 FALSE, /* partial_inplace */
1323 0x040f70ff, /* src_mask */
1324 0x040f70ff, /* dst_mask */
1325 FALSE), /* pcrel_offset */
1327 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1329 2, /* size (0 = byte, 1 = short, 2 = long) */
1331 FALSE, /* pc_relative */
1333 complain_overflow_bitfield,/* complain_on_overflow */
1334 bfd_elf_generic_reloc, /* special_function */
1335 "R_ARM_THM_MOVT_BREL", /* name */
1336 FALSE, /* partial_inplace */
1337 0x040f70ff, /* src_mask */
1338 0x040f70ff, /* dst_mask */
1339 FALSE), /* pcrel_offset */
1341 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1343 2, /* size (0 = byte, 1 = short, 2 = long) */
1345 FALSE, /* pc_relative */
1347 complain_overflow_dont,/* complain_on_overflow */
1348 bfd_elf_generic_reloc, /* special_function */
1349 "R_ARM_THM_MOVW_BREL", /* name */
1350 FALSE, /* partial_inplace */
1351 0x040f70ff, /* src_mask */
1352 0x040f70ff, /* dst_mask */
1353 FALSE), /* pcrel_offset */
1355 EMPTY_HOWTO (90), /* Unallocated. */
1360 HOWTO (R_ARM_PLT32_ABS, /* type */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1364 FALSE, /* pc_relative */
1366 complain_overflow_dont,/* complain_on_overflow */
1367 bfd_elf_generic_reloc, /* special_function */
1368 "R_ARM_PLT32_ABS", /* name */
1369 FALSE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1374 HOWTO (R_ARM_GOT_ABS, /* type */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1378 FALSE, /* pc_relative */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_GOT_ABS", /* name */
1383 FALSE, /* partial_inplace */
1384 0xffffffff, /* src_mask */
1385 0xffffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1388 HOWTO (R_ARM_GOT_PREL, /* type */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1392 TRUE, /* pc_relative */
1394 complain_overflow_dont, /* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_GOT_PREL", /* name */
1397 FALSE, /* partial_inplace */
1398 0xffffffff, /* src_mask */
1399 0xffffffff, /* dst_mask */
1400 TRUE), /* pcrel_offset */
1402 HOWTO (R_ARM_GOT_BREL12, /* type */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1406 FALSE, /* pc_relative */
1408 complain_overflow_bitfield,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_GOT_BREL12", /* name */
1411 FALSE, /* partial_inplace */
1412 0x00000fff, /* src_mask */
1413 0x00000fff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1416 HOWTO (R_ARM_GOTOFF12, /* type */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1420 FALSE, /* pc_relative */
1422 complain_overflow_bitfield,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_GOTOFF12", /* name */
1425 FALSE, /* partial_inplace */
1426 0x00000fff, /* src_mask */
1427 0x00000fff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1430 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1432 /* GNU extension to record C++ vtable member usage */
1433 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1435 2, /* size (0 = byte, 1 = short, 2 = long) */
1437 FALSE, /* pc_relative */
1439 complain_overflow_dont, /* complain_on_overflow */
1440 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1441 "R_ARM_GNU_VTENTRY", /* name */
1442 FALSE, /* partial_inplace */
1445 FALSE), /* pcrel_offset */
1447 /* GNU extension to record C++ vtable hierarchy */
1448 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1450 2, /* size (0 = byte, 1 = short, 2 = long) */
1452 FALSE, /* pc_relative */
1454 complain_overflow_dont, /* complain_on_overflow */
1455 NULL, /* special_function */
1456 "R_ARM_GNU_VTINHERIT", /* name */
1457 FALSE, /* partial_inplace */
1460 FALSE), /* pcrel_offset */
1462 HOWTO (R_ARM_THM_JUMP11, /* type */
1464 1, /* size (0 = byte, 1 = short, 2 = long) */
1466 TRUE, /* pc_relative */
1468 complain_overflow_signed, /* complain_on_overflow */
1469 bfd_elf_generic_reloc, /* special_function */
1470 "R_ARM_THM_JUMP11", /* name */
1471 FALSE, /* partial_inplace */
1472 0x000007ff, /* src_mask */
1473 0x000007ff, /* dst_mask */
1474 TRUE), /* pcrel_offset */
1476 HOWTO (R_ARM_THM_JUMP8, /* type */
1478 1, /* size (0 = byte, 1 = short, 2 = long) */
1480 TRUE, /* pc_relative */
1482 complain_overflow_signed, /* complain_on_overflow */
1483 bfd_elf_generic_reloc, /* special_function */
1484 "R_ARM_THM_JUMP8", /* name */
1485 FALSE, /* partial_inplace */
1486 0x000000ff, /* src_mask */
1487 0x000000ff, /* dst_mask */
1488 TRUE), /* pcrel_offset */
1490 /* TLS relocations */
1491 HOWTO (R_ARM_TLS_GD32, /* type */
1493 2, /* size (0 = byte, 1 = short, 2 = long) */
1495 FALSE, /* pc_relative */
1497 complain_overflow_bitfield,/* complain_on_overflow */
1498 NULL, /* special_function */
1499 "R_ARM_TLS_GD32", /* name */
1500 TRUE, /* partial_inplace */
1501 0xffffffff, /* src_mask */
1502 0xffffffff, /* dst_mask */
1503 FALSE), /* pcrel_offset */
1505 HOWTO (R_ARM_TLS_LDM32, /* type */
1507 2, /* size (0 = byte, 1 = short, 2 = long) */
1509 FALSE, /* pc_relative */
1511 complain_overflow_bitfield,/* complain_on_overflow */
1512 bfd_elf_generic_reloc, /* special_function */
1513 "R_ARM_TLS_LDM32", /* name */
1514 TRUE, /* partial_inplace */
1515 0xffffffff, /* src_mask */
1516 0xffffffff, /* dst_mask */
1517 FALSE), /* pcrel_offset */
1519 HOWTO (R_ARM_TLS_LDO32, /* type */
1521 2, /* size (0 = byte, 1 = short, 2 = long) */
1523 FALSE, /* pc_relative */
1525 complain_overflow_bitfield,/* complain_on_overflow */
1526 bfd_elf_generic_reloc, /* special_function */
1527 "R_ARM_TLS_LDO32", /* name */
1528 TRUE, /* partial_inplace */
1529 0xffffffff, /* src_mask */
1530 0xffffffff, /* dst_mask */
1531 FALSE), /* pcrel_offset */
1533 HOWTO (R_ARM_TLS_IE32, /* type */
1535 2, /* size (0 = byte, 1 = short, 2 = long) */
1537 FALSE, /* pc_relative */
1539 complain_overflow_bitfield,/* complain_on_overflow */
1540 NULL, /* special_function */
1541 "R_ARM_TLS_IE32", /* name */
1542 TRUE, /* partial_inplace */
1543 0xffffffff, /* src_mask */
1544 0xffffffff, /* dst_mask */
1545 FALSE), /* pcrel_offset */
1547 HOWTO (R_ARM_TLS_LE32, /* type */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1551 FALSE, /* pc_relative */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 bfd_elf_generic_reloc, /* special_function */
1555 "R_ARM_TLS_LE32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1561 HOWTO (R_ARM_TLS_LDO12, /* type */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1565 FALSE, /* pc_relative */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDO12", /* name */
1570 FALSE, /* partial_inplace */
1571 0x00000fff, /* src_mask */
1572 0x00000fff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1575 HOWTO (R_ARM_TLS_LE12, /* type */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1579 FALSE, /* pc_relative */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LE12", /* name */
1584 FALSE, /* partial_inplace */
1585 0x00000fff, /* src_mask */
1586 0x00000fff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1589 HOWTO (R_ARM_TLS_IE12GP, /* type */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1593 FALSE, /* pc_relative */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 bfd_elf_generic_reloc, /* special_function */
1597 "R_ARM_TLS_IE12GP", /* name */
1598 FALSE, /* partial_inplace */
1599 0x00000fff, /* src_mask */
1600 0x00000fff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1604 /* 112-127 private relocations
1605 128 R_ARM_ME_TOO, obsolete
1606 129-255 unallocated in AAELF.
1608 249-255 extended, currently unused, relocations: */
1610 static reloc_howto_type elf32_arm_howto_table_2[4] =
1612 HOWTO (R_ARM_RREL32, /* type */
1614 0, /* size (0 = byte, 1 = short, 2 = long) */
1616 FALSE, /* pc_relative */
1618 complain_overflow_dont,/* complain_on_overflow */
1619 bfd_elf_generic_reloc, /* special_function */
1620 "R_ARM_RREL32", /* name */
1621 FALSE, /* partial_inplace */
1624 FALSE), /* pcrel_offset */
1626 HOWTO (R_ARM_RABS32, /* type */
1628 0, /* size (0 = byte, 1 = short, 2 = long) */
1630 FALSE, /* pc_relative */
1632 complain_overflow_dont,/* complain_on_overflow */
1633 bfd_elf_generic_reloc, /* special_function */
1634 "R_ARM_RABS32", /* name */
1635 FALSE, /* partial_inplace */
1638 FALSE), /* pcrel_offset */
1640 HOWTO (R_ARM_RPC24, /* type */
1642 0, /* size (0 = byte, 1 = short, 2 = long) */
1644 FALSE, /* pc_relative */
1646 complain_overflow_dont,/* complain_on_overflow */
1647 bfd_elf_generic_reloc, /* special_function */
1648 "R_ARM_RPC24", /* name */
1649 FALSE, /* partial_inplace */
1652 FALSE), /* pcrel_offset */
1654 HOWTO (R_ARM_RBASE, /* type */
1656 0, /* size (0 = byte, 1 = short, 2 = long) */
1658 FALSE, /* pc_relative */
1660 complain_overflow_dont,/* complain_on_overflow */
1661 bfd_elf_generic_reloc, /* special_function */
1662 "R_ARM_RBASE", /* name */
1663 FALSE, /* partial_inplace */
1666 FALSE) /* pcrel_offset */
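/* Map an ARM ELF relocation number onto its howto entry.  As an
   illustrative sketch (not in the original source): a type that is in
   range for elf32_arm_howto_table_1, such as R_ARM_ABS32 (2), resolves
   to elf32_arm_howto_table_1[2]; a type in the obsolete R_ARM_RREL32 ..
   R_ARM_RBASE range resolves to
   elf32_arm_howto_table_2[r_type - R_ARM_RREL32]; any other value has
   no howto and the caller gets NULL back.  */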
1669 static reloc_howto_type *
1670 elf32_arm_howto_from_type (unsigned int r_type)
1672 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1673 return &elf32_arm_howto_table_1[r_type];
1675 if (r_type >= R_ARM_RREL32
1676 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_2))
1677 return &elf32_arm_howto_table_2[r_type - R_ARM_RREL32];
1683 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1684 Elf_Internal_Rela * elf_reloc)
1686 unsigned int r_type;
1688 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1689 bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
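/* Illustrative example (not part of the original source): for 32-bit
   ELF the relocation type is the low 8 bits of r_info, so an
   Elf_Internal_Rela with r_info == 0x00000502 refers to symbol index 5
   and relocation type 2, and the assignment above attaches the
   R_ARM_ABS32 howto to the arelent.  */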
1692 struct elf32_arm_reloc_map
1694 bfd_reloc_code_real_type bfd_reloc_val;
1695 unsigned char elf_reloc_val;
1698 /* All entries in this list must also be present in elf32_arm_howto_table_1.  */
1699 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1701 {BFD_RELOC_NONE, R_ARM_NONE},
1702 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1703 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1704 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1705 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1706 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1707 {BFD_RELOC_32, R_ARM_ABS32},
1708 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1709 {BFD_RELOC_8, R_ARM_ABS8},
1710 {BFD_RELOC_16, R_ARM_ABS16},
1711 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1712 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1713 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1714 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1715 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1716 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1717 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1718 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1719 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1720 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1721 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1722 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1723 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1724 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1725 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1726 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1727 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1728 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1729 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1730 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1731 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1732 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
1733 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
1734 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
1735 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
1736 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
1737 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
1738 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
1739 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
1740 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
1741 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
1742 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
1743 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
1744 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
1745 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
1746 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
1747 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
1748 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1749 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1750 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1751 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1752 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1753 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1754 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1755 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1756 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1757 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1758 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1759 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1760 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1761 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1762 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1763 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1764 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1765 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1766 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1767 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1768 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1769 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1770 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1771 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1772 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1773 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1774 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1775 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1776 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1777 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1778 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX}
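/* Illustrative example (not part of the original source): given the map
   above, elf32_arm_reloc_type_lookup (abfd, BFD_RELOC_ARM_PCREL_BRANCH)
   finds the {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24} entry and returns
   the R_ARM_PC24 howto, while elf32_arm_reloc_name_lookup compares
   names with strcasecmp, so a query for "r_arm_abs32" also finds the
   R_ARM_ABS32 howto.  */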
1781 static reloc_howto_type *
1782 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1783 bfd_reloc_code_real_type code)
1787 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1788 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1789 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1794 static reloc_howto_type *
1795 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1800 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1801 if (elf32_arm_howto_table_1[i].name != NULL
1802 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1803 return &elf32_arm_howto_table_1[i];
1805 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1806 if (elf32_arm_howto_table_2[i].name != NULL
1807 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1808 return &elf32_arm_howto_table_2[i];
1813 /* Support for core dump NOTE sections. */
1816 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1821 switch (note->descsz)
1826 case 148: /* Linux/ARM 32-bit. */
1828 elf_tdata (abfd)->core_signal = bfd_get_16 (abfd, note->descdata + 12);
1831 elf_tdata (abfd)->core_pid = bfd_get_32 (abfd, note->descdata + 24);
1840 /* Make a ".reg/999" section. */
1841 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
1842 size, note->descpos + offset);
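/* Illustrative note on the layout handled above (not part of the
   original source): a 148-byte Linux/ARM NT_PRSTATUS descriptor carries
   the current signal as a 16-bit value at offset 12, the process ID as
   a 32-bit value at offset 24, and the 18-word (72-byte) general
   register dump at offset 72, which is the region exposed through the
   ".reg" pseudosection.  */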
1846 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
1848 switch (note->descsz)
1853 case 124: /* Linux/ARM elf_prpsinfo. */
1854 elf_tdata (abfd)->core_program
1855 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
1856 elf_tdata (abfd)->core_command
1857 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
1860 /* Note that for some reason a spurious space is tacked
1861    onto the end of the args in some implementations (at least
1862    one, anyway), so strip it off if it exists.  */
1864 char *command = elf_tdata (abfd)->core_command;
1865 int n = strlen (command);
1867 if (0 < n && command[n - 1] == ' ')
1868 command[n - 1] = '\0';
1874 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vec
1875 #define TARGET_LITTLE_NAME "elf32-littlearm"
1876 #define TARGET_BIG_SYM bfd_elf32_bigarm_vec
1877 #define TARGET_BIG_NAME "elf32-bigarm"
1879 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
1880 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
1882 typedef unsigned long int insn32;
1883 typedef unsigned short int insn16;
1885 /* In lieu of proper flags, assume all EABIv4 or later objects are interworkable.  */
1887 #define INTERWORK_FLAG(abfd) \
1888 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
1889 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
1890 || ((abfd)->flags & BFD_LINKER_CREATED))
1892 /* The linker script knows the section names for placement.
1893    The entry_names are used to do simple name mangling on the stubs.
1894    Given a function name and its type, the stub can be found.  The
1895    name can be changed.  The only requirement is that the %s be present.  */
1896 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
1897 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
1899 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
1900 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
1902 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
1903 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
1905 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
1906 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
1908 #define STUB_ENTRY_NAME "__%s_veneer"
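/* Illustrative example (not part of the original source; the function
   name is hypothetical): for a Thumb function "foo" that has to be
   reached from ARM code, the ARM-to-Thumb glue entry would be named
   "__foo_from_arm" and placed in the ".glue_7" section, the reverse
   Thumb-to-ARM entry would be "__foo_from_thumb" in ".glue_7t", and a
   generic long-branch veneer for "foo" would be "__foo_veneer".  */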
1910 /* The name of the dynamic interpreter.  This is put in the .interp section.  */
1912 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
1914 #ifdef FOUR_WORD_PLT
1916 /* The first entry in a procedure linkage table looks like
1917 this. It is set up so that any shared library function that is
1918 called before the relocation has been set up calls the dynamic linker first.  */
1920 static const bfd_vma elf32_arm_plt0_entry [] =
1922 0xe52de004, /* str lr, [sp, #-4]! */
1923 0xe59fe010, /* ldr lr, [pc, #16] */
1924 0xe08fe00e, /* add lr, pc, lr */
1925 0xe5bef008, /* ldr pc, [lr, #8]! */
1928 /* Subsequent entries in a procedure linkage table look like this.  */
1930 static const bfd_vma elf32_arm_plt_entry [] =
1932 0xe28fc600, /* add ip, pc, #NN */
1933 0xe28cca00, /* add ip, ip, #NN */
1934 0xe5bcf000, /* ldr pc, [ip, #NN]! */
1935 0x00000000, /* unused */
1940 /* The first entry in a procedure linkage table looks like
1941 this. It is set up so that any shared library function that is
1942 called before the relocation has been set up calls the dynamic linker first.  */
1944 static const bfd_vma elf32_arm_plt0_entry [] =
1946 0xe52de004, /* str lr, [sp, #-4]! */
1947 0xe59fe004, /* ldr lr, [pc, #4] */
1948 0xe08fe00e, /* add lr, pc, lr */
1949 0xe5bef008, /* ldr pc, [lr, #8]! */
1950 0x00000000, /* &GOT[0] - . */
1953 /* Subsequent entries in a procedure linkage table look like this.  */
1955 static const bfd_vma elf32_arm_plt_entry [] =
1957 0xe28fc600, /* add ip, pc, #0xNN00000 */
1958 0xe28cca00, /* add ip, ip, #0xNN000 */
1959 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
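/* Illustrative example (not part of the original source): the linker
   fills in the three immediates above with the displacement from this
   PLT entry to its GOT slot (got_address - plt_address - 8).  For a
   hypothetical displacement of 0x00154321 the entry would be completed
   as

     add ip, pc, #0x00100000   @ bits [27:20]
     add ip, ip, #0x00054000   @ bits [19:12]
     ldr pc, [ip, #0x321]!     @ bits [11:0]

   so ip ends up pointing at the GOT slot and the value loaded from it
   becomes the new pc.  */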
1964 /* The format of the first entry in the procedure linkage table
1965 for a VxWorks executable. */
1966 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
1968 0xe52dc008, /* str ip,[sp,#-8]! */
1969 0xe59fc000, /* ldr ip,[pc] */
1970 0xe59cf008, /* ldr pc,[ip,#8] */
1971 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
1974 /* The format of subsequent entries in a VxWorks executable. */
1975 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
1977 0xe59fc000, /* ldr ip,[pc] */
1978 0xe59cf000, /* ldr pc,[ip] */
1979 0x00000000, /* .long @got */
1980 0xe59fc000, /* ldr ip,[pc] */
1981 0xea000000, /* b _PLT */
1982 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
1985 /* The format of entries in a VxWorks shared library. */
1986 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
1988 0xe59fc000, /* ldr ip,[pc] */
1989 0xe79cf009, /* ldr pc,[ip,r9] */
1990 0x00000000, /* .long @got */
1991 0xe59fc000, /* ldr ip,[pc] */
1992 0xe599f008, /* ldr pc,[r9,#8] */
1993 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
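/* Illustrative note (not part of the original source): in the shared
   variant above the GOT is reached through r9, the VxWorks PIC base
   register, rather than through absolute addresses.  The first pair of
   instructions jumps via the @got word indexed off r9; the second pair
   loads ip with the relocation offset (@pltindex * sizeof (Elf32_Rela))
   and then loads pc from the word 8 bytes into the GOT, which is
   expected to hold the lazy-binding entry point.  */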
1996 /* An initial stub used if the PLT entry is referenced from Thumb code. */
1997 #define PLT_THUMB_STUB_SIZE 4
1998 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2004 /* The entries in a PLT when using a DLL-based target with multiple address spaces.  */
2006 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2008 0xe51ff004, /* ldr pc, [pc, #-4] */
2009 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2012 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2013 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2014 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2015 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2016 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2017 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
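/* These limits follow from the branch encodings: an ARM B/BL has a 24-bit
   signed word offset (roughly +/-32MB, with the +8 PC bias), a Thumb-1 BL
   pair has a 22-bit signed halfword offset (roughly +/-4MB, +4 PC bias),
   and the Thumb-2 encodings widen that to 24 bits (roughly +/-16MB). */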
2027 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2028 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2029 is inserted in arm_build_one_stub(). */
2030 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2031 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2032 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2033 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2034 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2035 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
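/* Each of the macros above builds one insn_sequence element: the raw
   instruction or data word, its type, the ELF relocation type to apply to
   it (R_ARM_NONE if none), and a relocation addend. */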
2040 enum stub_insn_type type;
2041 unsigned int r_type;
2045 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2046 to reach the stub if necessary. */
2047 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2049 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2050 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2053 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not available. */
2055 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2057 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2058 ARM_INSN(0xe12fff1c), /* bx ip */
2059 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2062 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2063 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2065 THUMB16_INSN(0xb401), /* push {r0} */
2066 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2067 THUMB16_INSN(0x4684), /* mov ip, r0 */
2068 THUMB16_INSN(0xbc01), /* pop {r0} */
2069 THUMB16_INSN(0x4760), /* bx ip */
2070 THUMB16_INSN(0xbf00), /* nop */
2071 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2074 /* V4T Thumb -> Thumb long branch stub. Using the stack is not allowed. */
2076 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2078 THUMB16_INSN(0x4778), /* bx pc */
2079 THUMB16_INSN(0x46c0), /* nop */
2080 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2081 ARM_INSN(0xe12fff1c), /* bx ip */
2082 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2085 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not available. */
2087 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2089 THUMB16_INSN(0x4778), /* bx pc */
2090 THUMB16_INSN(0x46c0), /* nop */
2091 ARM_INSN(0xe51ff004), /* ldr pc, [pc, #-4] */
2092 DATA_WORD(0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2095 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2096 one, when the destination is close enough. */
2097 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2099 THUMB16_INSN(0x4778), /* bx pc */
2100 THUMB16_INSN(0x46c0), /* nop */
2101 ARM_REL_INSN(0xea000000, -8), /* b (X-8) */
2104 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2105 blx to reach the stub if necessary. */
2106 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2108 ARM_INSN(0xe59fc000), /* ldr r12, [pc] */
2109 ARM_INSN(0xe08ff00c), /* add pc, pc, ip */
2110 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2113 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2114 blx to reach the stub if necessary. We can not add into pc;
2115 it is not guaranteed to mode switch (different in ARMv6 and ARMv7). */
2117 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2119 ARM_INSN(0xe59fc004), /* ldr r12, [pc, #4] */
2120 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2121 ARM_INSN(0xe12fff1c), /* bx ip */
2122 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2125 /* V4T ARM -> Thumb long branch stub, PIC. */
2126 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2128 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2129 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2130 ARM_INSN(0xe12fff1c), /* bx ip */
2131 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2134 /* V4T Thumb -> ARM long branch stub, PIC. */
2135 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2137 THUMB16_INSN(0x4778), /* bx pc */
2138 THUMB16_INSN(0x46c0), /* nop */
2139 ARM_INSN(0xe59fc000), /* ldr ip, [pc, #0] */
2140 ARM_INSN(0xe08cf00f), /* add pc, ip, pc */
2141 DATA_WORD(0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2144 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile architectures. */
2146 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2148 THUMB16_INSN(0xb401), /* push {r0} */
2149 THUMB16_INSN(0x4802), /* ldr r0, [pc, #8] */
2150 THUMB16_INSN(0x46fc), /* mov ip, pc */
2151 THUMB16_INSN(0x4484), /* add ip, r0 */
2152 THUMB16_INSN(0xbc01), /* pop {r0} */
2153 THUMB16_INSN(0x4760), /* bx ip */
2154 DATA_WORD(0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2157 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2159 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2161 THUMB16_INSN(0x4778), /* bx pc */
2162 THUMB16_INSN(0x46c0), /* nop */
2163 ARM_INSN(0xe59fc004), /* ldr ip, [pc, #4] */
2164 ARM_INSN(0xe08fc00c), /* add ip, pc, ip */
2165 ARM_INSN(0xe12fff1c), /* bx ip */
2166 DATA_WORD(0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2169 /* Cortex-A8 erratum-workaround stubs. */
2171 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2172 can't use a conditional branch to reach this stub). */
2174 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2176 THUMB16_BCOND_INSN(0xd001), /* b<cond>.n true. */
2177 THUMB32_B_INSN(0xf000b800, -4), /* b.w insn_after_original_branch. */
2178 THUMB32_B_INSN(0xf000b800, -4) /* true: b.w original_branch_dest. */
2181 /* Stub used for b.w and bl.w instructions. */
2183 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2185 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2188 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2190 THUMB32_B_INSN(0xf000b800, -4) /* b.w original_branch_dest. */
2193 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2194 instruction (which switches to ARM mode) to point to this stub. Jump to the
2195 real destination using an ARM-mode branch. */
2197 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2199 ARM_REL_INSN(0xea000000, -8) /* b original_branch_dest. */
2202 /* Section name for stubs is the associated section name plus this string. */
2204 #define STUB_SUFFIX ".stub"
2206 /* One entry per long/short branch stub defined above. */
2208 DEF_STUB(long_branch_any_any) \
2209 DEF_STUB(long_branch_v4t_arm_thumb) \
2210 DEF_STUB(long_branch_thumb_only) \
2211 DEF_STUB(long_branch_v4t_thumb_thumb) \
2212 DEF_STUB(long_branch_v4t_thumb_arm) \
2213 DEF_STUB(short_branch_v4t_thumb_arm) \
2214 DEF_STUB(long_branch_any_arm_pic) \
2215 DEF_STUB(long_branch_any_thumb_pic) \
2216 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2217 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2218 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2219 DEF_STUB(long_branch_thumb_only_pic) \
2220 DEF_STUB(a8_veneer_b_cond) \
2221 DEF_STUB(a8_veneer_b) \
2222 DEF_STUB(a8_veneer_bl) \
2223 DEF_STUB(a8_veneer_blx)
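/* The list above is expanded twice via the DEF_STUB macro: once (just
   below) to produce the arm_stub_* enumerators, and once to fill the
   stub_definitions[] table with each stub's template and template size. */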
2225 #define DEF_STUB(x) arm_stub_##x,
2226 enum elf32_arm_stub_type {
2229 /* Note the first a8_veneer type */
2230 arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond
2236 const insn_sequence* template_sequence;
2240 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2241 static const stub_def stub_definitions[] = {
2246 struct elf32_arm_stub_hash_entry
2248 /* Base hash table entry structure. */
2249 struct bfd_hash_entry root;
2251 /* The stub section. */
2254 /* Offset within stub_sec of the beginning of this stub. */
2255 bfd_vma stub_offset;
2257 /* Given the symbol's value and its section we can determine its final
2258 value when building the stubs (so the stub knows where to jump). */
2259 bfd_vma target_value;
2260 asection *target_section;
2262 /* Offset to apply to relocation referencing target_value. */
2263 bfd_vma target_addend;
2265 /* The instruction which caused this stub to be generated (only valid for
2266 Cortex-A8 erratum workaround stubs at present). */
2267 unsigned long orig_insn;
2269 /* The stub type. */
2270 enum elf32_arm_stub_type stub_type;
2271 /* Its encoding size in bytes. */
2274 const insn_sequence *stub_template;
2275 /* The size of the template (number of entries). */
2276 int stub_template_size;
2278 /* The symbol table entry, if any, that this was derived from. */
2279 struct elf32_arm_link_hash_entry *h;
2281 /* Destination symbol type (STT_ARM_TFUNC, ...) */
2282 unsigned char st_type;
2284 /* Where this stub is being called from, or, in the case of combined
2285 stub sections, the first input section in the group. */
2288 /* The name for the local symbol at the start of this stub. The
2289 stub name in the hash table has to be unique; this does not, so
2290 it can be friendlier. */
2294 /* Used to build a map of a section. This is required for mixed-endian
2297 typedef struct elf32_elf_section_map
2302 elf32_arm_section_map;
2304 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2308 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2309 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2310 VFP11_ERRATUM_ARM_VENEER,
2311 VFP11_ERRATUM_THUMB_VENEER
2313 elf32_vfp11_erratum_type;
2315 typedef struct elf32_vfp11_erratum_list
2317 struct elf32_vfp11_erratum_list *next;
2323 struct elf32_vfp11_erratum_list *veneer;
2324 unsigned int vfp_insn;
2328 struct elf32_vfp11_erratum_list *branch;
2332 elf32_vfp11_erratum_type type;
2334 elf32_vfp11_erratum_list;
2339 INSERT_EXIDX_CANTUNWIND_AT_END
2341 arm_unwind_edit_type;
2343 /* A (sorted) list of edits to apply to an unwind table. */
2344 typedef struct arm_unwind_table_edit
2346 arm_unwind_edit_type type;
2347 /* Note: we sometimes want to insert an unwind entry corresponding to a
2348 section different from the one we're currently writing out, so record the
2349 (text) section this edit relates to here. */
2350 asection *linked_section;
2352 struct arm_unwind_table_edit *next;
2354 arm_unwind_table_edit;
2356 typedef struct _arm_elf_section_data
2358 /* Information about mapping symbols. */
2359 struct bfd_elf_section_data elf;
2360 unsigned int mapcount;
2361 unsigned int mapsize;
2362 elf32_arm_section_map *map;
2363 /* Information about CPU errata. */
2364 unsigned int erratumcount;
2365 elf32_vfp11_erratum_list *erratumlist;
2366 /* Information about unwind tables. */
2369 /* Unwind info attached to a text section. */
2372 asection *arm_exidx_sec;
2375 /* Unwind info attached to an .ARM.exidx section. */
2378 arm_unwind_table_edit *unwind_edit_list;
2379 arm_unwind_table_edit *unwind_edit_tail;
2383 _arm_elf_section_data;
2385 #define elf32_arm_section_data(sec) \
2386 ((_arm_elf_section_data *) elf_section_data (sec))
2388 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2389 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2390 so may be created multiple times: we use an array of these entries whilst
2391 relaxing which we can refresh easily, then create stubs for each potentially
2392 erratum-triggering instruction once we've settled on a solution. */
2394 struct a8_erratum_fix {
2399 unsigned long orig_insn;
2401 enum elf32_arm_stub_type stub_type;
2404 /* A table of relocs applied to branches which might trigger Cortex-A8
2407 struct a8_erratum_reloc {
2409 bfd_vma destination;
2410 unsigned int r_type;
2411 unsigned char st_type;
2412 const char *sym_name;
2413 bfd_boolean non_a8_stub;
2416 /* The size of the thread control block. */
2419 struct elf_arm_obj_tdata
2421 struct elf_obj_tdata root;
2423 /* tls_type for each local got entry. */
2424 char *local_got_tls_type;
2426 /* Zero to warn when linking objects with incompatible enum sizes. */
2427 int no_enum_size_warning;
2429 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
2430 int no_wchar_size_warning;
2433 #define elf_arm_tdata(bfd) \
2434 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2436 #define elf32_arm_local_got_tls_type(bfd) \
2437 (elf_arm_tdata (bfd)->local_got_tls_type)
2439 #define is_arm_elf(bfd) \
2440 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2441 && elf_tdata (bfd) != NULL \
2442 && elf_object_id (bfd) == ARM_ELF_DATA)
2445 elf32_arm_mkobject (bfd *abfd)
2447 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2451 /* The ARM linker needs to keep track of the number of relocs that it
2452 decides to copy in check_relocs for each symbol. This is so that
2453 it can discard PC relative relocs if it doesn't need them when
2454 linking with -Bsymbolic. We store the information in a field
2455 extending the regular ELF linker hash table. */
2457 /* This structure keeps track of the number of relocs we have copied
2458 for a given symbol. */
2459 struct elf32_arm_relocs_copied
2462 struct elf32_arm_relocs_copied * next;
2463 /* A section in dynobj. */
2465 /* Number of relocs copied in this section. */
2466 bfd_size_type count;
2467 /* Number of PC-relative relocs copied in this section. */
2468 bfd_size_type pc_count;
2471 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2473 /* Arm ELF linker hash entry. */
2474 struct elf32_arm_link_hash_entry
2476 struct elf_link_hash_entry root;
2478 /* Number of PC relative relocs copied for this symbol. */
2479 struct elf32_arm_relocs_copied * relocs_copied;
2481 /* We reference count Thumb references to a PLT entry separately,
2482 so that we can emit the Thumb trampoline only if needed. */
2483 bfd_signed_vma plt_thumb_refcount;
2485 /* Some references from Thumb code may be eliminated by BL->BLX
2486 conversion, so record them separately. */
2487 bfd_signed_vma plt_maybe_thumb_refcount;
2489 /* Since PLT entries have variable size if the Thumb prologue is
2490 used, we need to record the index into .got.plt instead of
2491 recomputing it from the PLT offset. */
2492 bfd_signed_vma plt_got_offset;
2494 #define GOT_UNKNOWN 0
2495 #define GOT_NORMAL 1
2496 #define GOT_TLS_GD 2
2497 #define GOT_TLS_IE 4
2498 unsigned char tls_type;
2500 /* The symbol marking the real symbol location for exported thumb
2501 symbols with Arm stubs. */
2502 struct elf_link_hash_entry *export_glue;
2504 /* A pointer to the most recently used stub hash entry against this symbol. */
2506 struct elf32_arm_stub_hash_entry *stub_cache;
2509 /* Traverse an arm ELF linker hash table. */
2510 #define elf32_arm_link_hash_traverse(table, func, info) \
2511 (elf_link_hash_traverse \
2513 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
2516 /* Get the ARM elf linker hash table from a link_info structure. */
2517 #define elf32_arm_hash_table(info) \
2518 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
2519 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
2521 #define arm_stub_hash_lookup(table, string, create, copy) \
2522 ((struct elf32_arm_stub_hash_entry *) \
2523 bfd_hash_lookup ((table), (string), (create), (copy)))
2525 /* Array to keep track of which stub sections have been created, and
2526 information on stub grouping. */
2529 /* This is the section to which stubs in the group will be
2532 /* The stub section. */
2536 /* ARM ELF linker hash table. */
2537 struct elf32_arm_link_hash_table
2539 /* The main hash table. */
2540 struct elf_link_hash_table root;
2542 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
2543 bfd_size_type thumb_glue_size;
2545 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
2546 bfd_size_type arm_glue_size;
2548 /* The size in bytes of section containing the ARMv4 BX veneers. */
2549 bfd_size_type bx_glue_size;
2551 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
2552 veneer has been populated. */
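/* The array below is indexed by the register operand of the BX
   instruction being veneered (r0..r14). */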
2553 bfd_vma bx_glue_offset[15];
2555 /* The size in bytes of the section containing glue for VFP11 erratum veneers. */
2557 bfd_size_type vfp11_erratum_glue_size;
2559 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
2560 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
2561 elf32_arm_write_section(). */
2562 struct a8_erratum_fix *a8_erratum_fixes;
2563 unsigned int num_a8_erratum_fixes;
2565 /* An arbitrary input BFD chosen to hold the glue sections. */
2566 bfd * bfd_of_glue_owner;
2568 /* Nonzero to output a BE8 image. */
2571 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
2572 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
2575 /* The relocation to use for R_ARM_TARGET2 relocations. */
2578 /* 0 = Ignore R_ARM_V4BX.
2579 1 = Convert BX to MOV PC.
2580 2 = Generate v4 interworking stubs. */
2583 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
2586 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
2589 /* What sort of code sequences we should look for which may trigger the
2590 VFP11 denorm erratum. */
2591 bfd_arm_vfp11_fix vfp11_fix;
2593 /* Global counter for the number of fixes we have emitted. */
2594 int num_vfp11_fixes;
2596 /* Nonzero to force PIC branch veneers. */
2599 /* The number of bytes in the initial entry in the PLT. */
2600 bfd_size_type plt_header_size;
2602 /* The number of bytes in the subsequent PLT entries. */
2603 bfd_size_type plt_entry_size;
2605 /* True if the target system is VxWorks. */
2608 /* True if the target system is Symbian OS. */
2611 /* True if the target uses REL relocations. */
2614 /* Short-cuts to get to dynamic linker sections. */
2623 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
2626 /* Data for R_ARM_TLS_LDM32 relocations. */
2629 bfd_signed_vma refcount;
2633 /* Small local sym cache. */
2634 struct sym_cache sym_cache;
2636 /* For convenience in allocate_dynrelocs. */
2639 /* The stub hash table. */
2640 struct bfd_hash_table stub_hash_table;
2642 /* Linker stub bfd. */
2645 /* Linker call-backs. */
2646 asection * (*add_stub_section) (const char *, asection *);
2647 void (*layout_sections_again) (void);
2649 /* Array to keep track of which stub sections have been created, and
2650 information on stub grouping. */
2651 struct map_stub *stub_group;
2653 /* Assorted information used by elf32_arm_size_stubs. */
2654 unsigned int bfd_count;
2656 asection **input_list;
2659 /* Create an entry in an ARM ELF linker hash table. */
2661 static struct bfd_hash_entry *
2662 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
2663 struct bfd_hash_table * table,
2664 const char * string)
2666 struct elf32_arm_link_hash_entry * ret =
2667 (struct elf32_arm_link_hash_entry *) entry;
2669 /* Allocate the structure if it has not already been allocated by a subclass. */
2672 ret = (struct elf32_arm_link_hash_entry *)
2673 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
2675 return (struct bfd_hash_entry *) ret;
2677 /* Call the allocation method of the superclass. */
2678 ret = ((struct elf32_arm_link_hash_entry *)
2679 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
2683 ret->relocs_copied = NULL;
2684 ret->tls_type = GOT_UNKNOWN;
2685 ret->plt_thumb_refcount = 0;
2686 ret->plt_maybe_thumb_refcount = 0;
2687 ret->plt_got_offset = -1;
2688 ret->export_glue = NULL;
2690 ret->stub_cache = NULL;
2693 return (struct bfd_hash_entry *) ret;
2696 /* Initialize an entry in the stub hash table. */
2698 static struct bfd_hash_entry *
2699 stub_hash_newfunc (struct bfd_hash_entry *entry,
2700 struct bfd_hash_table *table,
2703 /* Allocate the structure if it has not already been allocated by a subclass. */
2707 entry = (struct bfd_hash_entry *)
2708 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
2713 /* Call the allocation method of the superclass. */
2714 entry = bfd_hash_newfunc (entry, table, string);
2717 struct elf32_arm_stub_hash_entry *eh;
2719 /* Initialize the local fields. */
2720 eh = (struct elf32_arm_stub_hash_entry *) entry;
2721 eh->stub_sec = NULL;
2722 eh->stub_offset = 0;
2723 eh->target_value = 0;
2724 eh->target_section = NULL;
2725 eh->target_addend = 0;
2727 eh->stub_type = arm_stub_none;
2729 eh->stub_template = NULL;
2730 eh->stub_template_size = 0;
2733 eh->output_name = NULL;
2739 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
2740 shortcuts to them in our hash table. */
2743 create_got_section (bfd *dynobj, struct bfd_link_info *info)
2745 struct elf32_arm_link_hash_table *htab;
2747 htab = elf32_arm_hash_table (info);
2751 /* BPABI objects never have a GOT, or associated sections. */
2752 if (htab->symbian_p)
2755 if (! _bfd_elf_create_got_section (dynobj, info))
2758 htab->sgot = bfd_get_section_by_name (dynobj, ".got");
2759 htab->sgotplt = bfd_get_section_by_name (dynobj, ".got.plt");
2760 if (!htab->sgot || !htab->sgotplt)
2763 htab->srelgot = bfd_get_section_by_name (dynobj,
2764 RELOC_SECTION (htab, ".got"));
2765 if (htab->srelgot == NULL)
2770 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
2771 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
2775 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
2777 struct elf32_arm_link_hash_table *htab;
2779 htab = elf32_arm_hash_table (info);
2783 if (!htab->sgot && !create_got_section (dynobj, info))
2786 if (!_bfd_elf_create_dynamic_sections (dynobj, info))
2789 htab->splt = bfd_get_section_by_name (dynobj, ".plt");
2790 htab->srelplt = bfd_get_section_by_name (dynobj,
2791 RELOC_SECTION (htab, ".plt"));
2792 htab->sdynbss = bfd_get_section_by_name (dynobj, ".dynbss");
2794 htab->srelbss = bfd_get_section_by_name (dynobj,
2795 RELOC_SECTION (htab, ".bss"));
2797 if (htab->vxworks_p)
2799 if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
2804 htab->plt_header_size = 0;
2805 htab->plt_entry_size
2806 = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
2810 htab->plt_header_size
2811 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
2812 htab->plt_entry_size
2813 = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
2820 || (!info->shared && !htab->srelbss))
2826 /* Copy the extra info we tack onto an elf_link_hash_entry. */
2829 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
2830 struct elf_link_hash_entry *dir,
2831 struct elf_link_hash_entry *ind)
2833 struct elf32_arm_link_hash_entry *edir, *eind;
2835 edir = (struct elf32_arm_link_hash_entry *) dir;
2836 eind = (struct elf32_arm_link_hash_entry *) ind;
2838 if (eind->relocs_copied != NULL)
2840 if (edir->relocs_copied != NULL)
2842 struct elf32_arm_relocs_copied **pp;
2843 struct elf32_arm_relocs_copied *p;
2845 /* Add reloc counts against the indirect sym to the direct sym
2846 list. Merge any entries against the same section. */
2847 for (pp = &eind->relocs_copied; (p = *pp) != NULL; )
2849 struct elf32_arm_relocs_copied *q;
2851 for (q = edir->relocs_copied; q != NULL; q = q->next)
2852 if (q->section == p->section)
2854 q->pc_count += p->pc_count;
2855 q->count += p->count;
2862 *pp = edir->relocs_copied;
2865 edir->relocs_copied = eind->relocs_copied;
2866 eind->relocs_copied = NULL;
2869 if (ind->root.type == bfd_link_hash_indirect)
2871 /* Copy over PLT info. */
2872 edir->plt_thumb_refcount += eind->plt_thumb_refcount;
2873 eind->plt_thumb_refcount = 0;
2874 edir->plt_maybe_thumb_refcount += eind->plt_maybe_thumb_refcount;
2875 eind->plt_maybe_thumb_refcount = 0;
2877 if (dir->got.refcount <= 0)
2879 edir->tls_type = eind->tls_type;
2880 eind->tls_type = GOT_UNKNOWN;
2884 _bfd_elf_link_hash_copy_indirect (info, dir, ind);
2887 /* Create an ARM elf linker hash table. */
2889 static struct bfd_link_hash_table *
2890 elf32_arm_link_hash_table_create (bfd *abfd)
2892 struct elf32_arm_link_hash_table *ret;
2893 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
2895 ret = (struct elf32_arm_link_hash_table *) bfd_malloc (amt);
2899 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
2900 elf32_arm_link_hash_newfunc,
2901 sizeof (struct elf32_arm_link_hash_entry),
2909 ret->sgotplt = NULL;
2910 ret->srelgot = NULL;
2912 ret->srelplt = NULL;
2913 ret->sdynbss = NULL;
2914 ret->srelbss = NULL;
2915 ret->srelplt2 = NULL;
2916 ret->thumb_glue_size = 0;
2917 ret->arm_glue_size = 0;
2918 ret->bx_glue_size = 0;
2919 memset (ret->bx_glue_offset, 0, sizeof (ret->bx_glue_offset));
2920 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
2921 ret->vfp11_erratum_glue_size = 0;
2922 ret->num_vfp11_fixes = 0;
2923 ret->fix_cortex_a8 = 0;
2924 ret->bfd_of_glue_owner = NULL;
2925 ret->byteswap_code = 0;
2926 ret->target1_is_rel = 0;
2927 ret->target2_reloc = R_ARM_NONE;
2928 #ifdef FOUR_WORD_PLT
2929 ret->plt_header_size = 16;
2930 ret->plt_entry_size = 16;
2932 ret->plt_header_size = 20;
2933 ret->plt_entry_size = 12;
2940 ret->sym_cache.abfd = NULL;
2942 ret->tls_ldm_got.refcount = 0;
2943 ret->stub_bfd = NULL;
2944 ret->add_stub_section = NULL;
2945 ret->layout_sections_again = NULL;
2946 ret->stub_group = NULL;
2949 ret->input_list = NULL;
2951 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
2952 sizeof (struct elf32_arm_stub_hash_entry)))
2958 return &ret->root.root;
2961 /* Free the derived linker hash table. */
2964 elf32_arm_hash_table_free (struct bfd_link_hash_table *hash)
2966 struct elf32_arm_link_hash_table *ret
2967 = (struct elf32_arm_link_hash_table *) hash;
2969 bfd_hash_table_free (&ret->stub_hash_table);
2970 _bfd_generic_link_hash_table_free (hash);
2973 /* Determine if we're dealing with a Thumb only architecture. */
2976 using_thumb_only (struct elf32_arm_link_hash_table *globals)
2978 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2982 if (arch != TAG_CPU_ARCH_V7 && arch != TAG_CPU_ARCH_V7E_M)
2985 profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2986 Tag_CPU_arch_profile);
2988 return profile == 'M';
2991 /* Determine if we're dealing with a Thumb-2 object. */
2994 using_thumb2 (struct elf32_arm_link_hash_table *globals)
2996 int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
2998 return arch == TAG_CPU_ARCH_V6T2 || arch >= TAG_CPU_ARCH_V7;
3001 /* Determine what kind of NOPs are available. */
3004 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3006 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3008 return arch == TAG_CPU_ARCH_V6T2
3009 || arch == TAG_CPU_ARCH_V6K
3010 || arch == TAG_CPU_ARCH_V7
3011 || arch == TAG_CPU_ARCH_V7E_M;
3015 arch_has_thumb2_nop (struct elf32_arm_link_hash_table *globals)
3017 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3019 return (arch == TAG_CPU_ARCH_V6T2 || arch == TAG_CPU_ARCH_V7
3020 || arch == TAG_CPU_ARCH_V7E_M);
3024 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3028 case arm_stub_long_branch_thumb_only:
3029 case arm_stub_long_branch_v4t_thumb_arm:
3030 case arm_stub_short_branch_v4t_thumb_arm:
3031 case arm_stub_long_branch_v4t_thumb_arm_pic:
3032 case arm_stub_long_branch_thumb_only_pic:
3043 /* Determine the type of stub needed, if any, for a call. */
3045 static enum elf32_arm_stub_type
3046 arm_type_of_stub (struct bfd_link_info *info,
3047 asection *input_sec,
3048 const Elf_Internal_Rela *rel,
3049 unsigned char st_type,
3050 struct elf32_arm_link_hash_entry *hash,
3051 bfd_vma destination,
3057 bfd_signed_vma branch_offset;
3058 unsigned int r_type;
3059 struct elf32_arm_link_hash_table * globals;
3062 enum elf32_arm_stub_type stub_type = arm_stub_none;
3065 /* We don't know the actual type of destination in case it is of
3066 type STT_SECTION: give up. */
3067 if (st_type == STT_SECTION)
3070 globals = elf32_arm_hash_table (info);
3071 if (globals == NULL)
3074 thumb_only = using_thumb_only (globals);
3076 thumb2 = using_thumb2 (globals);
3078 /* Determine where the call point is. */
3079 location = (input_sec->output_offset
3080 + input_sec->output_section->vma
3083 branch_offset = (bfd_signed_vma)(destination - location);
3085 r_type = ELF32_R_TYPE (rel->r_info);
3087 /* Keep a simpler condition, for the sake of clarity. */
3088 if (globals->splt != NULL && hash != NULL && hash->root.plt.offset != (bfd_vma) -1)
3091 /* Note when dealing with PLT entries: the main PLT stub is in
3092 ARM mode, so if the branch is in Thumb mode, another
3093 Thumb->ARM stub will be inserted later just before the ARM
3094 PLT stub. We don't take this extra distance into account
3095 here, because if a long branch stub is needed, we'll add a
3096 Thumb->Arm one and branch directly to the ARM PLT entry
3097 because it avoids spreading offset corrections in several places. */
3101 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
3103 /* Handle cases where:
3104 - this call goes too far (different Thumb/Thumb2 max
3106 - it's a Thumb->Arm call and blx is not available, or it's a
3107 Thumb->Arm branch (not bl). A stub is needed in this case,
3108 but only if this call is not through a PLT entry. Indeed,
3109 PLT stubs handle mode switching already.
3112 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3113 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3115 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3116 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3117 || ((st_type != STT_ARM_TFUNC)
3118 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
3119 || (r_type == R_ARM_THM_JUMP24))
3122 if (st_type == STT_ARM_TFUNC)
3124 /* Thumb to thumb. */
3127 stub_type = (info->shared | globals->pic_veneer)
3129 ? ((globals->use_blx
3130 && (r_type == R_ARM_THM_CALL))
3131 /* V5T and above. Stub starts with ARM code, so
3132 we must be able to switch mode before
3133 reaching it, which is only possible for 'bl'
3134 (ie R_ARM_THM_CALL relocation). */
3135 ? arm_stub_long_branch_any_thumb_pic
3136 /* On V4T, use Thumb code only. */
3137 : arm_stub_long_branch_v4t_thumb_thumb_pic)
3139 /* non-PIC stubs. */
3140 : ((globals->use_blx
3141 && (r_type == R_ARM_THM_CALL))
3142 /* V5T and above. */
3143 ? arm_stub_long_branch_any_any
3145 : arm_stub_long_branch_v4t_thumb_thumb);
3149 stub_type = (info->shared | globals->pic_veneer)
3151 ? arm_stub_long_branch_thumb_only_pic
3153 : arm_stub_long_branch_thumb_only;
3160 && sym_sec->owner != NULL
3161 && !INTERWORK_FLAG (sym_sec->owner))
3163 (*_bfd_error_handler)
3164 (_("%B(%s): warning: interworking not enabled.\n"
3165 " first occurrence: %B: Thumb call to ARM"),
3166 sym_sec->owner, input_bfd, name);
3169 stub_type = (info->shared | globals->pic_veneer)
3171 ? ((globals->use_blx
3172 && (r_type == R_ARM_THM_CALL))
3173 /* V5T and above. */
3174 ? arm_stub_long_branch_any_arm_pic
3176 : arm_stub_long_branch_v4t_thumb_arm_pic)
3178 /* non-PIC stubs. */
3179 : ((globals->use_blx
3180 && (r_type == R_ARM_THM_CALL))
3181 /* V5T and above. */
3182 ? arm_stub_long_branch_any_any
3184 : arm_stub_long_branch_v4t_thumb_arm);
3186 /* Handle v4t short branches. */
3187 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3188 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3189 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
3190 stub_type = arm_stub_short_branch_v4t_thumb_arm;
3194 else if (r_type == R_ARM_CALL || r_type == R_ARM_JUMP24 || r_type == R_ARM_PLT32)
3196 if (st_type == STT_ARM_TFUNC)
3201 && sym_sec->owner != NULL
3202 && !INTERWORK_FLAG (sym_sec->owner))
3204 (*_bfd_error_handler)
3205 (_("%B(%s): warning: interworking not enabled.\n"
3206 " first occurrence: %B: ARM call to Thumb"),
3207 sym_sec->owner, input_bfd, name);
3210 /* We have an extra 2 bytes of reach because of
3211 the mode change (bit 24 (H) of BLX encoding). */
3212 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
3213 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3214 || ((r_type == R_ARM_CALL) && !globals->use_blx)
3215 || (r_type == R_ARM_JUMP24)
3216 || (r_type == R_ARM_PLT32))
3218 stub_type = (info->shared | globals->pic_veneer)
3220 ? ((globals->use_blx)
3221 /* V5T and above. */
3222 ? arm_stub_long_branch_any_thumb_pic
3224 : arm_stub_long_branch_v4t_arm_thumb_pic)
3226 /* non-PIC stubs. */
3227 : ((globals->use_blx)
3228 /* V5T and above. */
3229 ? arm_stub_long_branch_any_any
3231 : arm_stub_long_branch_v4t_arm_thumb);
3237 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
3238 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
3240 stub_type = (info->shared | globals->pic_veneer)
3242 ? arm_stub_long_branch_any_arm_pic
3243 /* non-PIC stubs. */
3244 : arm_stub_long_branch_any_any;
3252 /* Build a name for an entry in the stub hash table. */
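/* Names are "<section id>_<symbol>+<addend>" for global symbols and
   "<section id>_<section id>:<symbol index>+<addend>" for local ones,
   e.g. "0000002a_printf+0". */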
3255 elf32_arm_stub_name (const asection *input_section,
3256 const asection *sym_sec,
3257 const struct elf32_arm_link_hash_entry *hash,
3258 const Elf_Internal_Rela *rel)
3265 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1;
3266 stub_name = (char *) bfd_malloc (len);
3267 if (stub_name != NULL)
3268 sprintf (stub_name, "%08x_%s+%x",
3269 input_section->id & 0xffffffff,
3270 hash->root.root.root.string,
3271 (int) rel->r_addend & 0xffffffff);
3275 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
3276 stub_name = (char *) bfd_malloc (len);
3277 if (stub_name != NULL)
3278 sprintf (stub_name, "%08x_%x:%x+%x",
3279 input_section->id & 0xffffffff,
3280 sym_sec->id & 0xffffffff,
3281 (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
3282 (int) rel->r_addend & 0xffffffff);
3288 /* Look up an entry in the stub hash. Stub entries are cached because
3289 creating the stub name takes a bit of time. */
3291 static struct elf32_arm_stub_hash_entry *
3292 elf32_arm_get_stub_entry (const asection *input_section,
3293 const asection *sym_sec,
3294 struct elf_link_hash_entry *hash,
3295 const Elf_Internal_Rela *rel,
3296 struct elf32_arm_link_hash_table *htab)
3298 struct elf32_arm_stub_hash_entry *stub_entry;
3299 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
3300 const asection *id_sec;
3302 if ((input_section->flags & SEC_CODE) == 0)
3305 /* If this input section is part of a group of sections sharing one
3306 stub section, then use the id of the first section in the group.
3307 Stub names need to include a section id, as there may well be
3308 more than one stub used to reach say, printf, and we need to
3309 distinguish between them. */
3310 id_sec = htab->stub_group[input_section->id].link_sec;
3312 if (h != NULL && h->stub_cache != NULL
3313 && h->stub_cache->h == h
3314 && h->stub_cache->id_sec == id_sec)
3316 stub_entry = h->stub_cache;
3322 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel);
3323 if (stub_name == NULL)
3326 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
3327 stub_name, FALSE, FALSE);
3329 h->stub_cache = stub_entry;
3337 /* Find or create a stub section. Returns a pointer to the stub section, and
3338 the section to which the stub section will be attached (in *LINK_SEC_P).
3339 LINK_SEC_P may be NULL. */
3342 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
3343 struct elf32_arm_link_hash_table *htab)
3348 link_sec = htab->stub_group[section->id].link_sec;
3349 stub_sec = htab->stub_group[section->id].stub_sec;
3350 if (stub_sec == NULL)
3352 stub_sec = htab->stub_group[link_sec->id].stub_sec;
3353 if (stub_sec == NULL)
3359 namelen = strlen (link_sec->name);
3360 len = namelen + sizeof (STUB_SUFFIX);
3361 s_name = (char *) bfd_alloc (htab->stub_bfd, len);
3365 memcpy (s_name, link_sec->name, namelen);
3366 memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
3367 stub_sec = (*htab->add_stub_section) (s_name, link_sec);
3368 if (stub_sec == NULL)
3370 htab->stub_group[link_sec->id].stub_sec = stub_sec;
3372 htab->stub_group[section->id].stub_sec = stub_sec;
3376 *link_sec_p = link_sec;
3381 /* Add a new stub entry to the stub hash. Not all fields of the new
3382 stub entry are initialised. */
3384 static struct elf32_arm_stub_hash_entry *
3385 elf32_arm_add_stub (const char *stub_name,
3387 struct elf32_arm_link_hash_table *htab)
3391 struct elf32_arm_stub_hash_entry *stub_entry;
3393 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab);
3394 if (stub_sec == NULL)
3397 /* Enter this entry into the linker stub hash table. */
3398 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
3400 if (stub_entry == NULL)
3402 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
3408 stub_entry->stub_sec = stub_sec;
3409 stub_entry->stub_offset = 0;
3410 stub_entry->id_sec = link_sec;
3415 /* Store an Arm insn into an output section not processed by
3416 elf32_arm_write_section. */
3419 put_arm_insn (struct elf32_arm_link_hash_table * htab,
3420 bfd * output_bfd, bfd_vma val, void * ptr)
3422 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3423 bfd_putl32 (val, ptr);
3425 bfd_putb32 (val, ptr);
3428 /* Store a 16-bit Thumb insn into an output section not processed by
3429 elf32_arm_write_section. */
3432 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
3433 bfd * output_bfd, bfd_vma val, void * ptr)
3435 if (htab->byteswap_code != bfd_little_endian (output_bfd))
3436 bfd_putl16 (val, ptr);
3438 bfd_putb16 (val, ptr);
3441 static bfd_reloc_status_type elf32_arm_final_link_relocate
3442 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
3443 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
3444 const char *, int, struct elf_link_hash_entry *, bfd_boolean *, char **);
3447 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
3451 struct elf32_arm_stub_hash_entry *stub_entry;
3452 struct elf32_arm_link_hash_table *globals;
3453 struct bfd_link_info *info;
3461 const insn_sequence *template_sequence;
3463 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
3464 int stub_reloc_offset[MAXRELOCS] = {0, 0};
3467 /* Massage our args to the form they really have. */
3468 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3469 info = (struct bfd_link_info *) in_arg;
3471 globals = elf32_arm_hash_table (info);
3472 if (globals == NULL)
3475 stub_sec = stub_entry->stub_sec;
3477 if ((globals->fix_cortex_a8 < 0)
3478 != (stub_entry->stub_type >= arm_stub_a8_veneer_lwm))
3479 /* We have to do the a8 fixes last, as they are less aligned than
3480 the other veneers. */
3483 /* Make a note of the offset within the stubs for this entry. */
3484 stub_entry->stub_offset = stub_sec->size;
3485 loc = stub_sec->contents + stub_entry->stub_offset;
3487 stub_bfd = stub_sec->owner;
3489 /* This is the address of the start of the stub. */
3490 stub_addr = stub_sec->output_section->vma + stub_sec->output_offset
3491 + stub_entry->stub_offset;
3493 /* This is the address of the stub destination. */
3494 sym_value = (stub_entry->target_value
3495 + stub_entry->target_section->output_offset
3496 + stub_entry->target_section->output_section->vma);
3498 template_sequence = stub_entry->stub_template;
3499 template_size = stub_entry->stub_template_size;
3502 for (i = 0; i < template_size; i++)
3504 switch (template_sequence[i].type)
3508 bfd_vma data = (bfd_vma) template_sequence[i].data;
3509 if (template_sequence[i].reloc_addend != 0)
3511 /* We've borrowed the reloc_addend field to mean we should
3512 insert a condition code into this (Thumb-1 branch)
3513 instruction. See THUMB16_BCOND_INSN. */
3514 BFD_ASSERT ((data & 0xff00) == 0xd000);
3515 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
3517 put_thumb_insn (globals, stub_bfd, data, loc + size);
3523 put_thumb_insn (globals, stub_bfd,
3524 (template_sequence[i].data >> 16) & 0xffff,
3526 put_thumb_insn (globals, stub_bfd, template_sequence[i].data & 0xffff,
3528 if (template_sequence[i].r_type != R_ARM_NONE)
3530 stub_reloc_idx[nrelocs] = i;
3531 stub_reloc_offset[nrelocs++] = size;
3537 put_arm_insn (globals, stub_bfd, template_sequence[i].data,
3539 /* Handle cases where the target is encoded within the instruction. */
3541 if (template_sequence[i].r_type == R_ARM_JUMP24)
3543 stub_reloc_idx[nrelocs] = i;
3544 stub_reloc_offset[nrelocs++] = size;
3550 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
3551 stub_reloc_idx[nrelocs] = i;
3552 stub_reloc_offset[nrelocs++] = size;
3562 stub_sec->size += size;
3564 /* Stub size has already been computed in arm_size_one_stub. Check
3566 BFD_ASSERT (size == stub_entry->stub_size);
3568 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
3569 if (stub_entry->st_type == STT_ARM_TFUNC)
3572 /* Assume there is at least one, and at most MAXRELOCS, entries to relocate in each stub. */
3574 BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
3576 for (i = 0; i < nrelocs; i++)
3577 if (template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP24
3578 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_JUMP19
3579 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_CALL
3580 || template_sequence[stub_reloc_idx[i]].r_type == R_ARM_THM_XPC22)
3582 Elf_Internal_Rela rel;
3583 bfd_boolean unresolved_reloc;
3584 char *error_message;
3586 = (template_sequence[stub_reloc_idx[i]].r_type != R_ARM_THM_XPC22)
3587 ? STT_ARM_TFUNC : 0;
3588 bfd_vma points_to = sym_value + stub_entry->target_addend;
3590 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
3591 rel.r_info = ELF32_R_INFO (0,
3592 template_sequence[stub_reloc_idx[i]].r_type);
3593 rel.r_addend = template_sequence[stub_reloc_idx[i]].reloc_addend;
3595 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
3596 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
3597 template should refer back to the instruction after the original branch. */
3599 points_to = sym_value;
3601 /* There may be unintended consequences if this is not true. */
3602 BFD_ASSERT (stub_entry->h == NULL);
3604 /* Note: _bfd_final_link_relocate doesn't handle these relocations
3605 properly. We should probably use this function unconditionally,
3606 rather than only for certain relocations listed in the enclosing
3607 conditional, for the sake of consistency. */
3608 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
3609 (template_sequence[stub_reloc_idx[i]].r_type),
3610 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
3611 points_to, info, stub_entry->target_section, "", sym_flags,
3612 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
3617 _bfd_final_link_relocate (elf32_arm_howto_from_type
3618 (template_sequence[stub_reloc_idx[i]].r_type), stub_bfd, stub_sec,
3619 stub_sec->contents, stub_entry->stub_offset + stub_reloc_offset[i],
3620 sym_value + stub_entry->target_addend,
3621 template_sequence[stub_reloc_idx[i]].reloc_addend);
3628 /* Calculate the template, template size and instruction size for a stub.
3629 Return value is the instruction size. */
3632 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
3633 const insn_sequence **stub_template,
3634 int *stub_template_size)
3636 const insn_sequence *template_sequence = NULL;
3637 int template_size = 0, i;
3640 template_sequence = stub_definitions[stub_type].template_sequence;
3641 template_size = stub_definitions[stub_type].template_size;
3644 for (i = 0; i < template_size; i++)
3646 switch (template_sequence[i].type)
3665 *stub_template = template_sequence;
3667 if (stub_template_size)
3668 *stub_template_size = template_size;
3673 /* As above, but don't actually build the stub. Just bump offset so
3674 we know stub section sizes. */
3677 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
3680 struct elf32_arm_stub_hash_entry *stub_entry;
3681 struct elf32_arm_link_hash_table *htab;
3682 const insn_sequence *template_sequence;
3683 int template_size, size;
3685 /* Massage our args to the form they really have. */
3686 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
3687 htab = (struct elf32_arm_link_hash_table *) in_arg;
3689 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
3690 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
3692 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
3695 stub_entry->stub_size = size;
3696 stub_entry->stub_template = template_sequence;
3697 stub_entry->stub_template_size = template_size;
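/* Round the stub size up to a multiple of 8 bytes when reserving space
   for it in the stub section. */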
3699 size = (size + 7) & ~7;
3700 stub_entry->stub_sec->size += size;
3705 /* External entry points for sizing and building linker stubs. */
3707 /* Set up various things so that we can make a list of input sections
3708 for each output section included in the link. Returns -1 on error,
3709 0 when no stubs will be needed, and 1 on success. */
3712 elf32_arm_setup_section_lists (bfd *output_bfd,
3713 struct bfd_link_info *info)
3716 unsigned int bfd_count;
3717 int top_id, top_index;
3719 asection **input_list, **list;
3721 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3725 if (! is_elf_hash_table (htab))
3728 /* Count the number of input BFDs and find the top input section id. */
3729 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
3731 input_bfd = input_bfd->link_next)
3734 for (section = input_bfd->sections;
3736 section = section->next)
3738 if (top_id < section->id)
3739 top_id = section->id;
3742 htab->bfd_count = bfd_count;
3744 amt = sizeof (struct map_stub) * (top_id + 1);
3745 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
3746 if (htab->stub_group == NULL)
3749 /* We can't use output_bfd->section_count here to find the top output
3750 section index as some sections may have been removed, and
3751 _bfd_strip_section_from_output doesn't renumber the indices. */
3752 for (section = output_bfd->sections, top_index = 0;
3754 section = section->next)
3756 if (top_index < section->index)
3757 top_index = section->index;
3760 htab->top_index = top_index;
3761 amt = sizeof (asection *) * (top_index + 1);
3762 input_list = (asection **) bfd_malloc (amt);
3763 htab->input_list = input_list;
3764 if (input_list == NULL)
3767 /* For sections we aren't interested in, mark their entries with a
3768 value we can check later. */
3769 list = input_list + top_index;
3771 *list = bfd_abs_section_ptr;
3772 while (list-- != input_list);
3774 for (section = output_bfd->sections;
3776 section = section->next)
3778 if ((section->flags & SEC_CODE) != 0)
3779 input_list[section->index] = NULL;
3785 /* The linker repeatedly calls this function for each input section,
3786 in the order that input sections are linked into output sections.
3787 Build lists of input sections to determine groupings between which
3788 we may insert linker stubs. */
3791 elf32_arm_next_input_section (struct bfd_link_info *info,
3794 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3799 if (isec->output_section->index <= htab->top_index)
3801 asection **list = htab->input_list + isec->output_section->index;
3803 if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
3805 /* Steal the link_sec pointer for our list. */
3806 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
3807 /* This happens to make the list in reverse order,
3808 which we reverse later. */
3809 PREV_SEC (isec) = *list;
3815 /* See whether we can group stub sections together. Grouping stub
3816 sections may result in fewer stubs. More importantly, we need to
3817 put all .init* and .fini* stubs at the end of the .init or
3818 .fini output sections respectively, because glibc splits the
3819 _init and _fini functions into multiple parts. Putting a stub in
3820 the middle of a function is not a good idea. */
3823 group_sections (struct elf32_arm_link_hash_table *htab,
3824 bfd_size_type stub_group_size,
3825 bfd_boolean stubs_always_after_branch)
3827 asection **list = htab->input_list;
3831 asection *tail = *list;
3834 if (tail == bfd_abs_section_ptr)
3837 /* Reverse the list: we must avoid placing stubs at the
3838 beginning of the section because the beginning of the text
3839 section may be required for an interrupt vector in bare metal code. */
3841 #define NEXT_SEC PREV_SEC
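/* Once the list has been reversed, the same per-section link_sec slot is
   reused as the forward pointer. */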
3843 while (tail != NULL)
3845 /* Pop from tail. */
3846 asection *item = tail;
3847 tail = PREV_SEC (item);
3850 NEXT_SEC (item) = head;
3854 while (head != NULL)
3858 bfd_vma stub_group_start = head->output_offset;
3859 bfd_vma end_of_next;
3862 while (NEXT_SEC (curr) != NULL)
3864 next = NEXT_SEC (curr);
3865 end_of_next = next->output_offset + next->size;
3866 if (end_of_next - stub_group_start >= stub_group_size)
3867 /* End of NEXT is too far from start, so stop. */
3869 /* Add NEXT to the group. */
3873 /* OK, the size from the start to the start of CURR is less
3874 than stub_group_size and thus can be handled by one stub
3875 section. (Or the head section is itself larger than
3876 stub_group_size, in which case we may be toast.)
3877 We should really be keeping track of the total size of
3878 stubs added here, as stubs contribute to the final output
3882 next = NEXT_SEC (head);
3883 /* Set up this stub group. */
3884 htab->stub_group[head->id].link_sec = curr;
3886 while (head != curr && (head = next) != NULL);
3888 /* But wait, there's more! Input sections up to stub_group_size
3889 bytes after the stub section can be handled by it too. */
3890 if (!stubs_always_after_branch)
3892 stub_group_start = curr->output_offset + curr->size;
3894 while (next != NULL)
3896 end_of_next = next->output_offset + next->size;
3897 if (end_of_next - stub_group_start >= stub_group_size)
3898 /* End of NEXT is too far from stubs, so stop. */
3900 /* Add NEXT to the stub group. */
3902 next = NEXT_SEC (head);
3903 htab->stub_group[head->id].link_sec = curr;
3909 while (list++ != htab->input_list + htab->top_index);
3911 free (htab->input_list);
3916 /* Comparison function for sorting/searching relocations relating to Cortex-A8
3920 a8_reloc_compare (const void *a, const void *b)
3922 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
3923 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
3925 if (ra->from < rb->from)
3927 else if (ra->from > rb->from)
3933 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
3934 const char *, char **);
3936 /* Helper function to scan code for sequences which might trigger the Cortex-A8
3937 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
3938 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false otherwise. */
3942 cortex_a8_erratum_scan (bfd *input_bfd,
3943 struct bfd_link_info *info,
3944 struct a8_erratum_fix **a8_fixes_p,
3945 unsigned int *num_a8_fixes_p,
3946 unsigned int *a8_fix_table_size_p,
3947 struct a8_erratum_reloc *a8_relocs,
3948 unsigned int num_a8_relocs,
3949 unsigned prev_num_a8_fixes,
3950 bfd_boolean *stub_changed_p)
3953 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
3954 struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
3955 unsigned int num_a8_fixes = *num_a8_fixes_p;
3956 unsigned int a8_fix_table_size = *a8_fix_table_size_p;
3961 for (section = input_bfd->sections;
3963 section = section->next)
3965 bfd_byte *contents = NULL;
3966 struct _arm_elf_section_data *sec_data;
3970 if (elf_section_type (section) != SHT_PROGBITS
3971 || (elf_section_flags (section) & SHF_EXECINSTR) == 0
3972 || (section->flags & SEC_EXCLUDE) != 0
3973 || (section->sec_info_type == ELF_INFO_TYPE_JUST_SYMS)
3974 || (section->output_section == bfd_abs_section_ptr))
3977 base_vma = section->output_section->vma + section->output_offset;
3979 if (elf_section_data (section)->this_hdr.contents != NULL)
3980 contents = elf_section_data (section)->this_hdr.contents;
3981 else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
3984 sec_data = elf32_arm_section_data (section);
3986 for (span = 0; span < sec_data->mapcount; span++)
3988 unsigned int span_start = sec_data->map[span].vma;
3989 unsigned int span_end = (span == sec_data->mapcount - 1)
3990 ? section->size : sec_data->map[span + 1].vma;
3992 char span_type = sec_data->map[span].type;
3993 bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
3995 if (span_type != 't')
3998 /* Span is entirely within a single 4KB region: skip scanning. */
3999 if (((base_vma + span_start) & ~0xfff)
4000 == ((base_vma + span_end) & ~0xfff))
4003 /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
4005 * The opcode is BLX.W, BL.W, B.W, Bcc.W
4006 * The branch target is in the same 4KB region as the
4007 first half of the branch.
4008 * The instruction before the branch is a 32-bit
4009 length non-branch instruction. */
4010 for (i = span_start; i < span_end;)
4012 unsigned int insn = bfd_getl16 (&contents[i]);
4013 bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
4014 bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
4016 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
4021 /* Load the rest of the insn (in manual-friendly order). */
4022 insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
4024 /* Encoding T4: B<c>.W. */
4025 is_b = (insn & 0xf800d000) == 0xf0009000;
4026 /* Encoding T1: BL<c>.W. */
4027 is_bl = (insn & 0xf800d000) == 0xf000d000;
4028 /* Encoding T2: BLX<c>.W. */
4029 is_blx = (insn & 0xf800d000) == 0xf000c000;
4030 /* Encoding T3: B<c>.W (not permitted in IT block). */
4031 is_bcc = (insn & 0xf800d000) == 0xf0008000
4032 && (insn & 0x07f00000) != 0x03800000;
4035 is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
4037 if (((base_vma + i) & 0xfff) == 0xffe
4041 && ! last_was_branch)
4043 bfd_signed_vma offset;
4044 bfd_boolean force_target_arm = FALSE;
4045 bfd_boolean force_target_thumb = FALSE;
4047 enum elf32_arm_stub_type stub_type = arm_stub_none;
4048 struct a8_erratum_reloc key, *found;
4050 key.from = base_vma + i;
4051 found = (struct a8_erratum_reloc *)
4052 bsearch (&key, a8_relocs, num_a8_relocs,
4053 sizeof (struct a8_erratum_reloc),
4058 char *error_message = NULL;
4059 struct elf_link_hash_entry *entry;
4061 /* We don't care about the error returned from this
4062 function, only if there is glue or not. */
4063 entry = find_thumb_glue (info, found->sym_name,
4067 found->non_a8_stub = TRUE;
4069 if (found->r_type == R_ARM_THM_CALL
4070 && found->st_type != STT_ARM_TFUNC)
4071 force_target_arm = TRUE;
4072 else if (found->r_type == R_ARM_THM_CALL
4073 && found->st_type == STT_ARM_TFUNC)
4074 force_target_thumb = TRUE;
4077 /* Check if we have an offending branch instruction. */
4079 if (found && found->non_a8_stub)
4080 /* We've already made a stub for this instruction, e.g.
4081 it's a long branch or a Thumb->ARM stub. Assume that
4082 stub will suffice to work around the A8 erratum (see
4083 setting of always_after_branch above). */
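/* The offset of a Bcc.W (encoding T3) branch is reassembled below from
   imm11, imm6, J1, J2 and S (low to high bits), scaled by two. */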
4087 offset = (insn & 0x7ff) << 1;
4088 offset |= (insn & 0x3f0000) >> 4;
4089 offset |= (insn & 0x2000) ? 0x40000 : 0;
4090 offset |= (insn & 0x800) ? 0x80000 : 0;
4091 offset |= (insn & 0x4000000) ? 0x100000 : 0;
4092 if (offset & 0x100000)
4093 offset |= ~ ((bfd_signed_vma) 0xfffff);
4094 stub_type = arm_stub_a8_veneer_b_cond;
4096 else if (is_b || is_bl || is_blx)
4098 int s = (insn & 0x4000000) != 0;
4099 int j1 = (insn & 0x2000) != 0;
4100 int j2 = (insn & 0x800) != 0;
4104 offset = (insn & 0x7ff) << 1;
4105 offset |= (insn & 0x3ff0000) >> 4;
4109 if (offset & 0x1000000)
4110 offset |= ~ ((bfd_signed_vma) 0xffffff);
4113 offset &= ~ ((bfd_signed_vma) 3);
4115 stub_type = is_blx ? arm_stub_a8_veneer_blx :
4116 is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
4119 if (stub_type != arm_stub_none)
4121 bfd_vma pc_for_insn = base_vma + i + 4;
4123 /* The original instruction is a BL, but the target is
4124 an ARM instruction. If we were not making a stub,
4125 the BL would have been converted to a BLX. Use the
4126 BLX stub instead in that case. */
4127 if (htab->use_blx && force_target_arm
4128 && stub_type == arm_stub_a8_veneer_bl)
4130 stub_type = arm_stub_a8_veneer_blx;
4134 /* Conversely, if the original instruction was
4135 BLX but the target is Thumb mode, use the BL stub. */
4137 else if (force_target_thumb
4138 && stub_type == arm_stub_a8_veneer_blx)
4140 stub_type = arm_stub_a8_veneer_bl;
4146 pc_for_insn &= ~ ((bfd_vma) 3);
4148 /* If we found a relocation, use the proper destination,
4149 not the offset in the (unrelocated) instruction.
4150 Note this is always done if we switched the stub type
4154 (bfd_signed_vma) (found->destination - pc_for_insn);
4156 target = pc_for_insn + offset;
4158 /* The BLX stub is ARM-mode code. Adjust the offset to
4159 take the different PC value (+8 instead of +4) into account. */
4161 if (stub_type == arm_stub_a8_veneer_blx)
4164 if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
4166 char *stub_name = NULL;
4168 if (num_a8_fixes == a8_fix_table_size)
4170 a8_fix_table_size *= 2;
4171 a8_fixes = (struct a8_erratum_fix *)
4172 bfd_realloc (a8_fixes,
4173 sizeof (struct a8_erratum_fix)
4174 * a8_fix_table_size);
4177 if (num_a8_fixes < prev_num_a8_fixes)
4179 /* If we're doing a subsequent scan,
4180 check if we've found the same fix as
4181 before, and try and reuse the stub
4183 stub_name = a8_fixes[num_a8_fixes].stub_name;
4184 if ((a8_fixes[num_a8_fixes].section != section)
4185 || (a8_fixes[num_a8_fixes].offset != i))
4189 *stub_changed_p = TRUE;
4195 stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
4196 if (stub_name != NULL)
4197 sprintf (stub_name, "%x:%x", section->id, i);
4200 a8_fixes[num_a8_fixes].input_bfd = input_bfd;
4201 a8_fixes[num_a8_fixes].section = section;
4202 a8_fixes[num_a8_fixes].offset = i;
4203 a8_fixes[num_a8_fixes].addend = offset;
4204 a8_fixes[num_a8_fixes].orig_insn = insn;
4205 a8_fixes[num_a8_fixes].stub_name = stub_name;
4206 a8_fixes[num_a8_fixes].stub_type = stub_type;
4213 i += insn_32bit ? 4 : 2;
4214 last_was_32bit = insn_32bit;
4215 last_was_branch = is_32bit_branch;
4219 if (elf_section_data (section)->this_hdr.contents == NULL)
4223 *a8_fixes_p = a8_fixes;
4224 *num_a8_fixes_p = num_a8_fixes;
4225 *a8_fix_table_size_p = a8_fix_table_size;
4230 /* Determine and set the size of the stub section for a final link.
4232 The basic idea here is to examine all the relocations looking for
4233 PC-relative calls to a target that is unreachable with a "bl" instruction.  */
4237 elf32_arm_size_stubs (bfd *output_bfd,
4239 struct bfd_link_info *info,
4240 bfd_signed_vma group_size,
4241 asection * (*add_stub_section) (const char *, asection *),
4242 void (*layout_sections_again) (void))
4244 bfd_size_type stub_group_size;
4245 bfd_boolean stubs_always_after_branch;
4246 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4247 struct a8_erratum_fix *a8_fixes = NULL;
4248 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
4249 struct a8_erratum_reloc *a8_relocs = NULL;
4250 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
4255 if (htab->fix_cortex_a8)
4257 a8_fixes = (struct a8_erratum_fix *)
4258 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
4259 a8_relocs = (struct a8_erratum_reloc *)
4260 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
4263 /* Propagate mach to stub bfd, because it may not have been
4264 finalized when we created stub_bfd. */
4265 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
4266 bfd_get_mach (output_bfd));
4268 /* Stash our params away. */
4269 htab->stub_bfd = stub_bfd;
4270 htab->add_stub_section = add_stub_section;
4271 htab->layout_sections_again = layout_sections_again;
4272 stubs_always_after_branch = group_size < 0;
4274 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
4275 as the first half of a 32-bit branch straddling two 4K pages. This is a
4276 crude way of enforcing that. */
4277 if (htab->fix_cortex_a8)
4278 stubs_always_after_branch = 1;
4281 stub_group_size = -group_size;
4283 stub_group_size = group_size;
4285 if (stub_group_size == 1)
4287 /* Default values. */
4288 /* The Thumb branch range of +-4MB has to be used as the default
4289 maximum size (a given section can contain both ARM and Thumb
4290 code, so the worst case has to be taken into account).
4292 This value is 24K less than that, which allows for 2025
4293 12-byte stubs.  If we exceed that, then we will fail to link.
4294 The user will have to relink with an explicit group size option.  */
4296 stub_group_size = 4170000;
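/* As a sanity check on the value above: 4MB is 4194304 bytes, and
   4194304 - 4170000 = 24304 bytes, which is room for 2025 twelve-byte
   stubs (2025 * 12 = 24300).  */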
4299 group_sections (htab, stub_group_size, stubs_always_after_branch);
4301 /* If we're applying the cortex A8 fix, we need to determine the
4302 program header size now, because we cannot change it later --
4303 that could alter section placements. Notice the A8 erratum fix
4304 ends up requiring the section addresses to remain unchanged
4305 modulo the page size. That's something we cannot represent
4306 inside BFD, and we don't want to force the section alignment to
4307 be the page size. */
4308 if (htab->fix_cortex_a8)
4309 (*htab->layout_sections_again) ();
4314 unsigned int bfd_indx;
4316 bfd_boolean stub_changed = FALSE;
4317 unsigned prev_num_a8_fixes = num_a8_fixes;
4320 for (input_bfd = info->input_bfds, bfd_indx = 0;
4322 input_bfd = input_bfd->link_next, bfd_indx++)
4324 Elf_Internal_Shdr *symtab_hdr;
4326 Elf_Internal_Sym *local_syms = NULL;
4330 /* We'll need the symbol table in a second. */
4331 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4332 if (symtab_hdr->sh_info == 0)
4335 /* Walk over each section attached to the input bfd. */
4336 for (section = input_bfd->sections;
4338 section = section->next)
4340 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
4342 /* If there aren't any relocs, then there's nothing more to do.  */
4344 if ((section->flags & SEC_RELOC) == 0
4345 || section->reloc_count == 0
4346 || (section->flags & SEC_CODE) == 0)
4349 /* If this section is a link-once section that will be
4350 discarded, then don't create any stubs. */
4351 if (section->output_section == NULL
4352 || section->output_section->owner != output_bfd)
4355 /* Get the relocs. */
4357 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
4358 NULL, info->keep_memory);
4359 if (internal_relocs == NULL)
4360 goto error_ret_free_local;
4362 /* Now examine each relocation. */
4363 irela = internal_relocs;
4364 irelaend = irela + section->reloc_count;
4365 for (; irela < irelaend; irela++)
4367 unsigned int r_type, r_indx;
4368 enum elf32_arm_stub_type stub_type;
4369 struct elf32_arm_stub_hash_entry *stub_entry;
4372 bfd_vma destination;
4373 struct elf32_arm_link_hash_entry *hash;
4374 const char *sym_name;
4376 const asection *id_sec;
4377 unsigned char st_type;
4378 bfd_boolean created_stub = FALSE;
4380 r_type = ELF32_R_TYPE (irela->r_info);
4381 r_indx = ELF32_R_SYM (irela->r_info);
4383 if (r_type >= (unsigned int) R_ARM_max)
4385 bfd_set_error (bfd_error_bad_value);
4386 error_ret_free_internal:
4387 if (elf_section_data (section)->relocs == NULL)
4388 free (internal_relocs);
4389 goto error_ret_free_local;
4392 /* Only look for stubs on branch instructions. */
4393 if ((r_type != (unsigned int) R_ARM_CALL)
4394 && (r_type != (unsigned int) R_ARM_THM_CALL)
4395 && (r_type != (unsigned int) R_ARM_JUMP24)
4396 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
4397 && (r_type != (unsigned int) R_ARM_THM_XPC22)
4398 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
4399 && (r_type != (unsigned int) R_ARM_PLT32))
4402 /* Now determine the call target, its name, value, and section.  */
4409 if (r_indx < symtab_hdr->sh_info)
4411 /* It's a local symbol. */
4412 Elf_Internal_Sym *sym;
4413 Elf_Internal_Shdr *hdr;
4415 if (local_syms == NULL)
4418 = (Elf_Internal_Sym *) symtab_hdr->contents;
4419 if (local_syms == NULL)
4421 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
4422 symtab_hdr->sh_info, 0,
4424 if (local_syms == NULL)
4425 goto error_ret_free_internal;
4428 sym = local_syms + r_indx;
4429 hdr = elf_elfsections (input_bfd)[sym->st_shndx];
4430 sym_sec = hdr->bfd_section;
4432 /* This is an undefined symbol.  It can never be resolved.  */
4436 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
4437 sym_value = sym->st_value;
4438 destination = (sym_value + irela->r_addend
4439 + sym_sec->output_offset
4440 + sym_sec->output_section->vma);
4441 st_type = ELF_ST_TYPE (sym->st_info);
4443 = bfd_elf_string_from_elf_section (input_bfd,
4444 symtab_hdr->sh_link,
4449 /* It's an external symbol. */
4452 e_indx = r_indx - symtab_hdr->sh_info;
4453 hash = ((struct elf32_arm_link_hash_entry *)
4454 elf_sym_hashes (input_bfd)[e_indx]);
4456 while (hash->root.root.type == bfd_link_hash_indirect
4457 || hash->root.root.type == bfd_link_hash_warning)
4458 hash = ((struct elf32_arm_link_hash_entry *)
4459 hash->root.root.u.i.link);
4461 if (hash->root.root.type == bfd_link_hash_defined
4462 || hash->root.root.type == bfd_link_hash_defweak)
4464 sym_sec = hash->root.root.u.def.section;
4465 sym_value = hash->root.root.u.def.value;
4467 struct elf32_arm_link_hash_table *globals =
4468 elf32_arm_hash_table (info);
4470 /* For a destination in a shared library,
4471 use the PLT stub as target address to
4472 decide whether a branch stub is needed.  */
4475 && globals->splt != NULL
4477 && hash->root.plt.offset != (bfd_vma) -1)
4479 sym_sec = globals->splt;
4480 sym_value = hash->root.plt.offset;
4481 if (sym_sec->output_section != NULL)
4482 destination = (sym_value
4483 + sym_sec->output_offset
4484 + sym_sec->output_section->vma);
4486 else if (sym_sec->output_section != NULL)
4487 destination = (sym_value + irela->r_addend
4488 + sym_sec->output_offset
4489 + sym_sec->output_section->vma);
4491 else if ((hash->root.root.type == bfd_link_hash_undefined)
4492 || (hash->root.root.type == bfd_link_hash_undefweak))
4494 /* For a shared library, use the PLT stub as
4495 target address to decide whether a long
4496 branch stub is needed.
4497 For absolute code, they cannot be handled. */
4498 struct elf32_arm_link_hash_table *globals =
4499 elf32_arm_hash_table (info);
4502 && globals->splt != NULL
4504 && hash->root.plt.offset != (bfd_vma) -1)
4506 sym_sec = globals->splt;
4507 sym_value = hash->root.plt.offset;
4508 if (sym_sec->output_section != NULL)
4509 destination = (sym_value
4510 + sym_sec->output_offset
4511 + sym_sec->output_section->vma);
4518 bfd_set_error (bfd_error_bad_value);
4519 goto error_ret_free_internal;
4521 st_type = ELF_ST_TYPE (hash->root.type);
4522 sym_name = hash->root.root.root.string;
4527 /* Determine what (if any) linker stub is needed. */
4528 stub_type = arm_type_of_stub (info, section, irela,
4530 destination, sym_sec,
4531 input_bfd, sym_name);
4532 if (stub_type == arm_stub_none)
4535 /* Support for grouping stub sections. */
4536 id_sec = htab->stub_group[section->id].link_sec;
4538 /* Get the name of this stub. */
4539 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash,
4542 goto error_ret_free_internal;
4544 /* We've either created a stub for this reloc already,
4545 or we are about to. */
4546 created_stub = TRUE;
4548 stub_entry = arm_stub_hash_lookup
4549 (&htab->stub_hash_table, stub_name,
4551 if (stub_entry != NULL)
4553 /* The proper stub has already been created. */
4555 stub_entry->target_value = sym_value;
4559 stub_entry = elf32_arm_add_stub (stub_name, section,
4561 if (stub_entry == NULL)
4564 goto error_ret_free_internal;
4567 stub_entry->target_value = sym_value;
4568 stub_entry->target_section = sym_sec;
4569 stub_entry->stub_type = stub_type;
4570 stub_entry->h = hash;
4571 stub_entry->st_type = st_type;
4573 if (sym_name == NULL)
4574 sym_name = "unnamed";
4575 stub_entry->output_name = (char *)
4576 bfd_alloc (htab->stub_bfd,
4577 sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
4578 + strlen (sym_name));
4579 if (stub_entry->output_name == NULL)
4582 goto error_ret_free_internal;
4585 /* For historical reasons, use the existing names for
4586 ARM-to-Thumb and Thumb-to-ARM stubs. */
4587 if ( ((r_type == (unsigned int) R_ARM_THM_CALL)
4588 || (r_type == (unsigned int) R_ARM_THM_JUMP24))
4589 && st_type != STT_ARM_TFUNC)
4590 sprintf (stub_entry->output_name,
4591 THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
4592 else if ( ((r_type == (unsigned int) R_ARM_CALL)
4593 || (r_type == (unsigned int) R_ARM_JUMP24))
4594 && st_type == STT_ARM_TFUNC)
4595 sprintf (stub_entry->output_name,
4596 ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
4598 sprintf (stub_entry->output_name, STUB_ENTRY_NAME,
4601 stub_changed = TRUE;
4605 /* Look for relocations which might trigger Cortex-A8 erratum fixes.  */
4607 if (htab->fix_cortex_a8
4608 && (r_type == (unsigned int) R_ARM_THM_JUMP24
4609 || r_type == (unsigned int) R_ARM_THM_JUMP19
4610 || r_type == (unsigned int) R_ARM_THM_CALL
4611 || r_type == (unsigned int) R_ARM_THM_XPC22))
4613 bfd_vma from = section->output_section->vma
4614 + section->output_offset
4617 if ((from & 0xfff) == 0xffe)
4619 /* Found a candidate. Note we haven't checked the
4620 destination is within 4K here: if we do so (and
4621 don't create an entry in a8_relocs) we can't tell
4622 that a branch should have been relocated when scanning later.  */
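/* A 32-bit Thumb instruction whose first halfword sits at offset 0xffe
   within a 4K page (the "(from & 0xfff) == 0xffe" test above)
   necessarily straddles the page boundary, which is one of the
   conditions for the Cortex-A8 branch erratum.  */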
4624 if (num_a8_relocs == a8_reloc_table_size)
4626 a8_reloc_table_size *= 2;
4627 a8_relocs = (struct a8_erratum_reloc *)
4628 bfd_realloc (a8_relocs,
4629 sizeof (struct a8_erratum_reloc)
4630 * a8_reloc_table_size);
4633 a8_relocs[num_a8_relocs].from = from;
4634 a8_relocs[num_a8_relocs].destination = destination;
4635 a8_relocs[num_a8_relocs].r_type = r_type;
4636 a8_relocs[num_a8_relocs].st_type = st_type;
4637 a8_relocs[num_a8_relocs].sym_name = sym_name;
4638 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
4645 /* We're done with the internal relocs, free them. */
4646 if (elf_section_data (section)->relocs == NULL)
4647 free (internal_relocs);
4650 if (htab->fix_cortex_a8)
4652 /* Sort relocs which might apply to Cortex-A8 erratum. */
4653 qsort (a8_relocs, num_a8_relocs,
4654 sizeof (struct a8_erratum_reloc),
4657 /* Scan for branches which might trigger Cortex-A8 erratum. */
4658 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
4659 &num_a8_fixes, &a8_fix_table_size,
4660 a8_relocs, num_a8_relocs,
4661 prev_num_a8_fixes, &stub_changed)
4663 goto error_ret_free_local;
4667 if (prev_num_a8_fixes != num_a8_fixes)
4668 stub_changed = TRUE;
4673 /* OK, we've added some stubs.  Find out the new size of the stub sections.  */
4675 for (stub_sec = htab->stub_bfd->sections;
4677 stub_sec = stub_sec->next)
4679 /* Ignore non-stub sections. */
4680 if (!strstr (stub_sec->name, STUB_SUFFIX))
4686 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
4688 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
4689 if (htab->fix_cortex_a8)
4690 for (i = 0; i < num_a8_fixes; i++)
4692 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
4693 a8_fixes[i].section, htab);
4695 if (stub_sec == NULL)
4696 goto error_ret_free_local;
4699 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
4704 /* Ask the linker to do its stuff. */
4705 (*htab->layout_sections_again) ();
4708 /* Add stubs for Cortex-A8 erratum fixes now. */
4709 if (htab->fix_cortex_a8)
4711 for (i = 0; i < num_a8_fixes; i++)
4713 struct elf32_arm_stub_hash_entry *stub_entry;
4714 char *stub_name = a8_fixes[i].stub_name;
4715 asection *section = a8_fixes[i].section;
4716 unsigned int section_id = a8_fixes[i].section->id;
4717 asection *link_sec = htab->stub_group[section_id].link_sec;
4718 asection *stub_sec = htab->stub_group[section_id].stub_sec;
4719 const insn_sequence *template_sequence;
4720 int template_size, size = 0;
4722 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4724 if (stub_entry == NULL)
4726 (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4732 stub_entry->stub_sec = stub_sec;
4733 stub_entry->stub_offset = 0;
4734 stub_entry->id_sec = link_sec;
4735 stub_entry->stub_type = a8_fixes[i].stub_type;
4736 stub_entry->target_section = a8_fixes[i].section;
4737 stub_entry->target_value = a8_fixes[i].offset;
4738 stub_entry->target_addend = a8_fixes[i].addend;
4739 stub_entry->orig_insn = a8_fixes[i].orig_insn;
4740 stub_entry->st_type = STT_ARM_TFUNC;
4742 size = find_stub_size_and_template (a8_fixes[i].stub_type,
4746 stub_entry->stub_size = size;
4747 stub_entry->stub_template = template_sequence;
4748 stub_entry->stub_template_size = template_size;
4751 /* Stash the Cortex-A8 erratum fix array for use later in
4752 elf32_arm_write_section(). */
4753 htab->a8_erratum_fixes = a8_fixes;
4754 htab->num_a8_erratum_fixes = num_a8_fixes;
4758 htab->a8_erratum_fixes = NULL;
4759 htab->num_a8_erratum_fixes = 0;
4763 error_ret_free_local:
4767 /* Build all the stubs associated with the current output file. The
4768 stubs are kept in a hash table attached to the main linker hash
4769 table. We also set up the .plt entries for statically linked PIC
4770 functions here.  This function is called via arm_elf_finish in the linker.  */
4774 elf32_arm_build_stubs (struct bfd_link_info *info)
4777 struct bfd_hash_table *table;
4778 struct elf32_arm_link_hash_table *htab;
4780 htab = elf32_arm_hash_table (info);
4784 for (stub_sec = htab->stub_bfd->sections;
4786 stub_sec = stub_sec->next)
4790 /* Ignore non-stub sections. */
4791 if (!strstr (stub_sec->name, STUB_SUFFIX))
4794 /* Allocate memory to hold the linker stubs. */
4795 size = stub_sec->size;
4796 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
4797 if (stub_sec->contents == NULL && size != 0)
4802 /* Build the stubs as directed by the stub hash table. */
4803 table = &htab->stub_hash_table;
4804 bfd_hash_traverse (table, arm_build_one_stub, info);
4805 if (htab->fix_cortex_a8)
4807 /* Place the Cortex-A8 stubs last.  */
4808 htab->fix_cortex_a8 = -1;
4809 bfd_hash_traverse (table, arm_build_one_stub, info);
4815 /* Locate the Thumb encoded calling stub for NAME. */
4817 static struct elf_link_hash_entry *
4818 find_thumb_glue (struct bfd_link_info *link_info,
4820 char **error_message)
4823 struct elf_link_hash_entry *hash;
4824 struct elf32_arm_link_hash_table *hash_table;
4826 /* We need a pointer to the armelf specific hash table. */
4827 hash_table = elf32_arm_hash_table (link_info);
4828 if (hash_table == NULL)
4831 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4832 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
4834 BFD_ASSERT (tmp_name);
4836 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
4838 hash = elf_link_hash_lookup
4839 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4842 && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
4843 tmp_name, name) == -1)
4844 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4851 /* Locate the ARM encoded calling stub for NAME. */
4853 static struct elf_link_hash_entry *
4854 find_arm_glue (struct bfd_link_info *link_info,
4856 char **error_message)
4859 struct elf_link_hash_entry *myh;
4860 struct elf32_arm_link_hash_table *hash_table;
4862 /* We need a pointer to the elfarm specific hash table. */
4863 hash_table = elf32_arm_hash_table (link_info);
4864 if (hash_table == NULL)
4867 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
4868 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
4870 BFD_ASSERT (tmp_name);
4872 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
4874 myh = elf_link_hash_lookup
4875 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
4878 && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
4879 tmp_name, name) == -1)
4880 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
4887 /* ARM->Thumb glue (static images):
4891 ldr r12, __func_addr
4894 .word func @ behave as if you saw an ARM_32 reloc.
4901 .word func @ behave as if you saw an ARM_32 reloc.
4903 (relocatable images)
4906 ldr r12, __func_offset
4912 #define ARM2THUMB_STATIC_GLUE_SIZE 12
4913 static const insn32 a2t1_ldr_insn = 0xe59fc000;
4914 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
4915 static const insn32 a2t3_func_addr_insn = 0x00000001;
4917 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
4918 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
4919 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
4921 #define ARM2THUMB_PIC_GLUE_SIZE 16
4922 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
4923 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
4924 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
4926 /* Thumb->ARM: Thumb->(non-interworking aware) ARM
4930 __func_from_thumb: __func_from_thumb:
4932 nop ldr r6, __func_addr
4942 #define THUMB2ARM_GLUE_SIZE 8
4943 static const insn16 t2a1_bx_pc_insn = 0x4778;
4944 static const insn16 t2a2_noop_insn = 0x46c0;
4945 static const insn32 t2a3_b_insn = 0xea000000;
4947 #define VFP11_ERRATUM_VENEER_SIZE 8
4949 #define ARM_BX_VENEER_SIZE 12
4950 static const insn32 armbx1_tst_insn = 0xe3100001;
4951 static const insn32 armbx2_moveq_insn = 0x01a0f000;
4952 static const insn32 armbx3_bx_insn = 0xe12fff10;
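/* Decoded for reference: armbx1_tst_insn is "tst <reg>, #1" with the
   register number inserted at bits 16-19, armbx2_moveq_insn is
   "moveq pc, <reg>" and armbx3_bx_insn is "bx <reg>" with the register
   in bits 0-3.  elf32_arm_bx_glue() below adds the register number
   into these templates when emitting a veneer.  */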
4954 #ifndef ELFARM_NABI_C_INCLUDED
4956 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
4959 bfd_byte * contents;
4963 /* Do not include empty glue sections in the output. */
4966 s = bfd_get_section_by_name (abfd, name);
4968 s->flags |= SEC_EXCLUDE;
4973 BFD_ASSERT (abfd != NULL);
4975 s = bfd_get_section_by_name (abfd, name);
4976 BFD_ASSERT (s != NULL);
4978 contents = (bfd_byte *) bfd_alloc (abfd, size);
4980 BFD_ASSERT (s->size == size);
4981 s->contents = contents;
4985 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
4987 struct elf32_arm_link_hash_table * globals;
4989 globals = elf32_arm_hash_table (info);
4990 BFD_ASSERT (globals != NULL);
4992 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4993 globals->arm_glue_size,
4994 ARM2THUMB_GLUE_SECTION_NAME);
4996 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
4997 globals->thumb_glue_size,
4998 THUMB2ARM_GLUE_SECTION_NAME);
5000 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5001 globals->vfp11_erratum_glue_size,
5002 VFP11_ERRATUM_VENEER_SECTION_NAME);
5004 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
5005 globals->bx_glue_size,
5006 ARM_BX_GLUE_SECTION_NAME);
5011 /* Allocate space and symbols for calling a Thumb function from Arm mode.
5012 Returns the symbol identifying the stub.  */
5014 static struct elf_link_hash_entry *
5015 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
5016 struct elf_link_hash_entry * h)
5018 const char * name = h->root.root.string;
5021 struct elf_link_hash_entry * myh;
5022 struct bfd_link_hash_entry * bh;
5023 struct elf32_arm_link_hash_table * globals;
5027 globals = elf32_arm_hash_table (link_info);
5028 BFD_ASSERT (globals != NULL);
5029 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5031 s = bfd_get_section_by_name
5032 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
5034 BFD_ASSERT (s != NULL);
5036 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5037 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
5039 BFD_ASSERT (tmp_name);
5041 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
5043 myh = elf_link_hash_lookup
5044 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
5048 /* We've already seen this guy. */
5053 /* The only trick here is using globals->arm_glue_size as the value.
5054 Even though the section isn't allocated yet, this is where we will be
5055 putting it. The +1 on the value marks that the stub has not been
5056 output yet - not that it is a Thumb function. */
5058 val = globals->arm_glue_size + 1;
5059 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5060 tmp_name, BSF_GLOBAL, s, val,
5061 NULL, TRUE, FALSE, &bh);
5063 myh = (struct elf_link_hash_entry *) bh;
5064 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5065 myh->forced_local = 1;
5069 if (link_info->shared || globals->root.is_relocatable_executable
5070 || globals->pic_veneer)
5071 size = ARM2THUMB_PIC_GLUE_SIZE;
5072 else if (globals->use_blx)
5073 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
5075 size = ARM2THUMB_STATIC_GLUE_SIZE;
5078 globals->arm_glue_size += size;
5083 /* Allocate space for ARMv4 BX veneers. */
5086 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
5089 struct elf32_arm_link_hash_table *globals;
5091 struct elf_link_hash_entry *myh;
5092 struct bfd_link_hash_entry *bh;
5095 /* BX PC does not need a veneer. */
5099 globals = elf32_arm_hash_table (link_info);
5100 BFD_ASSERT (globals != NULL);
5101 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
5103 /* Check if this veneer has already been allocated. */
5104 if (globals->bx_glue_offset[reg])
5107 s = bfd_get_section_by_name
5108 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
5110 BFD_ASSERT (s != NULL);
5112 /* Add symbol for veneer. */
5114 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
5116 BFD_ASSERT (tmp_name);
5118 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
5120 myh = elf_link_hash_lookup
5121 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
5123 BFD_ASSERT (myh == NULL);
5126 val = globals->bx_glue_size;
5127 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
5128 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5129 NULL, TRUE, FALSE, &bh);
5131 myh = (struct elf_link_hash_entry *) bh;
5132 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5133 myh->forced_local = 1;
5135 s->size += ARM_BX_VENEER_SIZE;
5136 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
5137 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
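/* The low two bits of bx_glue_offset[reg] are flags rather than part
   of the offset: bit 1 (set here) records that a veneer has been
   allocated for this register, and bit 0 is set later by
   elf32_arm_bx_glue() once the veneer contents have been written.
   Consumers mask with ~3 to recover the actual offset.  */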
5141 /* Add an entry to the code/data map for section SEC. */
5144 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
5146 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
5147 unsigned int newidx;
5149 if (sec_data->map == NULL)
5151 sec_data->map = (elf32_arm_section_map *)
5152 bfd_malloc (sizeof (elf32_arm_section_map));
5153 sec_data->mapcount = 0;
5154 sec_data->mapsize = 1;
5157 newidx = sec_data->mapcount++;
5159 if (sec_data->mapcount > sec_data->mapsize)
5161 sec_data->mapsize *= 2;
5162 sec_data->map = (elf32_arm_section_map *)
5163 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
5164 * sizeof (elf32_arm_section_map));
5169 sec_data->map[newidx].vma = vma;
5170 sec_data->map[newidx].type = type;
5175 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
5176 veneers are handled for now. */
5179 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
5180 elf32_vfp11_erratum_list *branch,
5182 asection *branch_sec,
5183 unsigned int offset)
5186 struct elf32_arm_link_hash_table *hash_table;
5188 struct elf_link_hash_entry *myh;
5189 struct bfd_link_hash_entry *bh;
5191 struct _arm_elf_section_data *sec_data;
5193 elf32_vfp11_erratum_list *newerr;
5195 hash_table = elf32_arm_hash_table (link_info);
5196 BFD_ASSERT (hash_table != NULL);
5197 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
5199 s = bfd_get_section_by_name
5200 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
5202 sec_data = elf32_arm_section_data (s);
5204 BFD_ASSERT (s != NULL);
5206 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
5207 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
5209 BFD_ASSERT (tmp_name);
5211 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
5212 hash_table->num_vfp11_fixes);
5214 myh = elf_link_hash_lookup
5215 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5217 BFD_ASSERT (myh == NULL);
5220 val = hash_table->vfp11_erratum_glue_size;
5221 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
5222 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
5223 NULL, TRUE, FALSE, &bh);
5225 myh = (struct elf_link_hash_entry *) bh;
5226 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5227 myh->forced_local = 1;
5229 /* Link veneer back to calling location. */
5230 errcount = ++(sec_data->erratumcount);
5231 newerr = (elf32_vfp11_erratum_list *)
5232 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
5234 newerr->type = VFP11_ERRATUM_ARM_VENEER;
5236 newerr->u.v.branch = branch;
5237 newerr->u.v.id = hash_table->num_vfp11_fixes;
5238 branch->u.b.veneer = newerr;
5240 newerr->next = sec_data->erratumlist;
5241 sec_data->erratumlist = newerr;
5243 /* A symbol for the return from the veneer. */
5244 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
5245 hash_table->num_vfp11_fixes);
5247 myh = elf_link_hash_lookup
5248 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
5255 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
5256 branch_sec, val, NULL, TRUE, FALSE, &bh);
5258 myh = (struct elf_link_hash_entry *) bh;
5259 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
5260 myh->forced_local = 1;
5264 /* Generate a mapping symbol for the veneer section, and explicitly add an
5265 entry for that symbol to the code/data map for the section. */
5266 if (hash_table->vfp11_erratum_glue_size == 0)
5269 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
5270 ever requires this erratum fix. */
5271 _bfd_generic_link_add_one_symbol (link_info,
5272 hash_table->bfd_of_glue_owner, "$a",
5273 BSF_LOCAL, s, 0, NULL,
5276 myh = (struct elf_link_hash_entry *) bh;
5277 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
5278 myh->forced_local = 1;
5280 /* The elf32_arm_init_maps function only cares about symbols from input
5281 BFDs. We must make a note of this generated mapping symbol
5282 ourselves so that code byteswapping works properly in
5283 elf32_arm_write_section. */
5284 elf32_arm_section_map_add (s, 'a', 0);
5287 s->size += VFP11_ERRATUM_VENEER_SIZE;
5288 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
5289 hash_table->num_vfp11_fixes++;
5291 /* The offset of the veneer. */
5295 #define ARM_GLUE_SECTION_FLAGS \
5296 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
5297 | SEC_READONLY | SEC_LINKER_CREATED)
5299 /* Create a fake section for use by the ARM backend of the linker. */
5302 arm_make_glue_section (bfd * abfd, const char * name)
5306 sec = bfd_get_section_by_name (abfd, name);
5311 sec = bfd_make_section_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
5314 || !bfd_set_section_alignment (abfd, sec, 2))
5317 /* Set the gc mark to prevent the section from being removed by garbage
5318 collection, despite the fact that no relocs refer to this section. */
5324 /* Add the glue sections to ABFD. This function is called from the
5325 linker scripts in ld/emultempl/{armelf}.em. */
5328 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
5329 struct bfd_link_info *info)
5331 /* If we are only performing a partial
5332 link do not bother adding the glue. */
5333 if (info->relocatable)
5336 return arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
5337 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
5338 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
5339 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
5342 /* Select a BFD to be used to hold the sections used by the glue code.
5343 This function is called from the linker scripts in ld/emultempl/{armelf}.em.  */
5347 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
5349 struct elf32_arm_link_hash_table *globals;
5351 /* If we are only performing a partial link
5352 do not bother getting a bfd to hold the glue. */
5353 if (info->relocatable)
5356 /* Make sure we don't attach the glue sections to a dynamic object. */
5357 BFD_ASSERT (!(abfd->flags & DYNAMIC));
5359 globals = elf32_arm_hash_table (info);
5360 BFD_ASSERT (globals != NULL);
5362 if (globals->bfd_of_glue_owner != NULL)
5365 /* Save the bfd for later use. */
5366 globals->bfd_of_glue_owner = abfd;
5372 check_use_blx (struct elf32_arm_link_hash_table *globals)
5374 if (bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
5376 globals->use_blx = 1;
5380 bfd_elf32_arm_process_before_allocation (bfd *abfd,
5381 struct bfd_link_info *link_info)
5383 Elf_Internal_Shdr *symtab_hdr;
5384 Elf_Internal_Rela *internal_relocs = NULL;
5385 Elf_Internal_Rela *irel, *irelend;
5386 bfd_byte *contents = NULL;
5389 struct elf32_arm_link_hash_table *globals;
5391 /* If we are only performing a partial link do not bother
5392 to construct any glue. */
5393 if (link_info->relocatable)
5396 /* Here we have a bfd that is to be included on the link. We have a
5397 hook to do reloc rummaging, before section sizes are nailed down. */
5398 globals = elf32_arm_hash_table (link_info);
5399 BFD_ASSERT (globals != NULL);
5401 check_use_blx (globals);
5403 if (globals->byteswap_code && !bfd_big_endian (abfd))
5405 _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
5410 /* PR 5398: If we have not decided to include any loadable sections in
5411 the output then we will not have a glue owner bfd. This is OK, it
5412 just means that there is nothing else for us to do here. */
5413 if (globals->bfd_of_glue_owner == NULL)
5416 /* Rummage around all the relocs and map the glue vectors. */
5417 sec = abfd->sections;
5422 for (; sec != NULL; sec = sec->next)
5424 if (sec->reloc_count == 0)
5427 if ((sec->flags & SEC_EXCLUDE) != 0)
5430 symtab_hdr = & elf_symtab_hdr (abfd);
5432 /* Load the relocs. */
5434 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
5436 if (internal_relocs == NULL)
5439 irelend = internal_relocs + sec->reloc_count;
5440 for (irel = internal_relocs; irel < irelend; irel++)
5443 unsigned long r_index;
5445 struct elf_link_hash_entry *h;
5447 r_type = ELF32_R_TYPE (irel->r_info);
5448 r_index = ELF32_R_SYM (irel->r_info);
5450 /* These are the only relocation types we care about. */
5451 if ( r_type != R_ARM_PC24
5452 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
5455 /* Get the section contents if we haven't done so already. */
5456 if (contents == NULL)
5458 /* Get cached copy if it exists. */
5459 if (elf_section_data (sec)->this_hdr.contents != NULL)
5460 contents = elf_section_data (sec)->this_hdr.contents;
5463 /* Go get them off disk. */
5464 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5469 if (r_type == R_ARM_V4BX)
5473 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
5474 record_arm_bx_glue (link_info, reg);
5478 /* If the relocation is not against a symbol it cannot concern us. */
5481 /* We don't care about local symbols. */
5482 if (r_index < symtab_hdr->sh_info)
5485 /* This is an external symbol. */
5486 r_index -= symtab_hdr->sh_info;
5487 h = (struct elf_link_hash_entry *)
5488 elf_sym_hashes (abfd)[r_index];
5490 /* If the relocation is against a static symbol it must be within
5491 the current section and so cannot be a cross ARM/Thumb relocation. */
5495 /* If the call will go through a PLT entry then we do not need glue.  */
5497 if (globals->splt != NULL && h->plt.offset != (bfd_vma) -1)
5503 /* This one is a call from arm code. We need to look up
5504 the target of the call.  If it is a thumb target, we insert glue.  */
5506 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
5507 record_arm_to_thumb_glue (link_info, h);
5515 if (contents != NULL
5516 && elf_section_data (sec)->this_hdr.contents != contents)
5520 if (internal_relocs != NULL
5521 && elf_section_data (sec)->relocs != internal_relocs)
5522 free (internal_relocs);
5523 internal_relocs = NULL;
5529 if (contents != NULL
5530 && elf_section_data (sec)->this_hdr.contents != contents)
5532 if (internal_relocs != NULL
5533 && elf_section_data (sec)->relocs != internal_relocs)
5534 free (internal_relocs);
5541 /* Initialise maps of ARM/Thumb/data for input BFDs. */
5544 bfd_elf32_arm_init_maps (bfd *abfd)
5546 Elf_Internal_Sym *isymbuf;
5547 Elf_Internal_Shdr *hdr;
5548 unsigned int i, localsyms;
5550 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
5551 if (! is_arm_elf (abfd))
5554 if ((abfd->flags & DYNAMIC) != 0)
5557 hdr = & elf_symtab_hdr (abfd);
5558 localsyms = hdr->sh_info;
5560 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
5561 should contain the number of local symbols, which should come before any
5562 global symbols. Mapping symbols are always local. */
5563 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
5566 /* No internal symbols read? Skip this BFD. */
5567 if (isymbuf == NULL)
5570 for (i = 0; i < localsyms; i++)
5572 Elf_Internal_Sym *isym = &isymbuf[i];
5573 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
5577 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
5579 name = bfd_elf_string_from_elf_section (abfd,
5580 hdr->sh_link, isym->st_name);
5582 if (bfd_is_arm_special_symbol_name (name,
5583 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
5584 elf32_arm_section_map_add (sec, name[1], isym->st_value);
5590 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
5591 say what they wanted. */
5594 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
5596 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5597 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5599 if (globals == NULL)
5602 if (globals->fix_cortex_a8 == -1)
5604 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
5605 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
5606 && (out_attr[Tag_CPU_arch_profile].i == 'A'
5607 || out_attr[Tag_CPU_arch_profile].i == 0))
5608 globals->fix_cortex_a8 = 1;
5610 globals->fix_cortex_a8 = 0;
5616 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
5618 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5619 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
5621 if (globals == NULL)
5623 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
5624 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
5626 switch (globals->vfp11_fix)
5628 case BFD_ARM_VFP11_FIX_DEFAULT:
5629 case BFD_ARM_VFP11_FIX_NONE:
5630 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5634 /* Give a warning, but do as the user requests anyway. */
5635 (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
5636 "workaround is not necessary for target architecture"), obfd);
5639 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
5640 /* For earlier architectures, we might need the workaround, but do not
5641 enable it by default.  If a user is running with broken hardware, they
5642 must enable the erratum fix explicitly. */
5643 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
5647 enum bfd_arm_vfp11_pipe
5655 /* Return a VFP register number. This is encoded as RX:X for single-precision
5656 registers, or X:RX for double-precision registers, where RX is the group of
5657 four bits in the instruction encoding and X is the single extension bit.
5658 RX and X fields are specified using their lowest (starting) bit.  The return value is:
5661 0...31: single-precision registers s0...s31
5662 32...63: double-precision registers d0...d31.
5664 Although X should be zero for VFP11 (encoding d0...d15 only), we might
5665 encounter VFP3 instructions, so we allow the full range for DP registers. */
5668 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
5672 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
5674 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
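/* Worked example: with an RX field of 0x5 and an X bit of 1, a
   single-precision operand decodes to 5*2+1 = 11 (s11), while a
   double-precision operand decodes to (5 | 1<<4) + 32 = 53, i.e. d21
   in the 32...63 range.  */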
5677 /* Set bits in *WMASK according to a register number REG as encoded by
5678 bfd_arm_vfp11_regno(). Ignore d16-d31. */
5681 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
5686 *wmask |= 3 << ((reg - 32) * 2);
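/* For example, d3 (register number 35) sets bits 6 and 7, the same
   bits as s6 and s7, so overlapping single- and double-precision uses
   of the same physical registers are caught by the antidependency
   check below.  */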
5689 /* Return TRUE if WMASK overwrites anything in REGS. */
5692 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
5696 for (i = 0; i < numregs; i++)
5698 unsigned int reg = regs[i];
5700 if (reg < 32 && (wmask & (1 << reg)) != 0)
5708 if ((wmask & (3 << (reg * 2))) != 0)
5715 /* In this function, we're interested in two things: finding input registers
5716 for VFP data-processing instructions, and finding the set of registers which
5717 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
5718 hold the written set, so FLDM etc. are easy to deal with (we're only
5719 interested in 32 SP registers or 16 dp registers, due to the VFP version
5720 implemented by the chip in question). DP registers are marked by setting
5721 both SP registers in the write mask.  */
5723 static enum bfd_arm_vfp11_pipe
5724 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
5727 enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
5728 bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
5730 if ((insn & 0x0f000e10) == 0x0e000a00) /* A data-processing insn. */
5733 unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5734 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5736 pqrs = ((insn & 0x00800000) >> 20)
5737 | ((insn & 0x00300000) >> 19)
5738 | ((insn & 0x00000040) >> 6);
5742 case 0: /* fmac[sd]. */
5743 case 1: /* fnmac[sd]. */
5744 case 2: /* fmsc[sd]. */
5745 case 3: /* fnmsc[sd]. */
5747 bfd_arm_vfp11_write_mask (destmask, fd);
5749 regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5754 case 4: /* fmul[sd]. */
5755 case 5: /* fnmul[sd]. */
5756 case 6: /* fadd[sd]. */
5757 case 7: /* fsub[sd]. */
5761 case 8: /* fdiv[sd]. */
5764 bfd_arm_vfp11_write_mask (destmask, fd);
5765 regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7); /* Fn. */
5770 case 15: /* extended opcode. */
5772 unsigned int extn = ((insn >> 15) & 0x1e)
5773 | ((insn >> 7) & 1);
5777 case 0: /* fcpy[sd]. */
5778 case 1: /* fabs[sd]. */
5779 case 2: /* fneg[sd]. */
5780 case 8: /* fcmp[sd]. */
5781 case 9: /* fcmpe[sd]. */
5782 case 10: /* fcmpz[sd]. */
5783 case 11: /* fcmpez[sd]. */
5784 case 16: /* fuito[sd]. */
5785 case 17: /* fsito[sd]. */
5786 case 24: /* ftoui[sd]. */
5787 case 25: /* ftouiz[sd]. */
5788 case 26: /* ftosi[sd]. */
5789 case 27: /* ftosiz[sd]. */
5790 /* These instructions will not bounce due to underflow. */
5795 case 3: /* fsqrt[sd]. */
5796 /* fsqrt cannot underflow, but it can (perhaps) overwrite
5797 registers to cause the erratum in previous instructions. */
5798 bfd_arm_vfp11_write_mask (destmask, fd);
5802 case 15: /* fcvt{ds,sd}. */
5806 bfd_arm_vfp11_write_mask (destmask, fd);
5808 /* Only FCVTSD can underflow. */
5809 if ((insn & 0x100) != 0)
5828 /* Two-register transfer. */
5829 else if ((insn & 0x0fe00ed0) == 0x0c400a10)
5831 unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
5833 if ((insn & 0x100000) == 0)
5836 bfd_arm_vfp11_write_mask (destmask, fm);
5839 bfd_arm_vfp11_write_mask (destmask, fm);
5840 bfd_arm_vfp11_write_mask (destmask, fm + 1);
5846 else if ((insn & 0x0e100e00) == 0x0c100a00) /* A load insn. */
5848 int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
5849 unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
5853 case 0: /* Two-reg transfer. We should catch these above. */
5856 case 2: /* fldm[sdx]. */
5860 unsigned int i, offset = insn & 0xff;
5865 for (i = fd; i < fd + offset; i++)
5866 bfd_arm_vfp11_write_mask (destmask, i);
5870 case 4: /* fld[sd]. */
5872 bfd_arm_vfp11_write_mask (destmask, fd);
5881 /* Single-register transfer. Note L==0. */
5882 else if ((insn & 0x0f100e10) == 0x0e000a10)
5884 unsigned int opcode = (insn >> 21) & 7;
5885 unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
5889 case 0: /* fmsr/fmdlr. */
5890 case 1: /* fmdhr. */
5891 /* Mark fmdhr and fmdlr as writing to the whole of the DP
5892 destination register. I don't know if this is exactly right,
5893 but it is the conservative choice. */
5894 bfd_arm_vfp11_write_mask (destmask, fn);
5908 static int elf32_arm_compare_mapping (const void * a, const void * b);
5911 /* Look for potentially-troublesome code sequences which might trigger the
5912 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
5913 (available from ARM) for details of the erratum. A short version is
5914 described in ld.texinfo. */
5917 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
5920 bfd_byte *contents = NULL;
5922 int regs[3], numregs = 0;
5923 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
5924 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
5926 if (globals == NULL)
5929 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
5930 The states transition as follows:
5932 0 -> 1 (vector) or 0 -> 2 (scalar)
5933 A VFP FMAC-pipeline instruction has been seen. Fill
5934 regs[0]..regs[numregs-1] with its input operands. Remember this
5935 instruction in 'first_fmac'.
5938 Any instruction, except for a VFP instruction which overwrites regs[*].
5943 A VFP instruction has been seen which overwrites any of regs[*].
5944 We must make a veneer! Reset state to 0 before examining next
5948 If we fail to match anything in state 2, reset to state 0 and reset
5949 the instruction pointer to the instruction after 'first_fmac'.
5951 If the VFP11 vector mode is in use, there must be at least two unrelated
5952 instructions between anti-dependent VFP11 instructions to properly avoid
5953 triggering the erratum, hence the use of the extra state 1. */
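/* An illustrative scalar-mode sequence (0 -> 2 -> 3): an FMAC-pipeline
   instruction such as "fmacs s4, s5, s6" records s4, s5 and s6 as its
   inputs; a later "fldd d2" overwrites s4/s5 (d2 aliases that register
   pair), so the antidependency test fires and a veneer is required.  */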
5955 /* If we are only performing a partial link do not bother
5956 to construct any glue. */
5957 if (link_info->relocatable)
5960 /* Skip if this bfd does not correspond to an ELF image. */
5961 if (! is_arm_elf (abfd))
5964 /* We should have chosen a fix type by the time we get here. */
5965 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
5967 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
5970 /* Skip this BFD if it corresponds to an executable or dynamic object. */
5971 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
5974 for (sec = abfd->sections; sec != NULL; sec = sec->next)
5976 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
5977 struct _arm_elf_section_data *sec_data;
5979 /* If we don't have executable progbits, we're not interested in this
5980 section. Also skip if section is to be excluded. */
5981 if (elf_section_type (sec) != SHT_PROGBITS
5982 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
5983 || (sec->flags & SEC_EXCLUDE) != 0
5984 || sec->sec_info_type == ELF_INFO_TYPE_JUST_SYMS
5985 || sec->output_section == bfd_abs_section_ptr
5986 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
5989 sec_data = elf32_arm_section_data (sec);
5991 if (sec_data->mapcount == 0)
5994 if (elf_section_data (sec)->this_hdr.contents != NULL)
5995 contents = elf_section_data (sec)->this_hdr.contents;
5996 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
5999 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
6000 elf32_arm_compare_mapping);
6002 for (span = 0; span < sec_data->mapcount; span++)
6004 unsigned int span_start = sec_data->map[span].vma;
6005 unsigned int span_end = (span == sec_data->mapcount - 1)
6006 ? sec->size : sec_data->map[span + 1].vma;
6007 char span_type = sec_data->map[span].type;
6009 /* FIXME: Only ARM mode is supported at present. We may need to
6010 support Thumb-2 mode also at some point. */
6011 if (span_type != 'a')
6014 for (i = span_start; i < span_end;)
6016 unsigned int next_i = i + 4;
6017 unsigned int insn = bfd_big_endian (abfd)
6018 ? (contents[i] << 24)
6019 | (contents[i + 1] << 16)
6020 | (contents[i + 2] << 8)
6022 : (contents[i + 3] << 24)
6023 | (contents[i + 2] << 16)
6024 | (contents[i + 1] << 8)
6026 unsigned int writemask = 0;
6027 enum bfd_arm_vfp11_pipe vpipe;
6032 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
6034 /* I'm assuming the VFP11 erratum can trigger with denorm
6035 operands on either the FMAC or the DS pipeline. This might
6036 lead to slightly overenthusiastic veneer insertion. */
6037 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
6039 state = use_vector ? 1 : 2;
6041 veneer_of_insn = insn;
6047 int other_regs[3], other_numregs;
6048 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6051 if (vpipe != VFP11_BAD
6052 && bfd_arm_vfp11_antidependency (writemask, regs,
6062 int other_regs[3], other_numregs;
6063 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
6066 if (vpipe != VFP11_BAD
6067 && bfd_arm_vfp11_antidependency (writemask, regs,
6073 next_i = first_fmac + 4;
6079 abort (); /* Should be unreachable. */
6084 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
6085 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6088 errcount = ++(elf32_arm_section_data (sec)->erratumcount);
6090 newerr->u.b.vfp_insn = veneer_of_insn;
6095 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
6102 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
6107 newerr->next = sec_data->erratumlist;
6108 sec_data->erratumlist = newerr;
6117 if (contents != NULL
6118 && elf_section_data (sec)->this_hdr.contents != contents)
6126 if (contents != NULL
6127 && elf_section_data (sec)->this_hdr.contents != contents)
6133 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
6134 after sections have been laid out, using specially-named symbols. */
6137 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
6138 struct bfd_link_info *link_info)
6141 struct elf32_arm_link_hash_table *globals;
6144 if (link_info->relocatable)
6147 /* Skip if this bfd does not correspond to an ELF image. */
6148 if (! is_arm_elf (abfd))
6151 globals = elf32_arm_hash_table (link_info);
6152 if (globals == NULL)
6155 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6156 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6158 for (sec = abfd->sections; sec != NULL; sec = sec->next)
6160 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6161 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
6163 for (; errnode != NULL; errnode = errnode->next)
6165 struct elf_link_hash_entry *myh;
6168 switch (errnode->type)
6170 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
6171 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
6172 /* Find veneer symbol. */
6173 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6174 errnode->u.b.veneer->u.v.id);
6176 myh = elf_link_hash_lookup
6177 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6180 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6181 "`%s'"), abfd, tmp_name);
6183 vma = myh->root.u.def.section->output_section->vma
6184 + myh->root.u.def.section->output_offset
6185 + myh->root.u.def.value;
6187 errnode->u.b.veneer->vma = vma;
6190 case VFP11_ERRATUM_ARM_VENEER:
6191 case VFP11_ERRATUM_THUMB_VENEER:
6192 /* Find return location. */
6193 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6196 myh = elf_link_hash_lookup
6197 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6200 (*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
6201 "`%s'"), abfd, tmp_name);
6203 vma = myh->root.u.def.section->output_section->vma
6204 + myh->root.u.def.section->output_offset
6205 + myh->root.u.def.value;
6207 errnode->u.v.branch->vma = vma;
6220 /* Set target relocation values needed during linking. */
6223 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
6224 struct bfd_link_info *link_info,
6226 char * target2_type,
6229 bfd_arm_vfp11_fix vfp11_fix,
6230 int no_enum_warn, int no_wchar_warn,
6231 int pic_veneer, int fix_cortex_a8)
6233 struct elf32_arm_link_hash_table *globals;
6235 globals = elf32_arm_hash_table (link_info);
6236 if (globals == NULL)
6239 globals->target1_is_rel = target1_is_rel;
6240 if (strcmp (target2_type, "rel") == 0)
6241 globals->target2_reloc = R_ARM_REL32;
6242 else if (strcmp (target2_type, "abs") == 0)
6243 globals->target2_reloc = R_ARM_ABS32;
6244 else if (strcmp (target2_type, "got-rel") == 0)
6245 globals->target2_reloc = R_ARM_GOT_PREL;
6248 _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
6251 globals->fix_v4bx = fix_v4bx;
6252 globals->use_blx |= use_blx;
6253 globals->vfp11_fix = vfp11_fix;
6254 globals->pic_veneer = pic_veneer;
6255 globals->fix_cortex_a8 = fix_cortex_a8;
6257 BFD_ASSERT (is_arm_elf (output_bfd));
6258 elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
6259 elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
6262 /* Replace the target offset of a Thumb bl or b.w instruction. */
6265 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
6271 BFD_ASSERT ((offset & 1) == 0);
6273 upper = bfd_get_16 (abfd, insn);
6274 lower = bfd_get_16 (abfd, insn + 2);
6275 reloc_sign = (offset < 0) ? 1 : 0;
6276 upper = (upper & ~(bfd_vma) 0x7ff)
6277 | ((offset >> 12) & 0x3ff)
6278 | (reloc_sign << 10);
6279 lower = (lower & ~(bfd_vma) 0x2fff)
6280 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
6281 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
6282 | ((offset >> 1) & 0x7ff);
6283 bfd_put_16 (abfd, upper, insn);
6284 bfd_put_16 (abfd, lower, insn + 2);
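/* The halfwords written above follow the Thumb-2 BL/B.W immediate
   encoding: the upper halfword carries S and imm10, the lower halfword
   carries J1, J2 and imm11, with J1 = NOT(I1) XOR S and
   J2 = NOT(I2) XOR S, where I1 and I2 are bits 23 and 22 of the byte
   offset passed in.  */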
6287 /* Thumb code calling an ARM function. */
6290 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
6294 asection * input_section,
6295 bfd_byte * hit_data,
6298 bfd_signed_vma addend,
6300 char **error_message)
6304 long int ret_offset;
6305 struct elf_link_hash_entry * myh;
6306 struct elf32_arm_link_hash_table * globals;
6308 myh = find_thumb_glue (info, name, error_message);
6312 globals = elf32_arm_hash_table (info);
6313 BFD_ASSERT (globals != NULL);
6314 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6316 my_offset = myh->root.u.def.value;
6318 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6319 THUMB2ARM_GLUE_SECTION_NAME);
6321 BFD_ASSERT (s != NULL);
6322 BFD_ASSERT (s->contents != NULL);
6323 BFD_ASSERT (s->output_section != NULL);
6325 if ((my_offset & 0x01) == 0x01)
6328 && sym_sec->owner != NULL
6329 && !INTERWORK_FLAG (sym_sec->owner))
6331 (*_bfd_error_handler)
6332 (_("%B(%s): warning: interworking not enabled.\n"
6333 " first occurrence: %B: thumb call to arm"),
6334 sym_sec->owner, input_bfd, name);
6340 myh->root.u.def.value = my_offset;
6342 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
6343 s->contents + my_offset);
6345 put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
6346 s->contents + my_offset + 2);
6349 /* Address of destination of the stub. */
6350 ((bfd_signed_vma) val)
6352 /* Offset from the start of the current section
6353 to the start of the stubs. */
6355 /* Offset of the start of this stub from the start of the stubs. */
6357 /* Address of the start of the current section. */
6358 + s->output_section->vma)
6359 /* The branch instruction is 4 bytes into the stub. */
6361 /* ARM branches work from the pc of the instruction + 8. */
6364 put_arm_insn (globals, output_bfd,
6365 (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
6366 s->contents + my_offset + 4);
6369 BFD_ASSERT (my_offset <= globals->thumb_glue_size);
6371 /* Now go back and fix up the original BL insn to point to here. */
6373 /* Address of where the stub is located. */
6374 (s->output_section->vma + s->output_offset + my_offset)
6375 /* Address of where the BL is located. */
6376 - (input_section->output_section->vma + input_section->output_offset
6378 /* Addend in the relocation. */
6380 /* Biasing for PC-relative addressing. */
6383 insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
6388 /* Populate an Arm to Thumb stub. Returns the stub symbol. */
6390 static struct elf_link_hash_entry *
6391 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
6398 char ** error_message)
6401 long int ret_offset;
6402 struct elf_link_hash_entry * myh;
6403 struct elf32_arm_link_hash_table * globals;
6405 myh = find_arm_glue (info, name, error_message);
6409 globals = elf32_arm_hash_table (info);
6410 BFD_ASSERT (globals != NULL);
6411 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6413 my_offset = myh->root.u.def.value;
6415 if ((my_offset & 0x01) == 0x01)
6418 && sym_sec->owner != NULL
6419 && !INTERWORK_FLAG (sym_sec->owner))
6421 (*_bfd_error_handler)
6422 (_("%B(%s): warning: interworking not enabled.\n"
6423 " first occurrence: %B: arm call to thumb"),
6424 sym_sec->owner, input_bfd, name);
6428 myh->root.u.def.value = my_offset;
6430 if (info->shared || globals->root.is_relocatable_executable
6431 || globals->pic_veneer)
6433 /* For relocatable objects we can't use absolute addresses,
6434 so construct the address from a relative offset. */
6435 /* TODO: If the offset is small it's probably worth
6436 constructing the address with adds. */
6437 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
6438 s->contents + my_offset);
6439 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
6440 s->contents + my_offset + 4);
6441 put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
6442 s->contents + my_offset + 8);
6443 /* Adjust the offset by 4 for the position of the add,
6444 and 8 for the pipeline offset. */
6445 ret_offset = (val - (s->output_offset
6446 + s->output_section->vma
6449 bfd_put_32 (output_bfd, ret_offset,
6450 s->contents + my_offset + 12);
6452 else if (globals->use_blx)
6454 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
6455 s->contents + my_offset);
6457 /* It's a thumb address. Add the low order bit. */
6458 bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
6459 s->contents + my_offset + 4);
6463 put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
6464 s->contents + my_offset);
6466 put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
6467 s->contents + my_offset + 4);
6469 /* It's a thumb address. Add the low order bit. */
6470 bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
6471 s->contents + my_offset + 8);
6477 BFD_ASSERT (my_offset <= globals->arm_glue_size);
6482 /* Arm code calling a Thumb function. */
6485 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
6489 asection * input_section,
6490 bfd_byte * hit_data,
6493 bfd_signed_vma addend,
6495 char **error_message)
6497 unsigned long int tmp;
6500 long int ret_offset;
6501 struct elf_link_hash_entry * myh;
6502 struct elf32_arm_link_hash_table * globals;
6504 globals = elf32_arm_hash_table (info);
6505 BFD_ASSERT (globals != NULL);
6506 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6508 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6509 ARM2THUMB_GLUE_SECTION_NAME);
6510 BFD_ASSERT (s != NULL);
6511 BFD_ASSERT (s->contents != NULL);
6512 BFD_ASSERT (s->output_section != NULL);
6514 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
6515 sym_sec, val, s, error_message);
6519 my_offset = myh->root.u.def.value;
6520 tmp = bfd_get_32 (input_bfd, hit_data);
6521 tmp = tmp & 0xFF000000;
6523 /* The ARM PC reads 8 bytes (two instructions) past the branch being relocated, so subtract 8. */
6524 ret_offset = (s->output_offset
6526 + s->output_section->vma
6527 - (input_section->output_offset
6528 + input_section->output_section->vma
6532 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
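   /* For illustration: the low 24 bits of an ARM B/BL instruction hold a
      signed word offset, so the byte offset computed above is shifted
      right by two before being merged with the opcode/condition bits
      kept in TMP:

        field = ((stub_address - (branch_address + 8)) >> 2) & 0x00ffffff;

      The "+ 8" PC bias is what the "subtract 8" above accounts for.  */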
6534 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
6539 /* Populate Arm stub for an exported Thumb function. */
6542 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
6544 struct bfd_link_info * info = (struct bfd_link_info *) inf;
6546 struct elf_link_hash_entry * myh;
6547 struct elf32_arm_link_hash_entry *eh;
6548 struct elf32_arm_link_hash_table * globals;
6551 char *error_message;
6553 eh = elf32_arm_hash_entry (h);
6554 /* Allocate stubs for exported Thumb functions on v4t. */
6555 if (eh->export_glue == NULL)
6558 globals = elf32_arm_hash_table (info);
6559 BFD_ASSERT (globals != NULL);
6560 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6562 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6563 ARM2THUMB_GLUE_SECTION_NAME);
6564 BFD_ASSERT (s != NULL);
6565 BFD_ASSERT (s->contents != NULL);
6566 BFD_ASSERT (s->output_section != NULL);
6568 sec = eh->export_glue->root.u.def.section;
6570 BFD_ASSERT (sec->output_section != NULL);
6572 val = eh->export_glue->root.u.def.value + sec->output_offset
6573 + sec->output_section->vma;
6575 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
6576 h->root.u.def.section->owner,
6577 globals->obfd, sec, val, s,
6583 /* Populate ARMv4 BX veneers. Returns the absolute address of the veneer. */
6586 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
6591 struct elf32_arm_link_hash_table *globals;
6593 globals = elf32_arm_hash_table (info);
6594 BFD_ASSERT (globals != NULL);
6595 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6597 s = bfd_get_section_by_name (globals->bfd_of_glue_owner,
6598 ARM_BX_GLUE_SECTION_NAME);
6599 BFD_ASSERT (s != NULL);
6600 BFD_ASSERT (s->contents != NULL);
6601 BFD_ASSERT (s->output_section != NULL);
6603 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
6605 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
6607 if ((globals->bx_glue_offset[reg] & 1) == 0)
6609 p = s->contents + glue_addr;
6610 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
6611 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
6612 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
6613 globals->bx_glue_offset[reg] |= 1;
6616 return glue_addr + s->output_section->vma + s->output_offset;
6619 /* Generate Arm stubs for exported Thumb symbols. */
6621 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
6622 struct bfd_link_info *link_info)
6624 struct elf32_arm_link_hash_table * globals;
6626 if (link_info == NULL)
6627 /* Ignore this if we are not called by the ELF backend linker. */
6630 globals = elf32_arm_hash_table (link_info);
6631 if (globals == NULL)
6634 /* If blx is available then exported Thumb symbols are OK and there is nothing to do. */
6636 if (globals->use_blx)
6639 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
6643 /* Some relocations map to different relocations depending on the
6644 target. Return the real relocation. */
6647 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
6653 if (globals->target1_is_rel)
6659 return globals->target2_reloc;
6666 /* Return the base VMA address which should be subtracted from real addresses
6667 when resolving @dtpoff relocation.
6668 This is PT_TLS segment p_vaddr. */
6671 dtpoff_base (struct bfd_link_info *info)
6673 /* If tls_sec is NULL, we should have signalled an error already. */
6674 if (elf_hash_table (info)->tls_sec == NULL)
6676 return elf_hash_table (info)->tls_sec->vma;
6679 /* Return the relocation value for @tpoff relocation
6680 if STT_TLS virtual address is ADDRESS. */
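/* As a worked example (illustrative only): on ARM the thread pointer
   points at the TCB, which is followed by the aligned PT_TLS image, so

     tpoff = ADDRESS - tls_sec->vma + align_power (TCB_SIZE, tls_sec->alignment_power)

   e.g. with TCB_SIZE == 8 and a TLS segment aligned to 8 bytes or less,
   the first TLS variable ends up at a tp-relative offset of 8.  */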
6683 tpoff (struct bfd_link_info *info, bfd_vma address)
6685 struct elf_link_hash_table *htab = elf_hash_table (info);
6688 /* If tls_sec is NULL, we should have signalled an error already. */
6689 if (htab->tls_sec == NULL)
6691 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
6692 return address - htab->tls_sec->vma + base;
6695 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
6696 VALUE is the relocation value. */
6698 static bfd_reloc_status_type
6699 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
6702 return bfd_reloc_overflow;
6704 value |= bfd_get_32 (abfd, data) & 0xfffff000;
6705 bfd_put_32 (abfd, value, data);
6706 return bfd_reloc_ok;
6709 /* For a given value of n, calculate the value of G_n as required to
6710 deal with group relocations. We return it in the form of an
6711 encoded constant-and-rotation, together with the final residual. If n is
6712 specified as less than zero, then final_residual is filled with the
6713 input value and no further action is performed. */
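/* A worked example (for illustration): splitting the value 0x1234 with
   this scheme gives

     residual = 0x1234
     n = 0:  msb = 12, shift = 6, g_0 = 0x1200,
             encoded_g_0 = 0x0d48  (0x48 rotated right by 2 * 13 = 26 bits),
             residual    = 0x0034

   so G_0 is 0x1200 in ARM rotated-8-bit-constant form and the residual
   handed back for n == 0 is 0x34.  */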
6716 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
6720 bfd_vma encoded_g_n = 0;
6721 bfd_vma residual = value; /* Also known as Y_n. */
6723 for (current_n = 0; current_n <= n; current_n++)
6727 /* Calculate which part of the value to mask. */
6734 /* Determine the most significant bit in the residual and
6735 align the resulting value to a 2-bit boundary. */
6736 for (msb = 30; msb >= 0; msb -= 2)
6737 if (residual & (3 << msb))
6740 /* The desired shift is now (msb - 6), or zero, whichever
6747 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
6748 g_n = residual & (0xff << shift);
6749 encoded_g_n = (g_n >> shift)
6750 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
6752 /* Calculate the residual for the next time around. */
6756 *final_residual = residual;
6761 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
6762 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
6765 identify_add_or_sub (bfd_vma insn)
6767 int opcode = insn & 0x1e00000;
6769 if (opcode == 1 << 23) /* ADD */
6772 if (opcode == 1 << 22) /* SUB */
6778 /* Perform a relocation as part of a final link. */
6780 static bfd_reloc_status_type
6781 elf32_arm_final_link_relocate (reloc_howto_type * howto,
6784 asection * input_section,
6785 bfd_byte * contents,
6786 Elf_Internal_Rela * rel,
6788 struct bfd_link_info * info,
6790 const char * sym_name,
6792 struct elf_link_hash_entry * h,
6793 bfd_boolean * unresolved_reloc_p,
6794 char ** error_message)
6796 unsigned long r_type = howto->type;
6797 unsigned long r_symndx;
6798 bfd_byte * hit_data = contents + rel->r_offset;
6799 bfd * dynobj = NULL;
6800 Elf_Internal_Shdr * symtab_hdr;
6801 struct elf_link_hash_entry ** sym_hashes;
6802 bfd_vma * local_got_offsets;
6803 asection * sgot = NULL;
6804 asection * splt = NULL;
6805 asection * sreloc = NULL;
6807 bfd_signed_vma signed_addend;
6808 struct elf32_arm_link_hash_table * globals;
6810 globals = elf32_arm_hash_table (info);
6811 if (globals == NULL)
6812 return bfd_reloc_notsupported;
6814 BFD_ASSERT (is_arm_elf (input_bfd));
6816 /* Some relocation types map to different relocations depending on the
6817 target. We pick the right one here. */
6818 r_type = arm_real_reloc_type (globals, r_type);
6819 if (r_type != howto->type)
6820 howto = elf32_arm_howto_from_type (r_type);
6822 /* If the start address has been set, then set the EF_ARM_HASENTRY
6823 flag. Setting this more than once is redundant, but the cost is
6824 not too high, and it keeps the code simple.
6826 The test is done here, rather than somewhere else, because the
6827 start address is only set just before the final link commences.
6829 Note - if the user deliberately sets a start address of 0, the
6830 flag will not be set. */
6831 if (bfd_get_start_address (output_bfd) != 0)
6832 elf_elfheader (output_bfd)->e_flags |= EF_ARM_HASENTRY;
6834 dynobj = elf_hash_table (info)->dynobj;
6837 sgot = bfd_get_section_by_name (dynobj, ".got");
6838 splt = bfd_get_section_by_name (dynobj, ".plt");
6840 symtab_hdr = & elf_symtab_hdr (input_bfd);
6841 sym_hashes = elf_sym_hashes (input_bfd);
6842 local_got_offsets = elf_local_got_offsets (input_bfd);
6843 r_symndx = ELF32_R_SYM (rel->r_info);
6845 if (globals->use_rel)
6847 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
6849 if (addend & ((howto->src_mask + 1) >> 1))
6852 signed_addend &= ~ howto->src_mask;
6853 signed_addend |= addend;
6856 signed_addend = addend;
6859 addend = signed_addend = rel->r_addend;
6864 /* We don't need to find a value for this symbol. It's just a marker. */
6866 *unresolved_reloc_p = FALSE;
6867 return bfd_reloc_ok;
6870 if (!globals->vxworks_p)
6871 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
6875 case R_ARM_ABS32_NOI:
6877 case R_ARM_REL32_NOI:
6883 /* Handle relocations which should use the PLT entry. ABS32/REL32
6884 will use the symbol's value, which may point to a PLT entry, but we
6885 don't need to handle that here. If we created a PLT entry, all
6886 branches in this object should go to it, except if the PLT is too
6887 far away, in which case a long branch stub should be inserted. */
6888 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
6889 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
6890 && r_type != R_ARM_CALL
6891 && r_type != R_ARM_JUMP24
6892 && r_type != R_ARM_PLT32)
6895 && h->plt.offset != (bfd_vma) -1)
6897 /* If we've created a .plt section, and assigned a PLT entry to
6898 this function, it should not be known to bind locally. If
6899 it were, we would have cleared the PLT entry. */
6900 BFD_ASSERT (!SYMBOL_CALLS_LOCAL (info, h));
6902 value = (splt->output_section->vma
6903 + splt->output_offset
6905 *unresolved_reloc_p = FALSE;
6906 return _bfd_final_link_relocate (howto, input_bfd, input_section,
6907 contents, rel->r_offset, value,
6911 /* When generating a shared object or relocatable executable, these
6912 relocations are copied into the output file to be resolved at run time. */
6914 if ((info->shared || globals->root.is_relocatable_executable)
6915 && (input_section->flags & SEC_ALLOC)
6916 && !(globals->vxworks_p
6917 && strcmp (input_section->output_section->name,
6919 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
6920 || !SYMBOL_CALLS_LOCAL (info, h))
6922 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
6923 || h->root.type != bfd_link_hash_undefweak)
6924 && r_type != R_ARM_PC24
6925 && r_type != R_ARM_CALL
6926 && r_type != R_ARM_JUMP24
6927 && r_type != R_ARM_PREL31
6928 && r_type != R_ARM_PLT32)
6930 Elf_Internal_Rela outrel;
6932 bfd_boolean skip, relocate;
6934 *unresolved_reloc_p = FALSE;
6938 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
6939 ! globals->use_rel);
6942 return bfd_reloc_notsupported;
6948 outrel.r_addend = addend;
6950 _bfd_elf_section_offset (output_bfd, info, input_section,
6952 if (outrel.r_offset == (bfd_vma) -1)
6954 else if (outrel.r_offset == (bfd_vma) -2)
6955 skip = TRUE, relocate = TRUE;
6956 outrel.r_offset += (input_section->output_section->vma
6957 + input_section->output_offset);
6960 memset (&outrel, 0, sizeof outrel);
6965 || !h->def_regular))
6966 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
6971 /* This symbol is local, or marked to become local. */
6972 if (sym_flags == STT_ARM_TFUNC)
6974 if (globals->symbian_p)
6978 /* On Symbian OS, the data segment and text segment
6979 can be relocated independently. Therefore, we
6980 must indicate the segment to which this
6981 relocation is relative. The BPABI allows us to
6982 use any symbol in the right segment; we just use
6983 the section symbol as it is convenient. (We
6984 cannot use the symbol given by "h" directly as it
6985 will not appear in the dynamic symbol table.)
6987 Note that the dynamic linker ignores the section
6988 symbol value, so we don't subtract osec->vma
6989 from the emitted reloc addend. */
6991 osec = sym_sec->output_section;
6993 osec = input_section->output_section;
6994 symbol = elf_section_data (osec)->dynindx;
6997 struct elf_link_hash_table *htab = elf_hash_table (info);
6999 if ((osec->flags & SEC_READONLY) == 0
7000 && htab->data_index_section != NULL)
7001 osec = htab->data_index_section;
7003 osec = htab->text_index_section;
7004 symbol = elf_section_data (osec)->dynindx;
7006 BFD_ASSERT (symbol != 0);
7009 /* On SVR4-ish systems, the dynamic loader cannot
7010 relocate the text and data segments independently,
7011 so the symbol does not matter. */
7013 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
7014 if (globals->use_rel)
7017 outrel.r_addend += value;
7020 loc = sreloc->contents;
7021 loc += sreloc->reloc_count++ * RELOC_SIZE (globals);
7022 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7024 /* If this reloc is against an external symbol, we do not want to
7025 fiddle with the addend. Otherwise, we need to include the symbol
7026 value so that it becomes an addend for the dynamic reloc. */
7028 return bfd_reloc_ok;
7030 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7031 contents, rel->r_offset, value,
7034 else switch (r_type)
7037 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
7039 case R_ARM_XPC25: /* Arm BLX instruction. */
7042 case R_ARM_PC24: /* Arm B/BL instruction. */
7045 bfd_signed_vma branch_offset;
7046 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7048 if (r_type == R_ARM_XPC25)
7050 /* Check for Arm calling Arm function. */
7051 /* FIXME: Should we translate the instruction into a BL
7052 instruction instead ? */
7053 if (sym_flags != STT_ARM_TFUNC)
7054 (*_bfd_error_handler)
7055 (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
7057 h ? h->root.root.string : "(local)");
7059 else if (r_type == R_ARM_PC24)
7061 /* Check for Arm calling Thumb function. */
7062 if (sym_flags == STT_ARM_TFUNC)
7064 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
7065 output_bfd, input_section,
7066 hit_data, sym_sec, rel->r_offset,
7067 signed_addend, value,
7069 return bfd_reloc_ok;
7071 return bfd_reloc_dangerous;
7075 /* Check if a stub has to be inserted because the
7076 destination is too far or we are changing mode. */
7077 if ( r_type == R_ARM_CALL
7078 || r_type == R_ARM_JUMP24
7079 || r_type == R_ARM_PLT32)
7083 /* If the call goes through a PLT entry, make sure to
7084 check distance to the right destination address. */
7085 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7087 value = (splt->output_section->vma
7088 + splt->output_offset
7090 *unresolved_reloc_p = FALSE;
7091 /* The PLT entry is in ARM mode, regardless of the
7093 sym_flags = STT_FUNC;
7096 from = (input_section->output_section->vma
7097 + input_section->output_offset
7099 branch_offset = (bfd_signed_vma)(value - from);
7101 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
7102 || branch_offset < ARM_MAX_BWD_BRANCH_OFFSET
7103 || ((sym_flags == STT_ARM_TFUNC)
7104 && (((r_type == R_ARM_CALL) && !globals->use_blx)
7105 || (r_type == R_ARM_JUMP24)
7106 || (r_type == R_ARM_PLT32) ))
7109 /* The target is out of reach, so redirect the
7110 branch to the local stub for this function. */
7112 stub_entry = elf32_arm_get_stub_entry (input_section,
7115 if (stub_entry != NULL)
7116 value = (stub_entry->stub_offset
7117 + stub_entry->stub_sec->output_offset
7118 + stub_entry->stub_sec->output_section->vma);
7122 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
7124 S is the address of the symbol in the relocation.
7125 P is address of the instruction being relocated.
7126 A is the addend (extracted from the instruction) in bytes.
7128 S is held in 'value'.
7129 P is the base address of the section containing the
7130 instruction plus the offset of the reloc into that
7132 (input_section->output_section->vma +
7133 input_section->output_offset +
7135 A is the addend, converted into bytes, ie:
7138 Note: None of these operations have knowledge of the pipeline
7139 size of the processor, thus it is up to the assembler to
7140 encode this information into the addend. */
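	/* As a rough sketch of the arithmetic below (REL case):

	     offset = S + (A << 2) - P;             S - P + A, in bytes
	     field  = (offset >> 2) & 0x00ffffff;   back into the 24-bit word offset

	   where A is the signed 24-bit field already in the instruction
	   and P is the run-time address of the instruction (section VMA
	   plus the offset of the reloc).  */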
7141 value -= (input_section->output_section->vma
7142 + input_section->output_offset);
7143 value -= rel->r_offset;
7144 if (globals->use_rel)
7145 value += (signed_addend << howto->size);
7147 /* RELA addends do not have to be adjusted by howto->size. */
7148 value += signed_addend;
7150 signed_addend = value;
7151 signed_addend >>= howto->rightshift;
7153 /* A branch to an undefined weak symbol is turned into a jump to
7154 the next instruction unless a PLT entry will be created.
7155 Do the same for local undefined symbols.
7156 The jump to the next instruction is optimized as a NOP depending
7157 on the architecture. */
7158 if (h ? (h->root.type == bfd_link_hash_undefweak
7159 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7160 : bfd_is_und_section (sym_sec))
7162 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
7164 if (arch_has_arm_nop (globals))
7165 value |= 0x0320f000;
7167 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
7171 /* Perform a signed range check. */
7172 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
7173 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
7174 return bfd_reloc_overflow;
7176 addend = (value & 2);
7178 value = (signed_addend & howto->dst_mask)
7179 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
7181 if (r_type == R_ARM_CALL)
7183 /* Set the H bit in the BLX instruction. */
7184 if (sym_flags == STT_ARM_TFUNC)
7189 value &= ~(bfd_vma)(1 << 24);
7192 /* Select the correct instruction (BL or BLX). */
7193 /* Only if we are not handling a BL to a stub. In this
7194 case, mode switching is performed by the stub. */
7195 if (sym_flags == STT_ARM_TFUNC && !stub_entry)
7199 value &= ~(bfd_vma)(1 << 28);
7209 if (sym_flags == STT_ARM_TFUNC)
7213 case R_ARM_ABS32_NOI:
7219 if (sym_flags == STT_ARM_TFUNC)
7221 value -= (input_section->output_section->vma
7222 + input_section->output_offset + rel->r_offset);
7225 case R_ARM_REL32_NOI:
7227 value -= (input_section->output_section->vma
7228 + input_section->output_offset + rel->r_offset);
7232 value -= (input_section->output_section->vma
7233 + input_section->output_offset + rel->r_offset);
7234 value += signed_addend;
7235 if (! h || h->root.type != bfd_link_hash_undefweak)
7237 /* Check for overflow. */
7238 if ((value ^ (value >> 1)) & (1 << 30))
7239 return bfd_reloc_overflow;
7241 value &= 0x7fffffff;
7242 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
7243 if (sym_flags == STT_ARM_TFUNC)
7248 bfd_put_32 (input_bfd, value, hit_data);
7249 return bfd_reloc_ok;
7254 /* There is no way to tell whether the user intended to use a signed or
7255 unsigned addend. When checking for overflow we accept either,
7256 as specified by the AAELF. */
7257 if ((long) value > 0xff || (long) value < -0x80)
7258 return bfd_reloc_overflow;
7260 bfd_put_8 (input_bfd, value, hit_data);
7261 return bfd_reloc_ok;
7266 /* See comment for R_ARM_ABS8. */
7267 if ((long) value > 0xffff || (long) value < -0x8000)
7268 return bfd_reloc_overflow;
7270 bfd_put_16 (input_bfd, value, hit_data);
7271 return bfd_reloc_ok;
7273 case R_ARM_THM_ABS5:
7274 /* Support ldr and str instructions for the thumb. */
7275 if (globals->use_rel)
7277 /* Need to refetch addend. */
7278 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7279 /* ??? Need to determine shift amount from operand size. */
7280 addend >>= howto->rightshift;
7284 /* ??? Isn't value unsigned? */
7285 if ((long) value > 0x1f || (long) value < -0x10)
7286 return bfd_reloc_overflow;
7288 /* ??? Value needs to be properly shifted into place first. */
7289 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
7290 bfd_put_16 (input_bfd, value, hit_data);
7291 return bfd_reloc_ok;
7293 case R_ARM_THM_ALU_PREL_11_0:
7294 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
7297 bfd_signed_vma relocation;
7299 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7300 | bfd_get_16 (input_bfd, hit_data + 2);
7302 if (globals->use_rel)
7304 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
7305 | ((insn & (1 << 26)) >> 15);
7306 if (insn & 0xf00000)
7307 signed_addend = -signed_addend;
7310 relocation = value + signed_addend;
7311 relocation -= (input_section->output_section->vma
7312 + input_section->output_offset
7315 value = abs (relocation);
7317 if (value >= 0x1000)
7318 return bfd_reloc_overflow;
7320 insn = (insn & 0xfb0f8f00) | (value & 0xff)
7321 | ((value & 0x700) << 4)
7322 | ((value & 0x800) << 15);
7326 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7327 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7329 return bfd_reloc_ok;
7333 /* PR 10073: This reloc is not generated by the GNU toolchain,
7334 but it is supported for compatibility with third party libraries
7335 generated by other compilers, specifically the ARM/IAR. */
7338 bfd_signed_vma relocation;
7340 insn = bfd_get_16 (input_bfd, hit_data);
7342 if (globals->use_rel)
7343 addend = (insn & 0x00ff) << 2;
7345 relocation = value + addend;
7346 relocation -= (input_section->output_section->vma
7347 + input_section->output_offset
7350 value = abs (relocation);
7352 /* We do not check for overflow of this reloc. Although strictly
7353 speaking this is incorrect, it appears to be necessary in order
7354 to work with IAR generated relocs. Since GCC and GAS do not
7355 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
7356 a problem for them. */
7359 insn = (insn & 0xff00) | (value >> 2);
7361 bfd_put_16 (input_bfd, insn, hit_data);
7363 return bfd_reloc_ok;
7366 case R_ARM_THM_PC12:
7367 /* Corresponds to: ldr.w reg, [pc, #offset]. */
7370 bfd_signed_vma relocation;
7372 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
7373 | bfd_get_16 (input_bfd, hit_data + 2);
7375 if (globals->use_rel)
7377 signed_addend = insn & 0xfff;
7378 if (!(insn & (1 << 23)))
7379 signed_addend = -signed_addend;
7382 relocation = value + signed_addend;
7383 relocation -= (input_section->output_section->vma
7384 + input_section->output_offset
7387 value = abs (relocation);
7389 if (value >= 0x1000)
7390 return bfd_reloc_overflow;
7392 insn = (insn & 0xff7ff000) | value;
7393 if (relocation >= 0)
7396 bfd_put_16 (input_bfd, insn >> 16, hit_data);
7397 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
7399 return bfd_reloc_ok;
7402 case R_ARM_THM_XPC22:
7403 case R_ARM_THM_CALL:
7404 case R_ARM_THM_JUMP24:
7405 /* Thumb BL (branch long instruction). */
7409 bfd_boolean overflow = FALSE;
7410 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7411 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7412 bfd_signed_vma reloc_signed_max;
7413 bfd_signed_vma reloc_signed_min;
7415 bfd_signed_vma signed_check;
7417 const int thumb2 = using_thumb2 (globals);
7419 /* A branch to an undefined weak symbol is turned into a jump to
7420 the next instruction unless a PLT entry will be created.
7421 The jump to the next instruction is optimized as a NOP.W for
7422 Thumb-2 enabled architectures. */
7423 if (h && h->root.type == bfd_link_hash_undefweak
7424 && !(splt != NULL && h->plt.offset != (bfd_vma) -1))
7426 if (arch_has_thumb2_nop (globals))
7428 bfd_put_16 (input_bfd, 0xf3af, hit_data);
7429 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
7433 bfd_put_16 (input_bfd, 0xe000, hit_data);
7434 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
7436 return bfd_reloc_ok;
7439 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
7440 with Thumb-1) involving the J1 and J2 bits. */
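	/* For reference, the Thumb-2 branch encoding reconstructed below
	   (also valid for older Thumb-1 BL pairs, where J1 == J2 == 1):

	     I1 = NOT (J1 XOR S),  I2 = NOT (J2 XOR S)
	     offset = sign_extend (S:I1:I2:imm10:imm11 << 1)

	   which is what the shifting and XORing below compute.  */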
7441 if (globals->use_rel)
7443 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
7444 bfd_vma upper = upper_insn & 0x3ff;
7445 bfd_vma lower = lower_insn & 0x7ff;
7446 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
7447 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
7448 bfd_vma i1 = j1 ^ s ? 0 : 1;
7449 bfd_vma i2 = j2 ^ s ? 0 : 1;
7451 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
7453 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
7455 signed_addend = addend;
7458 if (r_type == R_ARM_THM_XPC22)
7460 /* Check for Thumb to Thumb call. */
7461 /* FIXME: Should we translate the instruction into a BL
7462 instruction instead ? */
7463 if (sym_flags == STT_ARM_TFUNC)
7464 (*_bfd_error_handler)
7465 (_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
7467 h ? h->root.root.string : "(local)");
7471 /* If it is not a call to Thumb, assume call to Arm.
7472 If it is a call relative to a section name, then it is not a
7473 function call at all, but rather a long jump. Calls through
7474 the PLT do not require stubs. */
7475 if (sym_flags != STT_ARM_TFUNC && sym_flags != STT_SECTION
7476 && (h == NULL || splt == NULL
7477 || h->plt.offset == (bfd_vma) -1))
7479 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7481 /* Convert BL to BLX. */
7482 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7484 else if (( r_type != R_ARM_THM_CALL)
7485 && (r_type != R_ARM_THM_JUMP24))
7487 if (elf32_thumb_to_arm_stub
7488 (info, sym_name, input_bfd, output_bfd, input_section,
7489 hit_data, sym_sec, rel->r_offset, signed_addend, value,
7491 return bfd_reloc_ok;
7493 return bfd_reloc_dangerous;
7496 else if (sym_flags == STT_ARM_TFUNC && globals->use_blx
7497 && r_type == R_ARM_THM_CALL)
7499 /* Make sure this is a BL. */
7500 lower_insn |= 0x1800;
7504 /* Handle calls via the PLT. */
7505 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7507 value = (splt->output_section->vma
7508 + splt->output_offset
7510 if (globals->use_blx && r_type == R_ARM_THM_CALL)
7512 /* If the Thumb BLX instruction is available, convert the
7513 BL to a BLX instruction to call the ARM-mode PLT entry. */
7514 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7515 sym_flags = STT_FUNC;
7519 /* Target the Thumb stub before the ARM PLT entry. */
7520 value -= PLT_THUMB_STUB_SIZE;
7521 sym_flags = STT_ARM_TFUNC;
7523 *unresolved_reloc_p = FALSE;
7526 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
7528 /* Check if a stub has to be inserted because the destination
7531 bfd_signed_vma branch_offset;
7532 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
7534 from = (input_section->output_section->vma
7535 + input_section->output_offset
7537 branch_offset = (bfd_signed_vma)(value - from);
7540 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
7541 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
7544 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
7545 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
7546 || ((sym_flags != STT_ARM_TFUNC)
7547 && (((r_type == R_ARM_THM_CALL) && !globals->use_blx)
7548 || r_type == R_ARM_THM_JUMP24)))
7550 /* The target is out of reach or we are changing modes, so
7551 redirect the branch to the local stub for this
7553 stub_entry = elf32_arm_get_stub_entry (input_section,
7556 if (stub_entry != NULL)
7557 value = (stub_entry->stub_offset
7558 + stub_entry->stub_sec->output_offset
7559 + stub_entry->stub_sec->output_section->vma);
7561 /* If this call becomes a call to Arm, force BLX. */
7562 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
7565 && !arm_stub_is_thumb (stub_entry->stub_type))
7566 || (sym_flags != STT_ARM_TFUNC))
7567 lower_insn = (lower_insn & ~0x1000) | 0x0800;
7572 relocation = value + signed_addend;
7574 relocation -= (input_section->output_section->vma
7575 + input_section->output_offset
7578 check = relocation >> howto->rightshift;
7580 /* If this is a signed value, the rightshift just dropped
7581 leading 1 bits (assuming two's complement). */
7582 if ((bfd_signed_vma) relocation >= 0)
7583 signed_check = check;
7585 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
7587 /* Calculate the permissible maximum and minimum values for
7588 this relocation according to whether we're relocating for Thumb-2 or not. */
7590 bitsize = howto->bitsize;
7593 reloc_signed_max = (1 << (bitsize - 1)) - 1;
7594 reloc_signed_min = ~reloc_signed_max;
7596 /* Assumes two's complement. */
7597 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7600 if ((lower_insn & 0x5000) == 0x4000)
7601 /* For a BLX instruction, make sure that the relocation is rounded up
7602 to a word boundary. This follows the semantics of the instruction
7603 which specifies that bit 1 of the target address will come from bit
7604 1 of the base address. */
7605 relocation = (relocation + 2) & ~ 3;
7607 /* Put RELOCATION back into the insn. Assumes two's complement.
7608 We use the Thumb-2 encoding, which is safe even if dealing with
7609 a Thumb-1 instruction by virtue of our overflow check above. */
7610 reloc_sign = (signed_check < 0) ? 1 : 0;
7611 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
7612 | ((relocation >> 12) & 0x3ff)
7613 | (reloc_sign << 10);
7614 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
7615 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
7616 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
7617 | ((relocation >> 1) & 0x7ff);
7619 /* Put the relocated value back in the object file: */
7620 bfd_put_16 (input_bfd, upper_insn, hit_data);
7621 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7623 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7627 case R_ARM_THM_JUMP19:
7628 /* Thumb32 conditional branch instruction. */
7631 bfd_boolean overflow = FALSE;
7632 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
7633 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
7634 bfd_signed_vma reloc_signed_max = 0xffffe;
7635 bfd_signed_vma reloc_signed_min = -0x100000;
7636 bfd_signed_vma signed_check;
7638 /* Need to refetch the addend, reconstruct the top three bits,
7639 and squish the two 11 bit pieces together. */
7640 if (globals->use_rel)
7642 bfd_vma S = (upper_insn & 0x0400) >> 10;
7643 bfd_vma upper = (upper_insn & 0x003f);
7644 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
7645 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
7646 bfd_vma lower = (lower_insn & 0x07ff);
7651 upper -= 0x0100; /* Sign extend. */
7653 addend = (upper << 12) | (lower << 1);
7654 signed_addend = addend;
7657 /* Handle calls via the PLT. */
7658 if (h != NULL && splt != NULL && h->plt.offset != (bfd_vma) -1)
7660 value = (splt->output_section->vma
7661 + splt->output_offset
7663 /* Target the Thumb stub before the ARM PLT entry. */
7664 value -= PLT_THUMB_STUB_SIZE;
7665 *unresolved_reloc_p = FALSE;
7668 /* ??? Should handle interworking? GCC might someday try to
7669 use this for tail calls. */
7671 relocation = value + signed_addend;
7672 relocation -= (input_section->output_section->vma
7673 + input_section->output_offset
7675 signed_check = (bfd_signed_vma) relocation;
7677 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7680 /* Put RELOCATION back into the insn. */
7682 bfd_vma S = (relocation & 0x00100000) >> 20;
7683 bfd_vma J2 = (relocation & 0x00080000) >> 19;
7684 bfd_vma J1 = (relocation & 0x00040000) >> 18;
7685 bfd_vma hi = (relocation & 0x0003f000) >> 12;
7686 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
7688 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
7689 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
7692 /* Put the relocated value back in the object file: */
7693 bfd_put_16 (input_bfd, upper_insn, hit_data);
7694 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
7696 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
7699 case R_ARM_THM_JUMP11:
7700 case R_ARM_THM_JUMP8:
7701 case R_ARM_THM_JUMP6:
7702 /* Thumb B (branch) instruction. */
7704 bfd_signed_vma relocation;
7705 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
7706 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
7707 bfd_signed_vma signed_check;
7709 /* CBZ cannot jump backward. */
7710 if (r_type == R_ARM_THM_JUMP6)
7711 reloc_signed_min = 0;
7713 if (globals->use_rel)
7715 /* Need to refetch addend. */
7716 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
7717 if (addend & ((howto->src_mask + 1) >> 1))
7720 signed_addend &= ~ howto->src_mask;
7721 signed_addend |= addend;
7724 signed_addend = addend;
7725 /* The value in the insn has been right shifted. We need to
7726 undo this, so that we can perform the address calculation
7727 in terms of bytes. */
7728 signed_addend <<= howto->rightshift;
7730 relocation = value + signed_addend;
7732 relocation -= (input_section->output_section->vma
7733 + input_section->output_offset
7736 relocation >>= howto->rightshift;
7737 signed_check = relocation;
7739 if (r_type == R_ARM_THM_JUMP6)
7740 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
7742 relocation &= howto->dst_mask;
7743 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
7745 bfd_put_16 (input_bfd, relocation, hit_data);
7747 /* Assumes two's complement. */
7748 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
7749 return bfd_reloc_overflow;
7751 return bfd_reloc_ok;
7754 case R_ARM_ALU_PCREL7_0:
7755 case R_ARM_ALU_PCREL15_8:
7756 case R_ARM_ALU_PCREL23_15:
7761 insn = bfd_get_32 (input_bfd, hit_data);
7762 if (globals->use_rel)
7764 /* Extract the addend. */
7765 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
7766 signed_addend = addend;
7768 relocation = value + signed_addend;
7770 relocation -= (input_section->output_section->vma
7771 + input_section->output_offset
7773 insn = (insn & ~0xfff)
7774 | ((howto->bitpos << 7) & 0xf00)
7775 | ((relocation >> howto->bitpos) & 0xff);
7776 bfd_put_32 (input_bfd, insn, hit_data);
7778 return bfd_reloc_ok;
7780 case R_ARM_GNU_VTINHERIT:
7781 case R_ARM_GNU_VTENTRY:
7782 return bfd_reloc_ok;
7784 case R_ARM_GOTOFF32:
7785 /* Relocation is relative to the start of the
7786 global offset table. */
7788 BFD_ASSERT (sgot != NULL);
7790 return bfd_reloc_notsupported;
7792 /* If we are addressing a Thumb function, we need to adjust the
7793 address by one, so that attempts to call the function pointer will
7794 correctly interpret it as Thumb code. */
7795 if (sym_flags == STT_ARM_TFUNC)
7798 /* Note that sgot->output_offset is not involved in this
7799 calculation. We always want the start of .got. If we
7800 define _GLOBAL_OFFSET_TABLE in a different way, as is
7801 permitted by the ABI, we might have to change this calculation. */
7803 value -= sgot->output_section->vma;
7804 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7805 contents, rel->r_offset, value,
7809 /* Use global offset table as symbol value. */
7810 BFD_ASSERT (sgot != NULL);
7813 return bfd_reloc_notsupported;
7815 *unresolved_reloc_p = FALSE;
7816 value = sgot->output_section->vma;
7817 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7818 contents, rel->r_offset, value,
7822 case R_ARM_GOT_PREL:
7823 /* Relocation is to the entry for this symbol in the
7824 global offset table. */
7826 return bfd_reloc_notsupported;
7833 off = h->got.offset;
7834 BFD_ASSERT (off != (bfd_vma) -1);
7835 dyn = globals->root.dynamic_sections_created;
7837 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
7839 && SYMBOL_REFERENCES_LOCAL (info, h))
7840 || (ELF_ST_VISIBILITY (h->other)
7841 && h->root.type == bfd_link_hash_undefweak))
7843 /* This is actually a static link, or it is a -Bsymbolic link
7844 and the symbol is defined locally. We must initialize this
7845 entry in the global offset table. Since the offset must
7846 always be a multiple of 4, we use the least significant bit
7847 to record whether we have initialized it already.
7849 When doing a dynamic link, we create a .rel(a).got relocation
7850 entry to initialize the value. This is done in the
7851 finish_dynamic_symbol routine. */
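	  /* Illustration of the tagging scheme: GOT entries are 4-byte
	     aligned, so bit 0 of h->got.offset is free:

	       if (off & 1)   the entry at (off & ~1) is already filled in;
	       else           fill it in now and set h->got.offset |= 1.

	     The same trick is used for local symbols further down.  */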
7856 /* If we are addressing a Thumb function, we need to
7857 adjust the address by one, so that attempts to
7858 call the function pointer will correctly
7859 interpret it as Thumb code. */
7860 if (sym_flags == STT_ARM_TFUNC)
7863 bfd_put_32 (output_bfd, value, sgot->contents + off);
7868 *unresolved_reloc_p = FALSE;
7870 value = sgot->output_offset + off;
7876 BFD_ASSERT (local_got_offsets != NULL &&
7877 local_got_offsets[r_symndx] != (bfd_vma) -1);
7879 off = local_got_offsets[r_symndx];
7881 /* The offset must always be a multiple of 4. We use the
7882 least significant bit to record whether we have already
7883 generated the necessary reloc. */
7888 /* If we are addressing a Thumb function, we need to
7889 adjust the address by one, so that attempts to
7890 call the function pointer will correctly
7891 interpret it as Thumb code. */
7892 if (sym_flags == STT_ARM_TFUNC)
7895 if (globals->use_rel)
7896 bfd_put_32 (output_bfd, value, sgot->contents + off);
7901 Elf_Internal_Rela outrel;
7904 srelgot = (bfd_get_section_by_name
7905 (dynobj, RELOC_SECTION (globals, ".got")));
7906 BFD_ASSERT (srelgot != NULL);
7908 outrel.r_addend = addend + value;
7909 outrel.r_offset = (sgot->output_section->vma
7910 + sgot->output_offset
7912 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
7913 loc = srelgot->contents;
7914 loc += srelgot->reloc_count++ * RELOC_SIZE (globals);
7915 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7918 local_got_offsets[r_symndx] |= 1;
7921 value = sgot->output_offset + off;
7923 if (r_type != R_ARM_GOT32)
7924 value += sgot->output_section->vma;
7926 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7927 contents, rel->r_offset, value,
7930 case R_ARM_TLS_LDO32:
7931 value = value - dtpoff_base (info);
7933 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7934 contents, rel->r_offset, value,
7937 case R_ARM_TLS_LDM32:
7941 if (globals->sgot == NULL)
7944 off = globals->tls_ldm_got.offset;
7950 /* If we don't know the module number, create a relocation
7954 Elf_Internal_Rela outrel;
7957 if (globals->srelgot == NULL)
7960 outrel.r_addend = 0;
7961 outrel.r_offset = (globals->sgot->output_section->vma
7962 + globals->sgot->output_offset + off);
7963 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
7965 if (globals->use_rel)
7966 bfd_put_32 (output_bfd, outrel.r_addend,
7967 globals->sgot->contents + off);
7969 loc = globals->srelgot->contents;
7970 loc += globals->srelgot->reloc_count++ * RELOC_SIZE (globals);
7971 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
7974 bfd_put_32 (output_bfd, 1, globals->sgot->contents + off);
7976 globals->tls_ldm_got.offset |= 1;
7979 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
7980 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
7982 return _bfd_final_link_relocate (howto, input_bfd, input_section,
7983 contents, rel->r_offset, value,
7987 case R_ARM_TLS_GD32:
7988 case R_ARM_TLS_IE32:
7994 if (globals->sgot == NULL)
8001 dyn = globals->root.dynamic_sections_created;
8002 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
8004 || !SYMBOL_REFERENCES_LOCAL (info, h)))
8006 *unresolved_reloc_p = FALSE;
8009 off = h->got.offset;
8010 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
8014 if (local_got_offsets == NULL)
8016 off = local_got_offsets[r_symndx];
8017 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
8020 if (tls_type == GOT_UNKNOWN)
8027 bfd_boolean need_relocs = FALSE;
8028 Elf_Internal_Rela outrel;
8029 bfd_byte *loc = NULL;
8032 /* The GOT entries have not been initialized yet. Do it
8033 now, and emit any relocations. If both an IE GOT and a
8034 GD GOT are necessary, we emit the GD first. */
8036 if ((info->shared || indx != 0)
8038 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
8039 || h->root.type != bfd_link_hash_undefweak))
8042 if (globals->srelgot == NULL)
8044 loc = globals->srelgot->contents;
8045 loc += globals->srelgot->reloc_count * RELOC_SIZE (globals);
8048 if (tls_type & GOT_TLS_GD)
8052 outrel.r_addend = 0;
8053 outrel.r_offset = (globals->sgot->output_section->vma
8054 + globals->sgot->output_offset
8056 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
8058 if (globals->use_rel)
8059 bfd_put_32 (output_bfd, outrel.r_addend,
8060 globals->sgot->contents + cur_off);
8062 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8063 globals->srelgot->reloc_count++;
8064 loc += RELOC_SIZE (globals);
8067 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8068 globals->sgot->contents + cur_off + 4);
8071 outrel.r_addend = 0;
8072 outrel.r_info = ELF32_R_INFO (indx,
8073 R_ARM_TLS_DTPOFF32);
8074 outrel.r_offset += 4;
8076 if (globals->use_rel)
8077 bfd_put_32 (output_bfd, outrel.r_addend,
8078 globals->sgot->contents + cur_off + 4);
8081 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8082 globals->srelgot->reloc_count++;
8083 loc += RELOC_SIZE (globals);
8088 /* If we are not emitting relocations for a
8089 general dynamic reference, then we must be in a
8090 static link or an executable link with the
8091 symbol binding locally. Mark it as belonging
8092 to module 1, the executable. */
8093 bfd_put_32 (output_bfd, 1,
8094 globals->sgot->contents + cur_off);
8095 bfd_put_32 (output_bfd, value - dtpoff_base (info),
8096 globals->sgot->contents + cur_off + 4);
8102 if (tls_type & GOT_TLS_IE)
8107 outrel.r_addend = value - dtpoff_base (info);
8109 outrel.r_addend = 0;
8110 outrel.r_offset = (globals->sgot->output_section->vma
8111 + globals->sgot->output_offset
8113 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
8115 if (globals->use_rel)
8116 bfd_put_32 (output_bfd, outrel.r_addend,
8117 globals->sgot->contents + cur_off);
8119 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
8120 globals->srelgot->reloc_count++;
8121 loc += RELOC_SIZE (globals);
8124 bfd_put_32 (output_bfd, tpoff (info, value),
8125 globals->sgot->contents + cur_off);
8132 local_got_offsets[r_symndx] |= 1;
8135 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
8137 value = globals->sgot->output_section->vma + globals->sgot->output_offset + off
8138 - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
8140 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8141 contents, rel->r_offset, value,
8145 case R_ARM_TLS_LE32:
8148 (*_bfd_error_handler)
8149 (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
8150 input_bfd, input_section,
8151 (long) rel->r_offset, howto->name);
8152 return (bfd_reloc_status_type) FALSE;
8155 value = tpoff (info, value);
8157 return _bfd_final_link_relocate (howto, input_bfd, input_section,
8158 contents, rel->r_offset, value,
8162 if (globals->fix_v4bx)
8164 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8166 /* Ensure that we have a BX instruction. */
8167 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
8169 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
8171 /* Branch to veneer. */
8173 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
8174 glue_addr -= input_section->output_section->vma
8175 + input_section->output_offset
8176 + rel->r_offset + 8;
8177 insn = (insn & 0xf0000000) | 0x0a000000
8178 | ((glue_addr >> 2) & 0x00ffffff);
8182 /* Preserve Rm (lowest four bits) and the condition code
8183 (highest four bits). Other bits encode MOV PC,Rm. */
8184 insn = (insn & 0xf000000f) | 0x01a0f000;
8187 bfd_put_32 (input_bfd, insn, hit_data);
8189 return bfd_reloc_ok;
8191 case R_ARM_MOVW_ABS_NC:
8192 case R_ARM_MOVT_ABS:
8193 case R_ARM_MOVW_PREL_NC:
8194 case R_ARM_MOVT_PREL:
8195 /* Until we properly support segment-base-relative addressing then
8196 we assume the segment base to be zero, as for the group relocations.
8197 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
8198 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
8199 case R_ARM_MOVW_BREL_NC:
8200 case R_ARM_MOVW_BREL:
8201 case R_ARM_MOVT_BREL:
8203 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8205 if (globals->use_rel)
8207 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
8208 signed_addend = (addend ^ 0x8000) - 0x8000;
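	/* For reference: the ARM MOVW/MOVT 16-bit immediate is stored as
	   imm4:imm12 (instruction bits 19-16 and 11-0), which is what the
	   extraction above reassembles; the (x ^ 0x8000) - 0x8000 idiom
	   then sign-extends the 16-bit REL addend.  */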
8211 value += signed_addend;
8213 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
8214 value -= (input_section->output_section->vma
8215 + input_section->output_offset + rel->r_offset);
8217 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
8218 return bfd_reloc_overflow;
8220 if (sym_flags == STT_ARM_TFUNC)
8223 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
8224 || r_type == R_ARM_MOVT_BREL)
8228 insn |= value & 0xfff;
8229 insn |= (value & 0xf000) << 4;
8230 bfd_put_32 (input_bfd, insn, hit_data);
8232 return bfd_reloc_ok;
8234 case R_ARM_THM_MOVW_ABS_NC:
8235 case R_ARM_THM_MOVT_ABS:
8236 case R_ARM_THM_MOVW_PREL_NC:
8237 case R_ARM_THM_MOVT_PREL:
8238 /* Until we properly support segment-base-relative addressing then
8239 we assume the segment base to be zero, as for the above relocations.
8240 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
8241 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
8242 as R_ARM_THM_MOVT_ABS. */
8243 case R_ARM_THM_MOVW_BREL_NC:
8244 case R_ARM_THM_MOVW_BREL:
8245 case R_ARM_THM_MOVT_BREL:
8249 insn = bfd_get_16 (input_bfd, hit_data) << 16;
8250 insn |= bfd_get_16 (input_bfd, hit_data + 2);
8252 if (globals->use_rel)
8254 addend = ((insn >> 4) & 0xf000)
8255 | ((insn >> 15) & 0x0800)
8256 | ((insn >> 4) & 0x0700)
8258 signed_addend = (addend ^ 0x8000) - 0x8000;
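	/* For reference: in the Thumb-2 MOVW/MOVT encoding the 16-bit
	   immediate is scattered as imm4 (bits 19-16), i (bit 26),
	   imm3 (bits 14-12) and imm8 (bits 7-0) of the combined 32-bit
	   instruction word; the shifts above gather it back into
	   imm4:i:imm3:imm8 before the REL addend is sign-extended.  */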
8261 value += signed_addend;
8263 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
8264 value -= (input_section->output_section->vma
8265 + input_section->output_offset + rel->r_offset);
8267 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
8268 return bfd_reloc_overflow;
8270 if (sym_flags == STT_ARM_TFUNC)
8273 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
8274 || r_type == R_ARM_THM_MOVT_BREL)
8278 insn |= (value & 0xf000) << 4;
8279 insn |= (value & 0x0800) << 15;
8280 insn |= (value & 0x0700) << 4;
8281 insn |= (value & 0x00ff);
8283 bfd_put_16 (input_bfd, insn >> 16, hit_data);
8284 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
8286 return bfd_reloc_ok;
8288 case R_ARM_ALU_PC_G0_NC:
8289 case R_ARM_ALU_PC_G1_NC:
8290 case R_ARM_ALU_PC_G0:
8291 case R_ARM_ALU_PC_G1:
8292 case R_ARM_ALU_PC_G2:
8293 case R_ARM_ALU_SB_G0_NC:
8294 case R_ARM_ALU_SB_G1_NC:
8295 case R_ARM_ALU_SB_G0:
8296 case R_ARM_ALU_SB_G1:
8297 case R_ARM_ALU_SB_G2:
8299 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8300 bfd_vma pc = input_section->output_section->vma
8301 + input_section->output_offset + rel->r_offset;
8302 /* sb should be the origin of the *segment* containing the symbol.
8303 It is not clear how to obtain this OS-dependent value, so we
8304 make an arbitrary choice of zero. */
8308 bfd_signed_vma signed_value;
8311 /* Determine which group of bits to select. */
8314 case R_ARM_ALU_PC_G0_NC:
8315 case R_ARM_ALU_PC_G0:
8316 case R_ARM_ALU_SB_G0_NC:
8317 case R_ARM_ALU_SB_G0:
8321 case R_ARM_ALU_PC_G1_NC:
8322 case R_ARM_ALU_PC_G1:
8323 case R_ARM_ALU_SB_G1_NC:
8324 case R_ARM_ALU_SB_G1:
8328 case R_ARM_ALU_PC_G2:
8329 case R_ARM_ALU_SB_G2:
8337 /* If REL, extract the addend from the insn. If RELA, it will
8338 have already been fetched for us. */
8339 if (globals->use_rel)
8342 bfd_vma constant = insn & 0xff;
8343 bfd_vma rotation = (insn & 0xf00) >> 8;
8346 signed_addend = constant;
8349 /* Compensate for the fact that in the instruction, the
8350 rotation is stored in multiples of 2 bits. */
8353 /* Rotate "constant" right by "rotation" bits. */
8354 signed_addend = (constant >> rotation) |
8355 (constant << (8 * sizeof (bfd_vma) - rotation));
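	    /* For reference: an ARM data-processing immediate encodes an
	       8-bit constant rotated right by twice the 4-bit rotation
	       field (architecturally, constant 0x48 with rotation field 13
	       denotes 0x48 ror 26 == 0x1200); the shift pair above undoes
	       that rotation, working in the full width of bfd_vma.  */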
8358 /* Determine if the instruction is an ADD or a SUB.
8359 (For REL, this determines the sign of the addend.) */
8360 negative = identify_add_or_sub (insn);
8363 (*_bfd_error_handler)
8364 (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
8365 input_bfd, input_section,
8366 (long) rel->r_offset, howto->name);
8367 return bfd_reloc_overflow;
8370 signed_addend *= negative;
8373 /* Compute the value (X) to go in the place. */
8374 if (r_type == R_ARM_ALU_PC_G0_NC
8375 || r_type == R_ARM_ALU_PC_G1_NC
8376 || r_type == R_ARM_ALU_PC_G0
8377 || r_type == R_ARM_ALU_PC_G1
8378 || r_type == R_ARM_ALU_PC_G2)
8380 signed_value = value - pc + signed_addend;
8382 /* Section base relative. */
8383 signed_value = value - sb + signed_addend;
8385 /* If the target symbol is a Thumb function, then set the
8386 Thumb bit in the address. */
8387 if (sym_flags == STT_ARM_TFUNC)
8390 /* Calculate the value of the relevant G_n, in encoded
8391 constant-with-rotation format. */
8392 g_n = calculate_group_reloc_mask (abs (signed_value), group,
8395 /* Check for overflow if required. */
8396 if ((r_type == R_ARM_ALU_PC_G0
8397 || r_type == R_ARM_ALU_PC_G1
8398 || r_type == R_ARM_ALU_PC_G2
8399 || r_type == R_ARM_ALU_SB_G0
8400 || r_type == R_ARM_ALU_SB_G1
8401 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
8403 (*_bfd_error_handler)
8404 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8405 input_bfd, input_section,
8406 (long) rel->r_offset, abs (signed_value), howto->name);
8407 return bfd_reloc_overflow;
8410 /* Mask out the value and the ADD/SUB part of the opcode; take care
8411 not to destroy the S bit. */
8414 /* Set the opcode according to whether the value to go in the
8415 place is negative. */
8416 if (signed_value < 0)
8421 /* Encode the offset. */
8424 bfd_put_32 (input_bfd, insn, hit_data);
8426 return bfd_reloc_ok;
8428 case R_ARM_LDR_PC_G0:
8429 case R_ARM_LDR_PC_G1:
8430 case R_ARM_LDR_PC_G2:
8431 case R_ARM_LDR_SB_G0:
8432 case R_ARM_LDR_SB_G1:
8433 case R_ARM_LDR_SB_G2:
8435 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8436 bfd_vma pc = input_section->output_section->vma
8437 + input_section->output_offset + rel->r_offset;
8438 bfd_vma sb = 0; /* See note above. */
8440 bfd_signed_vma signed_value;
8443 /* Determine which groups of bits to calculate. */
8446 case R_ARM_LDR_PC_G0:
8447 case R_ARM_LDR_SB_G0:
8451 case R_ARM_LDR_PC_G1:
8452 case R_ARM_LDR_SB_G1:
8456 case R_ARM_LDR_PC_G2:
8457 case R_ARM_LDR_SB_G2:
8465 /* If REL, extract the addend from the insn. If RELA, it will
8466 have already been fetched for us. */
8467 if (globals->use_rel)
8469 int negative = (insn & (1 << 23)) ? 1 : -1;
8470 signed_addend = negative * (insn & 0xfff);
8473 /* Compute the value (X) to go in the place. */
8474 if (r_type == R_ARM_LDR_PC_G0
8475 || r_type == R_ARM_LDR_PC_G1
8476 || r_type == R_ARM_LDR_PC_G2)
8478 signed_value = value - pc + signed_addend;
8480 /* Section base relative. */
8481 signed_value = value - sb + signed_addend;
8483 /* Calculate the value of the relevant G_{n-1} to obtain
8484 the residual at that stage. */
8485 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8487 /* Check for overflow. */
8488 if (residual >= 0x1000)
8490 (*_bfd_error_handler)
8491 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8492 input_bfd, input_section,
8493 (long) rel->r_offset, abs (signed_value), howto->name);
8494 return bfd_reloc_overflow;
8497 /* Mask out the value and U bit. */
8500 /* Set the U bit if the value to go in the place is non-negative. */
8501 if (signed_value >= 0)
8504 /* Encode the offset. */
8507 bfd_put_32 (input_bfd, insn, hit_data);
8509 return bfd_reloc_ok;
8511 case R_ARM_LDRS_PC_G0:
8512 case R_ARM_LDRS_PC_G1:
8513 case R_ARM_LDRS_PC_G2:
8514 case R_ARM_LDRS_SB_G0:
8515 case R_ARM_LDRS_SB_G1:
8516 case R_ARM_LDRS_SB_G2:
8518 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8519 bfd_vma pc = input_section->output_section->vma
8520 + input_section->output_offset + rel->r_offset;
8521 bfd_vma sb = 0; /* See note above. */
8523 bfd_signed_vma signed_value;
8526 /* Determine which groups of bits to calculate. */
8529 case R_ARM_LDRS_PC_G0:
8530 case R_ARM_LDRS_SB_G0:
8534 case R_ARM_LDRS_PC_G1:
8535 case R_ARM_LDRS_SB_G1:
8539 case R_ARM_LDRS_PC_G2:
8540 case R_ARM_LDRS_SB_G2:
8548 /* If REL, extract the addend from the insn. If RELA, it will
8549 have already been fetched for us. */
8550 if (globals->use_rel)
8552 int negative = (insn & (1 << 23)) ? 1 : -1;
8553 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
8556 /* Compute the value (X) to go in the place. */
8557 if (r_type == R_ARM_LDRS_PC_G0
8558 || r_type == R_ARM_LDRS_PC_G1
8559 || r_type == R_ARM_LDRS_PC_G2)
8561 signed_value = value - pc + signed_addend;
8563 /* Section base relative. */
8564 signed_value = value - sb + signed_addend;
8566 /* Calculate the value of the relevant G_{n-1} to obtain
8567 the residual at that stage. */
8568 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8570 /* Check for overflow. */
8571 if (residual >= 0x100)
8573 (*_bfd_error_handler)
8574 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8575 input_bfd, input_section,
8576 (long) rel->r_offset, abs (signed_value), howto->name);
8577 return bfd_reloc_overflow;
8580 /* Mask out the value and U bit. */
8583 /* Set the U bit if the value to go in the place is non-negative. */
8584 if (signed_value >= 0)
8587 /* Encode the offset. */
8588 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
8590 bfd_put_32 (input_bfd, insn, hit_data);
8592 return bfd_reloc_ok;
8594 case R_ARM_LDC_PC_G0:
8595 case R_ARM_LDC_PC_G1:
8596 case R_ARM_LDC_PC_G2:
8597 case R_ARM_LDC_SB_G0:
8598 case R_ARM_LDC_SB_G1:
8599 case R_ARM_LDC_SB_G2:
8601 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
8602 bfd_vma pc = input_section->output_section->vma
8603 + input_section->output_offset + rel->r_offset;
8604 bfd_vma sb = 0; /* See note above. */
8606 bfd_signed_vma signed_value;
8609 /* Determine which groups of bits to calculate. */
8612 case R_ARM_LDC_PC_G0:
8613 case R_ARM_LDC_SB_G0:
8617 case R_ARM_LDC_PC_G1:
8618 case R_ARM_LDC_SB_G1:
8622 case R_ARM_LDC_PC_G2:
8623 case R_ARM_LDC_SB_G2:
8631 /* If REL, extract the addend from the insn. If RELA, it will
8632 have already been fetched for us. */
8633 if (globals->use_rel)
8635 int negative = (insn & (1 << 23)) ? 1 : -1;
8636 signed_addend = negative * ((insn & 0xff) << 2);
8639 /* Compute the value (X) to go in the place. */
8640 if (r_type == R_ARM_LDC_PC_G0
8641 || r_type == R_ARM_LDC_PC_G1
8642 || r_type == R_ARM_LDC_PC_G2)
8644 signed_value = value - pc + signed_addend;
8646 /* Section base relative. */
8647 signed_value = value - sb + signed_addend;
8649 /* Calculate the value of the relevant G_{n-1} to obtain
8650 the residual at that stage. */
8651 calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
8653 /* Check for overflow. (The absolute value to go in the place must be
8654 divisible by four and, after having been divided by four, must
8655 fit in eight bits.) */
8656 if ((residual & 0x3) != 0 || residual >= 0x400)
8658 (*_bfd_error_handler)
8659 (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
8660 input_bfd, input_section,
8661 (long) rel->r_offset, abs (signed_value), howto->name);
8662 return bfd_reloc_overflow;
8665 /* Mask out the value and U bit. */
8668 /* Set the U bit if the value to go in the place is non-negative. */
8669 if (signed_value >= 0)
8672 /* Encode the offset. */
8673 insn |= residual >> 2;
8675 bfd_put_32 (input_bfd, insn, hit_data);
8677 return bfd_reloc_ok;
8680 return bfd_reloc_notsupported;
8684 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS. */
8686 arm_add_to_rel (bfd * abfd,
8688 reloc_howto_type * howto,
8689 bfd_signed_vma increment)
8691 bfd_signed_vma addend;
8693 if (howto->type == R_ARM_THM_CALL
8694 || howto->type == R_ARM_THM_JUMP24)
8696 int upper_insn, lower_insn;
8699 upper_insn = bfd_get_16 (abfd, address);
8700 lower_insn = bfd_get_16 (abfd, address + 2);
8701 upper = upper_insn & 0x7ff;
8702 lower = lower_insn & 0x7ff;
8704 addend = (upper << 12) | (lower << 1);
8705 addend += increment;
8708 upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
8709 lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
8711 bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
8712 bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
8718 contents = bfd_get_32 (abfd, address);
8720 /* Get the (signed) value from the instruction. */
8721 addend = contents & howto->src_mask;
8722 if (addend & ((howto->src_mask + 1) >> 1))
8724 bfd_signed_vma mask;
8727 mask &= ~ howto->src_mask;
8731 /* Add in the increment (which is a byte value). */
8732 switch (howto->type)
8735 addend += increment;
8742 addend <<= howto->size;
8743 addend += increment;
8745 /* Should we check for overflow here ? */
8747 /* Drop any undesired bits. */
8748 addend >>= howto->rightshift;
8752 contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
8754 bfd_put_32 (abfd, contents, address);
8758 #define IS_ARM_TLS_RELOC(R_TYPE) \
8759 ((R_TYPE) == R_ARM_TLS_GD32 \
8760 || (R_TYPE) == R_ARM_TLS_LDO32 \
8761 || (R_TYPE) == R_ARM_TLS_LDM32 \
8762 || (R_TYPE) == R_ARM_TLS_DTPOFF32 \
8763 || (R_TYPE) == R_ARM_TLS_DTPMOD32 \
8764 || (R_TYPE) == R_ARM_TLS_TPOFF32 \
8765 || (R_TYPE) == R_ARM_TLS_LE32 \
8766 || (R_TYPE) == R_ARM_TLS_IE32)
8768 /* Relocate an ARM ELF section. */
8771 elf32_arm_relocate_section (bfd * output_bfd,
8772 struct bfd_link_info * info,
8774 asection * input_section,
8775 bfd_byte * contents,
8776 Elf_Internal_Rela * relocs,
8777 Elf_Internal_Sym * local_syms,
8778 asection ** local_sections)
8780 Elf_Internal_Shdr *symtab_hdr;
8781 struct elf_link_hash_entry **sym_hashes;
8782 Elf_Internal_Rela *rel;
8783 Elf_Internal_Rela *relend;
8785 struct elf32_arm_link_hash_table * globals;
8787 globals = elf32_arm_hash_table (info);
8788 if (globals == NULL)
8791 symtab_hdr = & elf_symtab_hdr (input_bfd);
8792 sym_hashes = elf_sym_hashes (input_bfd);
8795 relend = relocs + input_section->reloc_count;
8796 for (; rel < relend; rel++)
8799 reloc_howto_type * howto;
8800 unsigned long r_symndx;
8801 Elf_Internal_Sym * sym;
8803 struct elf_link_hash_entry * h;
8805 bfd_reloc_status_type r;
8808 bfd_boolean unresolved_reloc = FALSE;
8809 char *error_message = NULL;
8811 r_symndx = ELF32_R_SYM (rel->r_info);
8812 r_type = ELF32_R_TYPE (rel->r_info);
8813 r_type = arm_real_reloc_type (globals, r_type);
8815 if ( r_type == R_ARM_GNU_VTENTRY
8816 || r_type == R_ARM_GNU_VTINHERIT)
8819 bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
8820 howto = bfd_reloc.howto;
8826 if (r_symndx < symtab_hdr->sh_info)
8828 sym = local_syms + r_symndx;
8829 sym_type = ELF32_ST_TYPE (sym->st_info);
8830 sec = local_sections[r_symndx];
8832 /* An object file might have a reference to a local
8833 undefined symbol. This is a daft object file, but we
8834 should at least do something about it. V4BX & NONE
8835 relocations do not use the symbol and are explicitly
8836 allowed to use the undefined symbol, so allow those. */
8837 if (r_type != R_ARM_V4BX
8838 && r_type != R_ARM_NONE
8839 && bfd_is_und_section (sec)
8840 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
8842 if (!info->callbacks->undefined_symbol
8843 (info, bfd_elf_string_from_elf_section
8844 (input_bfd, symtab_hdr->sh_link, sym->st_name),
8845 input_bfd, input_section,
8846 rel->r_offset, TRUE))
8850 if (globals->use_rel)
8852 relocation = (sec->output_section->vma
8853 + sec->output_offset
8855 if (!info->relocatable
8856 && (sec->flags & SEC_MERGE)
8857 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
8860 bfd_vma addend, value;
8864 case R_ARM_MOVW_ABS_NC:
8865 case R_ARM_MOVT_ABS:
8866 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8867 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
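/* Sign-extend the reconstructed 16-bit MOVW/MOVT immediate. */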
8868 addend = (addend ^ 0x8000) - 0x8000;
8871 case R_ARM_THM_MOVW_ABS_NC:
8872 case R_ARM_THM_MOVT_ABS:
8873 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
8875 value |= bfd_get_16 (input_bfd,
8876 contents + rel->r_offset + 2);
8877 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
8878 | ((value & 0x04000000) >> 15);
8879 addend = (addend ^ 0x8000) - 0x8000;
8883 if (howto->rightshift
8884 || (howto->src_mask & (howto->src_mask + 1)))
8886 (*_bfd_error_handler)
8887 (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
8888 input_bfd, input_section,
8889 (long) rel->r_offset, howto->name);
8893 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
8895 /* Get the (signed) value from the instruction. */
8896 addend = value & howto->src_mask;
8897 if (addend & ((howto->src_mask + 1) >> 1))
8899 bfd_signed_vma mask;
8902 mask &= ~ howto->src_mask;
8910 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
8912 addend += msec->output_section->vma + msec->output_offset;
8914 /* Cases here must match those in the preceding
8915 switch statement. */
8918 case R_ARM_MOVW_ABS_NC:
8919 case R_ARM_MOVT_ABS:
8920 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
8922 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8925 case R_ARM_THM_MOVW_ABS_NC:
8926 case R_ARM_THM_MOVT_ABS:
8927 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
8928 | (addend & 0xff) | ((addend & 0x0800) << 15);
8929 bfd_put_16 (input_bfd, value >> 16,
8930 contents + rel->r_offset);
8931 bfd_put_16 (input_bfd, value,
8932 contents + rel->r_offset + 2);
8936 value = (value & ~ howto->dst_mask)
8937 | (addend & howto->dst_mask);
8938 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
8944 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
8950 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
8951 r_symndx, symtab_hdr, sym_hashes,
8953 unresolved_reloc, warned);
8958 if (sec != NULL && elf_discarded_section (sec))
8960 /* For relocs against symbols from removed linkonce sections,
8961 or sections discarded by a linker script, we just want the
8962 section contents zeroed. Avoid any special processing. */
8963 _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
8969 if (info->relocatable)
8971 /* This is a relocatable link. We don't have to change
8972 anything, unless the reloc is against a section symbol,
8973 in which case we have to adjust according to where the
8974 section symbol winds up in the output section. */
8975 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
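/* With REL relocations the addend lives in the section contents, so patch
   the instruction in place; with RELA just update r_addend. */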
8977 if (globals->use_rel)
8978 arm_add_to_rel (input_bfd, contents + rel->r_offset,
8979 howto, (bfd_signed_vma) sec->output_offset);
8981 rel->r_addend += sec->output_offset;
8987 name = h->root.root.string;
8990 name = (bfd_elf_string_from_elf_section
8991 (input_bfd, symtab_hdr->sh_link, sym->st_name));
8992 if (name == NULL || *name == '\0')
8993 name = bfd_section_name (input_bfd, sec);
8997 && r_type != R_ARM_NONE
8999 || h->root.type == bfd_link_hash_defined
9000 || h->root.type == bfd_link_hash_defweak)
9001 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
9003 (*_bfd_error_handler)
9004 ((sym_type == STT_TLS
9005 ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
9006 : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
9009 (long) rel->r_offset,
9014 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
9015 input_section, contents, rel,
9016 relocation, info, sec, name,
9017 (h ? ELF_ST_TYPE (h->type) :
9018 ELF_ST_TYPE (sym->st_info)), h,
9019 &unresolved_reloc, &error_message);
9021 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
9022 because such sections are not SEC_ALLOC and thus ld.so will
9023 not process them. */
9024 if (unresolved_reloc
9025 && !((input_section->flags & SEC_DEBUGGING) != 0
9028 (*_bfd_error_handler)
9029 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
9032 (long) rel->r_offset,
9034 h->root.root.string);
9038 if (r != bfd_reloc_ok)
9042 case bfd_reloc_overflow:
9043 /* If the overflowing reloc was to an undefined symbol,
9044 we have already printed one error message and there
9045 is no point complaining again. */
9047 h->root.type != bfd_link_hash_undefined)
9048 && (!((*info->callbacks->reloc_overflow)
9049 (info, (h ? &h->root : NULL), name, howto->name,
9050 (bfd_vma) 0, input_bfd, input_section,
9055 case bfd_reloc_undefined:
9056 if (!((*info->callbacks->undefined_symbol)
9057 (info, name, input_bfd, input_section,
9058 rel->r_offset, TRUE)))
9062 case bfd_reloc_outofrange:
9063 error_message = _("out of range");
9066 case bfd_reloc_notsupported:
9067 error_message = _("unsupported relocation");
9070 case bfd_reloc_dangerous:
9071 /* error_message should already be set. */
9075 error_message = _("unknown error");
9079 BFD_ASSERT (error_message != NULL);
9080 if (!((*info->callbacks->reloc_dangerous)
9081 (info, error_message, input_bfd, input_section,
9092 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
9093 adds the edit to the start of the list. (The list must be built in order of
9094 ascending TINDEX: the function's callers are primarily responsible for
9095 maintaining that condition). */
9098 add_unwind_table_edit (arm_unwind_table_edit **head,
9099 arm_unwind_table_edit **tail,
9100 arm_unwind_edit_type type,
9101 asection *linked_section,
9102 unsigned int tindex)
9104 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
9105 xmalloc (sizeof (arm_unwind_table_edit));
9107 new_edit->type = type;
9108 new_edit->linked_section = linked_section;
9109 new_edit->index = tindex;
9113 new_edit->next = NULL;
9116 (*tail)->next = new_edit;
9125 new_edit->next = *head;
9134 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
9136 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST may be negative. */
9138 adjust_exidx_size (asection *exidx_sec, int adjust)
9142 if (!exidx_sec->rawsize)
9143 exidx_sec->rawsize = exidx_sec->size;
9145 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
9146 out_sec = exidx_sec->output_section;
9147 /* Adjust size of output section. */
9148 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
9151 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
9153 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
9155 struct _arm_elf_section_data *exidx_arm_data;
9157 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9158 add_unwind_table_edit (
9159 &exidx_arm_data->u.exidx.unwind_edit_list,
9160 &exidx_arm_data->u.exidx.unwind_edit_tail,
9161 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
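/* Each .ARM.exidx entry is a pair of words, so the CANTUNWIND marker grows
   the section by eight bytes. */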
9163 adjust_exidx_size (exidx_sec, 8);
9166 /* Scan .ARM.exidx tables, and create a list describing edits which should be
9167 made to those tables, such that:
9169 1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
9170 2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
9171 codes which have been inlined into the index).
9173 The edits are applied when the tables are written
9174 (in elf32_arm_write_section).
9178 elf32_arm_fix_exidx_coverage (asection **text_section_order,
9179 unsigned int num_text_sections,
9180 struct bfd_link_info *info)
9183 unsigned int last_second_word = 0, i;
9184 asection *last_exidx_sec = NULL;
9185 asection *last_text_sec = NULL;
9186 int last_unwind_type = -1;
9188 /* Walk over all EXIDX sections, and create backlinks from the corresponding
9190 for (inp = info->input_bfds; inp != NULL; inp = inp->link_next)
9194 for (sec = inp->sections; sec != NULL; sec = sec->next)
9196 struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
9197 Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
9199 if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
9202 if (elf_sec->linked_to)
9204 Elf_Internal_Shdr *linked_hdr
9205 = &elf_section_data (elf_sec->linked_to)->this_hdr;
9206 struct _arm_elf_section_data *linked_sec_arm_data
9207 = get_arm_elf_section_data (linked_hdr->bfd_section);
9209 if (linked_sec_arm_data == NULL)
9212 /* Link this .ARM.exidx section back from the text section it describes. */
9214 linked_sec_arm_data->u.text.arm_exidx_sec = sec;
9219 /* Walk all text sections in order of increasing VMA. Eliminate duplicate
9220 index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
9221 and add EXIDX_CANTUNWIND entries for sections with no unwind table data. */
9223 for (i = 0; i < num_text_sections; i++)
9225 asection *sec = text_section_order[i];
9226 asection *exidx_sec;
9227 struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
9228 struct _arm_elf_section_data *exidx_arm_data;
9229 bfd_byte *contents = NULL;
9230 int deleted_exidx_bytes = 0;
9232 arm_unwind_table_edit *unwind_edit_head = NULL;
9233 arm_unwind_table_edit *unwind_edit_tail = NULL;
9234 Elf_Internal_Shdr *hdr;
9237 if (arm_data == NULL)
9240 exidx_sec = arm_data->u.text.arm_exidx_sec;
9241 if (exidx_sec == NULL)
9243 /* Section has no unwind data. */
9244 if (last_unwind_type == 0 || !last_exidx_sec)
9247 /* Ignore zero sized sections. */
9251 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9252 last_unwind_type = 0;
9256 /* Skip /DISCARD/ sections. */
9257 if (bfd_is_abs_section (exidx_sec->output_section))
9260 hdr = &elf_section_data (exidx_sec)->this_hdr;
9261 if (hdr->sh_type != SHT_ARM_EXIDX)
9264 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
9265 if (exidx_arm_data == NULL)
9268 ibfd = exidx_sec->owner;
9270 if (hdr->contents != NULL)
9271 contents = hdr->contents;
9272 else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
9276 for (j = 0; j < hdr->sh_size; j += 8)
9278 unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
9282 /* An EXIDX_CANTUNWIND entry. */
9283 if (second_word == 1)
9285 if (last_unwind_type == 0)
9289 /* Inlined unwinding data. Merge if equal to previous. */
9290 else if ((second_word & 0x80000000) != 0)
9292 if (last_second_word == second_word && last_unwind_type == 1)
9295 last_second_word = second_word;
9297 /* Normal table entry. In theory we could merge these too,
9298 but duplicate entries are likely to be much less common. */
9304 add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
9305 DELETE_EXIDX_ENTRY, NULL, j / 8);
9307 deleted_exidx_bytes += 8;
9310 last_unwind_type = unwind_type;
9313 /* Free contents if we allocated it ourselves. */
9314 if (contents != hdr->contents)
9317 /* Record edits to be applied later (in elf32_arm_write_section). */
9318 exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
9319 exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
9321 if (deleted_exidx_bytes > 0)
9322 adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
9324 last_exidx_sec = exidx_sec;
9325 last_text_sec = sec;
9328 /* Add terminating CANTUNWIND entry. */
9329 if (last_exidx_sec && last_unwind_type != 0)
9330 insert_cantunwind_after (last_text_sec, last_exidx_sec);
9336 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
9337 bfd *ibfd, const char *name)
9339 asection *sec, *osec;
9341 sec = bfd_get_section_by_name (ibfd, name);
9342 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
9345 osec = sec->output_section;
9346 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
9349 if (! bfd_set_section_contents (obfd, osec, sec->contents,
9350 sec->output_offset, sec->size))
9357 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
9359 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
9361 if (globals == NULL)
9364 /* Invoke the regular ELF backend linker to do all the work. */
9365 if (!bfd_elf_final_link (abfd, info))
9368 /* Write out any glue sections now that we have created all the stubs. */
9370 if (globals->bfd_of_glue_owner != NULL)
9372 if (! elf32_arm_output_glue_section (info, abfd,
9373 globals->bfd_of_glue_owner,
9374 ARM2THUMB_GLUE_SECTION_NAME))
9377 if (! elf32_arm_output_glue_section (info, abfd,
9378 globals->bfd_of_glue_owner,
9379 THUMB2ARM_GLUE_SECTION_NAME))
9382 if (! elf32_arm_output_glue_section (info, abfd,
9383 globals->bfd_of_glue_owner,
9384 VFP11_ERRATUM_VENEER_SECTION_NAME))
9387 if (! elf32_arm_output_glue_section (info, abfd,
9388 globals->bfd_of_glue_owner,
9389 ARM_BX_GLUE_SECTION_NAME))
9396 /* Set the right machine number. */
9399 elf32_arm_object_p (bfd *abfd)
9403 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
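/* A machine number recorded in the ARM note section takes precedence over
   anything we might guess from the ELF header flags. */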
9405 if (mach != bfd_mach_arm_unknown)
9406 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9408 else if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
9409 bfd_default_set_arch_mach (abfd, bfd_arch_arm, bfd_mach_arm_ep9312);
9412 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
9417 /* Function to keep ARM specific flags in the ELF header. */
9420 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
9422 if (elf_flags_init (abfd)
9423 && elf_elfheader (abfd)->e_flags != flags)
9425 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
9427 if (flags & EF_ARM_INTERWORK)
9428 (*_bfd_error_handler)
9429 (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
9433 (_("Warning: Clearing the interworking flag of %B due to outside request"),
9439 elf_elfheader (abfd)->e_flags = flags;
9440 elf_flags_init (abfd) = TRUE;
9446 /* Copy backend specific data from one object module to another. */
9449 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
9454 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
9457 in_flags = elf_elfheader (ibfd)->e_flags;
9458 out_flags = elf_elfheader (obfd)->e_flags;
9460 if (elf_flags_init (obfd)
9461 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
9462 && in_flags != out_flags)
9464 /* Cannot mix APCS26 and APCS32 code. */
9465 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
9468 /* Cannot mix float APCS and non-float APCS code. */
9469 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
9472 /* If the src and dest have different interworking flags
9473 then turn off the interworking bit. */
9474 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
9476 if (out_flags & EF_ARM_INTERWORK)
9478 (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
9481 in_flags &= ~EF_ARM_INTERWORK;
9484 /* Likewise for PIC, though don't warn for this case. */
9485 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
9486 in_flags &= ~EF_ARM_PIC;
9489 elf_elfheader (obfd)->e_flags = in_flags;
9490 elf_flags_init (obfd) = TRUE;
9492 /* Also copy the EI_OSABI field. */
9493 elf_elfheader (obfd)->e_ident[EI_OSABI] =
9494 elf_elfheader (ibfd)->e_ident[EI_OSABI];
9496 /* Copy object attributes. */
9497 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9502 /* Values for Tag_ABI_PCS_R9_use. */
9511 /* Values for Tag_ABI_PCS_RW_data. */
9514 AEABI_PCS_RW_data_absolute,
9515 AEABI_PCS_RW_data_PCrel,
9516 AEABI_PCS_RW_data_SBrel,
9517 AEABI_PCS_RW_data_unused
9520 /* Values for Tag_ABI_enum_size. */
9526 AEABI_enum_forced_wide
9529 /* Determine whether an object attribute tag takes an integer, a
9533 elf32_arm_obj_attrs_arg_type (int tag)
9535 if (tag == Tag_compatibility)
9536 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
9537 else if (tag == Tag_nodefaults)
9538 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
9539 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
9540 return ATTR_TYPE_FLAG_STR_VAL;
9542 return ATTR_TYPE_FLAG_INT_VAL;
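/* Remaining (>= 32) tags follow the EABI convention: odd-numbered tags take
   string values, even-numbered tags take integers. */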
9544 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
9547 /* The ABI defines that Tag_conformance should be emitted first, and that
9548 Tag_nodefaults should be second (if either is defined). This sets those
9549 two positions, and bumps up the position of all the remaining tags to compensate. */
9552 elf32_arm_obj_attrs_order (int num)
9555 return Tag_conformance;
9557 return Tag_nodefaults;
9558 if ((num - 2) < Tag_nodefaults)
9560 if ((num - 1) < Tag_conformance)
9565 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
9566 Returns -1 if no architecture could be read. */
9569 get_secondary_compatible_arch (bfd *abfd)
9571 obj_attribute *attr =
9572 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9574 /* Note: the tag and its argument below are uleb128 values, though
9575 currently-defined values fit in one byte for each. */
9577 && attr->s[0] == Tag_CPU_arch
9578 && (attr->s[1] & 128) != 128
9582 /* This tag is "safely ignorable", so don't complain if it looks funny. */
9586 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
9587 The tag is removed if ARCH is -1. */
9590 set_secondary_compatible_arch (bfd *abfd, int arch)
9592 obj_attribute *attr =
9593 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
9601 /* Note: the tag and its argument below are uleb128 values, though
9602 currently-defined values fit in one byte for each. */
9604 attr->s = (char *) bfd_alloc (abfd, 3);
9605 attr->s[0] = Tag_CPU_arch;
9610 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags into account. */
9614 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
9615 int newtag, int secondary_compat)
9617 #define T(X) TAG_CPU_ARCH_##X
9618 int tagl, tagh, result;
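/* Each array below corresponds to one value of the higher of the two tags and
   is indexed by the lower tag; the entry gives the smallest architecture that
   subsumes both. */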
9621 T(V6T2), /* PRE_V4. */
9625 T(V6T2), /* V5TE. */
9626 T(V6T2), /* V5TEJ. */
9633 T(V6K), /* PRE_V4. */
9638 T(V6K), /* V5TEJ. */
9640 T(V6KZ), /* V6KZ. */
9646 T(V7), /* PRE_V4. */
9665 T(V6K), /* V5TEJ. */
9667 T(V6KZ), /* V6KZ. */
9680 T(V6K), /* V5TEJ. */
9682 T(V6KZ), /* V6KZ. */
9686 T(V6S_M), /* V6_M. */
9687 T(V6S_M) /* V6S_M. */
9693 T(V7E_M), /* V4T. */
9694 T(V7E_M), /* V5T. */
9695 T(V7E_M), /* V5TE. */
9696 T(V7E_M), /* V5TEJ. */
9698 T(V7E_M), /* V6KZ. */
9699 T(V7E_M), /* V6T2. */
9700 T(V7E_M), /* V6K. */
9702 T(V7E_M), /* V6_M. */
9703 T(V7E_M), /* V6S_M. */
9704 T(V7E_M) /* V7E_M. */
9706 const int v4t_plus_v6_m[] =
9712 T(V5TE), /* V5TE. */
9713 T(V5TEJ), /* V5TEJ. */
9715 T(V6KZ), /* V6KZ. */
9716 T(V6T2), /* V6T2. */
9719 T(V6_M), /* V6_M. */
9720 T(V6S_M), /* V6S_M. */
9721 T(V7E_M), /* V7E_M. */
9722 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
9732 /* Pseudo-architecture. */
9736 /* Check we've not got a higher architecture than we know about. */
9738 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
9740 _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
9744 /* Override old tag if we have a Tag_also_compatible_with on the output. */
9746 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
9747 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
9748 oldtag = T(V4T_PLUS_V6_M);
9750 /* And override the new tag if we have a Tag_also_compatible_with on the input. */
9753 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
9754 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
9755 newtag = T(V4T_PLUS_V6_M);
9757 tagl = (oldtag < newtag) ? oldtag : newtag;
9758 result = tagh = (oldtag > newtag) ? oldtag : newtag;
9760 /* Architectures before V6KZ add features monotonically. */
9761 if (tagh <= TAG_CPU_ARCH_V6KZ)
9764 result = comb[tagh - T(V6T2)][tagl];
9766 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
9767 as the canonical version. */
9768 if (result == T(V4T_PLUS_V6_M))
9771 *secondary_compat_out = T(V6_M);
9774 *secondary_compat_out = -1;
9778 _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
9779 ibfd, oldtag, newtag);
9787 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
9788 are conflicting attributes. */
9791 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
9793 obj_attribute *in_attr;
9794 obj_attribute *out_attr;
9795 obj_attribute_list *in_list;
9796 obj_attribute_list *out_list;
9797 obj_attribute_list **out_listp;
9798 /* Some tags have 0 = don't care, 1 = strong requirement,
9799 2 = weak requirement. */
9800 static const int order_021[3] = {0, 2, 1};
9802 bfd_boolean result = TRUE;
9804 /* Skip the linker stubs file. This preserves previous behavior
9805 of accepting unknown attributes in the first input file - but is that a bug? */
9807 if (ibfd->flags & BFD_LINKER_CREATED)
9810 if (!elf_known_obj_attributes_proc (obfd)[0].i)
9812 /* This is the first object. Copy the attributes. */
9813 _bfd_elf_copy_obj_attributes (ibfd, obfd);
9815 /* Use the Tag_null value to indicate the attributes have been initialized. */
9817 elf_known_obj_attributes_proc (obfd)[0].i = 1;
9822 in_attr = elf_known_obj_attributes_proc (ibfd);
9823 out_attr = elf_known_obj_attributes_proc (obfd);
9824 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
9825 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
9827 /* Ignore mismatches if the object doesn't use floating point. */
9828 if (out_attr[Tag_ABI_FP_number_model].i == 0)
9829 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
9830 else if (in_attr[Tag_ABI_FP_number_model].i != 0)
9833 (_("error: %B uses VFP register arguments, %B does not"),
9834 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
9835 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
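/* The per-attribute merge below starts at Tag_CPU_raw_name (attribute 4);
   the lower-numbered tags are structural and are not merged here. */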
9840 for (i = 4; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
9842 /* Merge this attribute with existing attributes. */
9845 case Tag_CPU_raw_name:
9847 /* These are merged after Tag_CPU_arch. */
9850 case Tag_ABI_optimization_goals:
9851 case Tag_ABI_FP_optimization_goals:
9852 /* Use the first value seen. */
9857 int secondary_compat = -1, secondary_compat_out = -1;
9858 unsigned int saved_out_attr = out_attr[i].i;
9859 static const char *name_table[] = {
9860 /* These aren't real CPU names, but we can't guess
9861 that from the architecture version alone. */
9877 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
9878 secondary_compat = get_secondary_compatible_arch (ibfd);
9879 secondary_compat_out = get_secondary_compatible_arch (obfd);
9880 out_attr[i].i = tag_cpu_arch_combine (ibfd, out_attr[i].i,
9881 &secondary_compat_out,
9884 set_secondary_compatible_arch (obfd, secondary_compat_out);
9886 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
9887 if (out_attr[i].i == saved_out_attr)
9888 ; /* Leave the names alone. */
9889 else if (out_attr[i].i == in_attr[i].i)
9891 /* The output architecture has been changed to match the
9892 input architecture. Use the input names. */
9893 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
9894 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
9896 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
9897 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
9902 out_attr[Tag_CPU_name].s = NULL;
9903 out_attr[Tag_CPU_raw_name].s = NULL;
9906 /* If we still don't have a value for Tag_CPU_name,
9907 make one up now. Tag_CPU_raw_name remains blank. */
9908 if (out_attr[Tag_CPU_name].s == NULL
9909 && out_attr[i].i < ARRAY_SIZE (name_table))
9910 out_attr[Tag_CPU_name].s =
9911 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
9915 case Tag_ARM_ISA_use:
9916 case Tag_THUMB_ISA_use:
9918 case Tag_Advanced_SIMD_arch:
9919 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
9920 case Tag_ABI_FP_rounding:
9921 case Tag_ABI_FP_exceptions:
9922 case Tag_ABI_FP_user_exceptions:
9923 case Tag_ABI_FP_number_model:
9924 case Tag_VFP_HP_extension:
9925 case Tag_CPU_unaligned_access:
9927 case Tag_Virtualization_use:
9928 case Tag_MPextension_use:
9929 /* Use the largest value specified. */
9930 if (in_attr[i].i > out_attr[i].i)
9931 out_attr[i].i = in_attr[i].i;
9934 case Tag_ABI_align8_preserved:
9935 case Tag_ABI_PCS_RO_data:
9936 /* Use the smallest value specified. */
9937 if (in_attr[i].i < out_attr[i].i)
9938 out_attr[i].i = in_attr[i].i;
9941 case Tag_ABI_align8_needed:
9942 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
9943 && (in_attr[Tag_ABI_align8_preserved].i == 0
9944 || out_attr[Tag_ABI_align8_preserved].i == 0))
9946 /* This error message should be enabled once all non-conformant
9947 binaries in the toolchain have had the attributes set
9950 (_("error: %B: 8-byte data alignment conflicts with %B"),
9955 case Tag_ABI_FP_denormal:
9956 case Tag_ABI_PCS_GOT_use:
9957 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
9958 value if greater than 2 (for future-proofing). */
9959 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
9960 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
9961 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
9962 out_attr[i].i = in_attr[i].i;
9966 case Tag_CPU_arch_profile:
9967 if (out_attr[i].i != in_attr[i].i)
9969 /* 0 will merge with anything.
9970 'A' and 'S' merge to 'A'.
9971 'R' and 'S' merge to 'R'.
9972 'M' and 'A|R|S' is an error. */
9973 if (out_attr[i].i == 0
9974 || (out_attr[i].i == 'S'
9975 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
9976 out_attr[i].i = in_attr[i].i;
9977 else if (in_attr[i].i == 0
9978 || (in_attr[i].i == 'S'
9979 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
9984 (_("error: %B: Conflicting architecture profiles %c/%c"),
9986 in_attr[i].i ? in_attr[i].i : '0',
9987 out_attr[i].i ? out_attr[i].i : '0');
10012 /* Values greater than 6 aren't defined, so just pick the biggest. */
10014 if (in_attr[i].i > 6 && in_attr[i].i > out_attr[i].i)
10016 out_attr[i] = in_attr[i];
10019 /* The output uses the superset of input features
10020 (ISA version) and registers. */
10021 ver = vfp_versions[in_attr[i].i].ver;
10022 if (ver < vfp_versions[out_attr[i].i].ver)
10023 ver = vfp_versions[out_attr[i].i].ver;
10024 regs = vfp_versions[in_attr[i].i].regs;
10025 if (regs < vfp_versions[out_attr[i].i].regs)
10026 regs = vfp_versions[out_attr[i].i].regs;
10027 /* This assumes all possible supersets are also a valid
10029 for (newval = 6; newval > 0; newval--)
10031 if (regs == vfp_versions[newval].regs
10032 && ver == vfp_versions[newval].ver)
10035 out_attr[i].i = newval;
10038 case Tag_PCS_config:
10039 if (out_attr[i].i == 0)
10040 out_attr[i].i = in_attr[i].i;
10041 else if (in_attr[i].i != 0 && out_attr[i].i != 0)
10043 /* It's sometimes ok to mix different configs, so this is only a warning. */
10046 (_("Warning: %B: Conflicting platform configuration"), ibfd);
10049 case Tag_ABI_PCS_R9_use:
10050 if (in_attr[i].i != out_attr[i].i
10051 && out_attr[i].i != AEABI_R9_unused
10052 && in_attr[i].i != AEABI_R9_unused)
10055 (_("error: %B: Conflicting use of R9"), ibfd);
10058 if (out_attr[i].i == AEABI_R9_unused)
10059 out_attr[i].i = in_attr[i].i;
10061 case Tag_ABI_PCS_RW_data:
10062 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
10063 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
10064 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
10067 (_("error: %B: SB relative addressing conflicts with use of R9"),
10071 /* Use the smallest value specified. */
10072 if (in_attr[i].i < out_attr[i].i)
10073 out_attr[i].i = in_attr[i].i;
10075 case Tag_ABI_PCS_wchar_t:
10076 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
10077 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
10080 (_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
10081 ibfd, in_attr[i].i, out_attr[i].i);
10083 else if (in_attr[i].i && !out_attr[i].i)
10084 out_attr[i].i = in_attr[i].i;
10086 case Tag_ABI_enum_size:
10087 if (in_attr[i].i != AEABI_enum_unused)
10089 if (out_attr[i].i == AEABI_enum_unused
10090 || out_attr[i].i == AEABI_enum_forced_wide)
10092 /* The existing object is compatible with anything.
10093 Use whatever requirements the new object has. */
10094 out_attr[i].i = in_attr[i].i;
10096 else if (in_attr[i].i != AEABI_enum_forced_wide
10097 && out_attr[i].i != in_attr[i].i
10098 && !elf_arm_tdata (obfd)->no_enum_size_warning)
10100 static const char *aeabi_enum_names[] =
10101 { "", "variable-size", "32-bit", "" };
10102 const char *in_name =
10103 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10104 ? aeabi_enum_names[in_attr[i].i]
10106 const char *out_name =
10107 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
10108 ? aeabi_enum_names[out_attr[i].i]
10111 (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
10112 ibfd, in_name, out_name);
10116 case Tag_ABI_VFP_args:
10119 case Tag_ABI_WMMX_args:
10120 if (in_attr[i].i != out_attr[i].i)
10123 (_("error: %B uses iWMMXt register arguments, %B does not"),
10128 case Tag_compatibility:
10129 /* Merged in target-independent code. */
10131 case Tag_ABI_HardFP_use:
10132 /* 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP). */
10133 if ((in_attr[i].i == 1 && out_attr[i].i == 2)
10134 || (in_attr[i].i == 2 && out_attr[i].i == 1))
10136 else if (in_attr[i].i > out_attr[i].i)
10137 out_attr[i].i = in_attr[i].i;
10139 case Tag_ABI_FP_16bit_format:
10140 if (in_attr[i].i != 0 && out_attr[i].i != 0)
10142 if (in_attr[i].i != out_attr[i].i)
10145 (_("error: fp16 format mismatch between %B and %B"),
10150 if (in_attr[i].i != 0)
10151 out_attr[i].i = in_attr[i].i;
10154 case Tag_nodefaults:
10155 /* This tag is set if it exists, but the value is unused (and is
10156 typically zero). We don't actually need to do anything here -
10157 the merge happens automatically when the type flags are merged
10160 case Tag_also_compatible_with:
10161 /* Already done in Tag_CPU_arch. */
10163 case Tag_conformance:
10164 /* Keep the attribute if it matches. Throw it away otherwise.
10165 No attribute means no claim to conform. */
10166 if (!in_attr[i].s || !out_attr[i].s
10167 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
10168 out_attr[i].s = NULL;
10173 bfd *err_bfd = NULL;
10175 /* The "known_obj_attributes" table does contain some undefined
10176 attributes. Ensure that they are unused. */
10177 if (out_attr[i].i != 0 || out_attr[i].s != NULL)
10179 else if (in_attr[i].i != 0 || in_attr[i].s != NULL)
10182 if (err_bfd != NULL)
10184 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10185 if ((i & 127) < 64)
10188 (_("%B: Unknown mandatory EABI object attribute %d"),
10190 bfd_set_error (bfd_error_bad_value);
10196 (_("Warning: %B: Unknown EABI object attribute %d"),
10201 /* Only pass on attributes that match in both inputs. */
10202 if (in_attr[i].i != out_attr[i].i
10203 || in_attr[i].s != out_attr[i].s
10204 || (in_attr[i].s != NULL && out_attr[i].s != NULL
10205 && strcmp (in_attr[i].s, out_attr[i].s) != 0))
10208 out_attr[i].s = NULL;
10213 /* If out_attr was copied from in_attr then it won't have a type yet. */
10214 if (in_attr[i].type && !out_attr[i].type)
10215 out_attr[i].type = in_attr[i].type;
10218 /* Merge Tag_compatibility attributes and any common GNU ones. */
10219 _bfd_elf_merge_object_attributes (ibfd, obfd);
10221 /* Check for any attributes not known on ARM. */
10222 in_list = elf_other_obj_attributes_proc (ibfd);
10223 out_listp = &elf_other_obj_attributes_proc (obfd);
10224 out_list = *out_listp;
10226 for (; in_list || out_list; )
10228 bfd *err_bfd = NULL;
10231 /* The tags for each list are in numerical order. */
10232 /* If the tags are equal, then merge. */
10233 if (out_list && (!in_list || in_list->tag > out_list->tag))
10235 /* This attribute only exists in obfd. We can't merge, and we don't
10236 know what the tag means, so delete it. */
10238 err_tag = out_list->tag;
10239 *out_listp = out_list->next;
10240 out_list = *out_listp;
10242 else if (in_list && (!out_list || in_list->tag < out_list->tag))
10244 /* This attribute only exists in ibfd. We can't merge, and we don't
10245 know what the tag means, so ignore it. */
10247 err_tag = in_list->tag;
10248 in_list = in_list->next;
10250 else /* The tags are equal. */
10252 /* At present, all attributes in the list are unknown, and
10253 therefore can't be merged meaningfully. */
10255 err_tag = out_list->tag;
10257 /* Only pass on attributes that match in both inputs. */
10258 if (in_list->attr.i != out_list->attr.i
10259 || in_list->attr.s != out_list->attr.s
10260 || (in_list->attr.s && out_list->attr.s
10261 && strcmp (in_list->attr.s, out_list->attr.s) != 0))
10263 /* No match. Delete the attribute. */
10264 *out_listp = out_list->next;
10265 out_list = *out_listp;
10269 /* Matched. Keep the attribute and move to the next. */
10270 out_list = out_list->next;
10271 in_list = in_list->next;
10277 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
10278 if ((err_tag & 127) < 64)
10281 (_("%B: Unknown mandatory EABI object attribute %d"),
10283 bfd_set_error (bfd_error_bad_value);
10289 (_("Warning: %B: Unknown EABI object attribute %d"),
10298 /* Return TRUE if the two EABI versions are compatible. */
10301 elf32_arm_versions_compatible (unsigned iver, unsigned over)
10303 /* v4 and v5 are the same spec before and after it was released,
10304 so allow mixing them. */
10305 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
10306 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
10309 return (iver == over);
10312 /* Merge backend specific data from an object file to the output
10313 object file when linking. */
10316 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
10318 /* Display the flags field. */
10321 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
10323 FILE * file = (FILE *) ptr;
10324 unsigned long flags;
10326 BFD_ASSERT (abfd != NULL && ptr != NULL);
10328 /* Print normal ELF private data. */
10329 _bfd_elf_print_private_bfd_data (abfd, ptr);
10331 flags = elf_elfheader (abfd)->e_flags;
10332 /* Ignore init flag - it may not be set, despite the flags field
10333 containing valid data. */
10335 /* xgettext:c-format */
10336 fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
10338 switch (EF_ARM_EABI_VERSION (flags))
10340 case EF_ARM_EABI_UNKNOWN:
10341 /* The following flag bits are GNU extensions and not part of the
10342 official ARM ELF extended ABI. Hence they are only decoded if
10343 the EABI version is not set. */
10344 if (flags & EF_ARM_INTERWORK)
10345 fprintf (file, _(" [interworking enabled]"));
10347 if (flags & EF_ARM_APCS_26)
10348 fprintf (file, " [APCS-26]");
10350 fprintf (file, " [APCS-32]");
10352 if (flags & EF_ARM_VFP_FLOAT)
10353 fprintf (file, _(" [VFP float format]"));
10354 else if (flags & EF_ARM_MAVERICK_FLOAT)
10355 fprintf (file, _(" [Maverick float format]"));
10357 fprintf (file, _(" [FPA float format]"));
10359 if (flags & EF_ARM_APCS_FLOAT)
10360 fprintf (file, _(" [floats passed in float registers]"));
10362 if (flags & EF_ARM_PIC)
10363 fprintf (file, _(" [position independent]"));
10365 if (flags & EF_ARM_NEW_ABI)
10366 fprintf (file, _(" [new ABI]"));
10368 if (flags & EF_ARM_OLD_ABI)
10369 fprintf (file, _(" [old ABI]"));
10371 if (flags & EF_ARM_SOFT_FLOAT)
10372 fprintf (file, _(" [software FP]"));
10374 flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
10375 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
10376 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
10377 | EF_ARM_MAVERICK_FLOAT);
10380 case EF_ARM_EABI_VER1:
10381 fprintf (file, _(" [Version1 EABI]"));
10383 if (flags & EF_ARM_SYMSARESORTED)
10384 fprintf (file, _(" [sorted symbol table]"));
10386 fprintf (file, _(" [unsorted symbol table]"));
10388 flags &= ~ EF_ARM_SYMSARESORTED;
10391 case EF_ARM_EABI_VER2:
10392 fprintf (file, _(" [Version2 EABI]"));
10394 if (flags & EF_ARM_SYMSARESORTED)
10395 fprintf (file, _(" [sorted symbol table]"));
10397 fprintf (file, _(" [unsorted symbol table]"));
10399 if (flags & EF_ARM_DYNSYMSUSESEGIDX)
10400 fprintf (file, _(" [dynamic symbols use segment index]"));
10402 if (flags & EF_ARM_MAPSYMSFIRST)
10403 fprintf (file, _(" [mapping symbols precede others]"));
10405 flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
10406 | EF_ARM_MAPSYMSFIRST);
10409 case EF_ARM_EABI_VER3:
10410 fprintf (file, _(" [Version3 EABI]"));
10413 case EF_ARM_EABI_VER4:
10414 fprintf (file, _(" [Version4 EABI]"));
10417 case EF_ARM_EABI_VER5:
10418 fprintf (file, _(" [Version5 EABI]"));
10420 if (flags & EF_ARM_BE8)
10421 fprintf (file, _(" [BE8]"));
10423 if (flags & EF_ARM_LE8)
10424 fprintf (file, _(" [LE8]"));
10426 flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
10430 fprintf (file, _(" <EABI version unrecognised>"));
10434 flags &= ~ EF_ARM_EABIMASK;
10436 if (flags & EF_ARM_RELEXEC)
10437 fprintf (file, _(" [relocatable executable]"));
10439 if (flags & EF_ARM_HASENTRY)
10440 fprintf (file, _(" [has entry point]"));
10442 flags &= ~ (EF_ARM_RELEXEC | EF_ARM_HASENTRY);
10445 fprintf (file, _("<Unrecognised flag bits set>"));
10447 fputc ('\n', file);
10453 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
10455 switch (ELF_ST_TYPE (elf_sym->st_info))
10457 case STT_ARM_TFUNC:
10458 return ELF_ST_TYPE (elf_sym->st_info);
10460 case STT_ARM_16BIT:
10461 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
10462 This allows us to distinguish between data used by Thumb instructions
10463 and non-data (which is probably code) inside Thumb regions of an ELF file. */
10465 if (type != STT_OBJECT && type != STT_TLS)
10466 return ELF_ST_TYPE (elf_sym->st_info);
10477 elf32_arm_gc_mark_hook (asection *sec,
10478 struct bfd_link_info *info,
10479 Elf_Internal_Rela *rel,
10480 struct elf_link_hash_entry *h,
10481 Elf_Internal_Sym *sym)
10484 switch (ELF32_R_TYPE (rel->r_info))
10486 case R_ARM_GNU_VTINHERIT:
10487 case R_ARM_GNU_VTENTRY:
10491 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
10494 /* Update the got entry reference counts for the section being removed. */
10497 elf32_arm_gc_sweep_hook (bfd * abfd,
10498 struct bfd_link_info * info,
10500 const Elf_Internal_Rela * relocs)
10502 Elf_Internal_Shdr *symtab_hdr;
10503 struct elf_link_hash_entry **sym_hashes;
10504 bfd_signed_vma *local_got_refcounts;
10505 const Elf_Internal_Rela *rel, *relend;
10506 struct elf32_arm_link_hash_table * globals;
10508 if (info->relocatable)
10511 globals = elf32_arm_hash_table (info);
10512 if (globals == NULL)
10515 elf_section_data (sec)->local_dynrel = NULL;
10517 symtab_hdr = & elf_symtab_hdr (abfd);
10518 sym_hashes = elf_sym_hashes (abfd);
10519 local_got_refcounts = elf_local_got_refcounts (abfd);
10521 check_use_blx (globals);
10523 relend = relocs + sec->reloc_count;
10524 for (rel = relocs; rel < relend; rel++)
10526 unsigned long r_symndx;
10527 struct elf_link_hash_entry *h = NULL;
10530 r_symndx = ELF32_R_SYM (rel->r_info);
10531 if (r_symndx >= symtab_hdr->sh_info)
10533 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10534 while (h->root.type == bfd_link_hash_indirect
10535 || h->root.type == bfd_link_hash_warning)
10536 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10539 r_type = ELF32_R_TYPE (rel->r_info);
10540 r_type = arm_real_reloc_type (globals, r_type);
10544 case R_ARM_GOT_PREL:
10545 case R_ARM_TLS_GD32:
10546 case R_ARM_TLS_IE32:
10549 if (h->got.refcount > 0)
10550 h->got.refcount -= 1;
10552 else if (local_got_refcounts != NULL)
10554 if (local_got_refcounts[r_symndx] > 0)
10555 local_got_refcounts[r_symndx] -= 1;
10559 case R_ARM_TLS_LDM32:
10560 globals->tls_ldm_got.refcount -= 1;
10564 case R_ARM_ABS32_NOI:
10566 case R_ARM_REL32_NOI:
10572 case R_ARM_THM_CALL:
10573 case R_ARM_THM_JUMP24:
10574 case R_ARM_THM_JUMP19:
10575 case R_ARM_MOVW_ABS_NC:
10576 case R_ARM_MOVT_ABS:
10577 case R_ARM_MOVW_PREL_NC:
10578 case R_ARM_MOVT_PREL:
10579 case R_ARM_THM_MOVW_ABS_NC:
10580 case R_ARM_THM_MOVT_ABS:
10581 case R_ARM_THM_MOVW_PREL_NC:
10582 case R_ARM_THM_MOVT_PREL:
10583 /* Should the interworking branches be here also? */
10587 struct elf32_arm_link_hash_entry *eh;
10588 struct elf32_arm_relocs_copied **pp;
10589 struct elf32_arm_relocs_copied *p;
10591 eh = (struct elf32_arm_link_hash_entry *) h;
10593 if (h->plt.refcount > 0)
10595 h->plt.refcount -= 1;
10596 if (r_type == R_ARM_THM_CALL)
10597 eh->plt_maybe_thumb_refcount--;
10599 if (r_type == R_ARM_THM_JUMP24
10600 || r_type == R_ARM_THM_JUMP19)
10601 eh->plt_thumb_refcount--;
10604 if (r_type == R_ARM_ABS32
10605 || r_type == R_ARM_REL32
10606 || r_type == R_ARM_ABS32_NOI
10607 || r_type == R_ARM_REL32_NOI)
10609 for (pp = &eh->relocs_copied; (p = *pp) != NULL;
10611 if (p->section == sec)
10614 if (ELF32_R_TYPE (rel->r_info) == R_ARM_REL32
10615 || ELF32_R_TYPE (rel->r_info) == R_ARM_REL32_NOI)
10633 /* Look through the relocs for a section during the first phase. */
10636 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
10637 asection *sec, const Elf_Internal_Rela *relocs)
10639 Elf_Internal_Shdr *symtab_hdr;
10640 struct elf_link_hash_entry **sym_hashes;
10641 const Elf_Internal_Rela *rel;
10642 const Elf_Internal_Rela *rel_end;
10645 bfd_vma *local_got_offsets;
10646 struct elf32_arm_link_hash_table *htab;
10647 bfd_boolean needs_plt;
10648 unsigned long nsyms;
10650 if (info->relocatable)
10653 BFD_ASSERT (is_arm_elf (abfd));
10655 htab = elf32_arm_hash_table (info);
10661 /* Create dynamic sections for relocatable executables so that we can
10662 copy relocations. */
10663 if (htab->root.is_relocatable_executable
10664 && ! htab->root.dynamic_sections_created)
10666 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
10670 dynobj = elf_hash_table (info)->dynobj;
10671 local_got_offsets = elf_local_got_offsets (abfd);
10673 symtab_hdr = & elf_symtab_hdr (abfd);
10674 sym_hashes = elf_sym_hashes (abfd);
10675 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
10677 rel_end = relocs + sec->reloc_count;
10678 for (rel = relocs; rel < rel_end; rel++)
10680 struct elf_link_hash_entry *h;
10681 struct elf32_arm_link_hash_entry *eh;
10682 unsigned long r_symndx;
10685 r_symndx = ELF32_R_SYM (rel->r_info);
10686 r_type = ELF32_R_TYPE (rel->r_info);
10687 r_type = arm_real_reloc_type (htab, r_type);
10689 if (r_symndx >= nsyms
10690 /* PR 9934: It is possible to have relocations that do not
10691 refer to symbols, thus it is also possible to have an
10692 object file containing relocations but no symbol table. */
10693 && (r_symndx > 0 || nsyms > 0))
10695 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
10700 if (nsyms == 0 || r_symndx < symtab_hdr->sh_info)
10704 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
10705 while (h->root.type == bfd_link_hash_indirect
10706 || h->root.type == bfd_link_hash_warning)
10707 h = (struct elf_link_hash_entry *) h->root.u.i.link;
10710 eh = (struct elf32_arm_link_hash_entry *) h;
10715 case R_ARM_GOT_PREL:
10716 case R_ARM_TLS_GD32:
10717 case R_ARM_TLS_IE32:
10718 /* This symbol requires a global offset table entry. */
10720 int tls_type, old_tls_type;
10724 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
10725 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
10726 default: tls_type = GOT_NORMAL; break;
10732 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
10736 bfd_signed_vma *local_got_refcounts;
10738 /* This is a global offset table entry for a local symbol. */
10739 local_got_refcounts = elf_local_got_refcounts (abfd);
10740 if (local_got_refcounts == NULL)
10742 bfd_size_type size;
10744 size = symtab_hdr->sh_info;
10745 size *= (sizeof (bfd_signed_vma) + sizeof (char));
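/* A single allocation holds the GOT reference counts followed by the
   per-symbol TLS type bytes. */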
10746 local_got_refcounts = (bfd_signed_vma *)
10747 bfd_zalloc (abfd, size);
10748 if (local_got_refcounts == NULL)
10750 elf_local_got_refcounts (abfd) = local_got_refcounts;
10751 elf32_arm_local_got_tls_type (abfd)
10752 = (char *) (local_got_refcounts + symtab_hdr->sh_info);
10754 local_got_refcounts[r_symndx] += 1;
10755 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
10758 /* We will already have issued an error message if there is a
10759 TLS / non-TLS mismatch, based on the symbol type. We don't
10760 support any linker relaxations. So just combine any TLS types needed. */
10762 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
10763 && tls_type != GOT_NORMAL)
10764 tls_type |= old_tls_type;
10766 if (old_tls_type != tls_type)
10769 elf32_arm_hash_entry (h)->tls_type = tls_type;
10771 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
10774 /* Fall through. */
10776 case R_ARM_TLS_LDM32:
10777 if (r_type == R_ARM_TLS_LDM32)
10778 htab->tls_ldm_got.refcount++;
10779 /* Fall through. */
10781 case R_ARM_GOTOFF32:
10783 if (htab->sgot == NULL)
10785 if (htab->root.dynobj == NULL)
10786 htab->root.dynobj = abfd;
10787 if (!create_got_section (htab->root.dynobj, info))
10793 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
10794 ldr __GOTT_INDEX__ offsets. */
10795 if (!htab->vxworks_p)
10797 /* Fall through. */
10804 case R_ARM_THM_CALL:
10805 case R_ARM_THM_JUMP24:
10806 case R_ARM_THM_JUMP19:
10810 case R_ARM_MOVW_ABS_NC:
10811 case R_ARM_MOVT_ABS:
10812 case R_ARM_THM_MOVW_ABS_NC:
10813 case R_ARM_THM_MOVT_ABS:
10816 (*_bfd_error_handler)
10817 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
10818 abfd, elf32_arm_howto_table_1[r_type].name,
10819 (h) ? h->root.root.string : "a local symbol");
10820 bfd_set_error (bfd_error_bad_value);
10824 /* Fall through. */
10826 case R_ARM_ABS32_NOI:
10828 case R_ARM_REL32_NOI:
10829 case R_ARM_MOVW_PREL_NC:
10830 case R_ARM_MOVT_PREL:
10831 case R_ARM_THM_MOVW_PREL_NC:
10832 case R_ARM_THM_MOVT_PREL:
10836 /* Should the interworking branches be listed here? */
10839 /* If this reloc is in a read-only section, we might
10840 need a copy reloc. We can't check reliably at this
10841 stage whether the section is read-only, as input
10842 sections have not yet been mapped to output sections.
10843 Tentatively set the flag for now, and correct in
10844 adjust_dynamic_symbol. */
10846 h->non_got_ref = 1;
10848 /* We may need a .plt entry if the function this reloc
10849 refers to is in a different object. We can't tell for
10850 sure yet, because something later might force the symbol local. */
10855 /* If we create a PLT entry, this relocation will reference
10856 it, even if it's an ABS32 relocation. */
10857 h->plt.refcount += 1;
10859 /* It's too early to use htab->use_blx here, so we have to
10860 record possible blx references separately from
10861 relocs that definitely need a thumb stub. */
10863 if (r_type == R_ARM_THM_CALL)
10864 eh->plt_maybe_thumb_refcount += 1;
10866 if (r_type == R_ARM_THM_JUMP24
10867 || r_type == R_ARM_THM_JUMP19)
10868 eh->plt_thumb_refcount += 1;
10871 /* If we are creating a shared library or relocatable executable,
10872 and this is a reloc against a global symbol, or a non PC
10873 relative reloc against a local symbol, then we need to copy
10874 the reloc into the shared library. However, if we are linking
10875 with -Bsymbolic, we do not need to copy a reloc against a
10876 global symbol which is defined in an object we are
10877 including in the link (i.e., DEF_REGULAR is set). At
10878 this point we have not seen all the input files, so it is
10879 possible that DEF_REGULAR is not set now but will be set
10880 later (it is never cleared). We account for that
10881 possibility below by storing information in the
10882 relocs_copied field of the hash table entry. */
10883 if ((info->shared || htab->root.is_relocatable_executable)
10884 && (sec->flags & SEC_ALLOC) != 0
10885 && ((r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI)
10886 || (h != NULL && ! h->needs_plt
10887 && (! info->symbolic || ! h->def_regular))))
10889 struct elf32_arm_relocs_copied *p, **head;
10891 /* When creating a shared object, we must copy these
10892 reloc types into the output file. We create a reloc
10893 section in dynobj and make room for this reloc. */
10894 if (sreloc == NULL)
10896 sreloc = _bfd_elf_make_dynamic_reloc_section
10897 (sec, dynobj, 2, abfd, ! htab->use_rel);
10899 if (sreloc == NULL)
10902 /* BPABI objects never have dynamic relocations mapped. */
10903 if (htab->symbian_p)
10907 flags = bfd_get_section_flags (dynobj, sreloc);
10908 flags &= ~(SEC_LOAD | SEC_ALLOC);
10909 bfd_set_section_flags (dynobj, sreloc, flags);
10913 /* If this is a global symbol, we count the number of
10914 relocations we need for this symbol. */
10917 head = &((struct elf32_arm_link_hash_entry *) h)->relocs_copied;
10921 /* Track dynamic relocs needed for local syms too.
10922 We really need local syms available to do this
10923 easily. Oh well. */
10926 Elf_Internal_Sym *isym;
10928 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
10933 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
10937 vpp = &elf_section_data (s)->local_dynrel;
10938 head = (struct elf32_arm_relocs_copied **) vpp;
10942 if (p == NULL || p->section != sec)
10944 bfd_size_type amt = sizeof *p;
10946 p = (struct elf32_arm_relocs_copied *)
10947 bfd_alloc (htab->root.dynobj, amt);
10957 if (r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10963 /* This relocation describes the C++ object vtable hierarchy.
10964 Reconstruct it for later use during GC. */
10965 case R_ARM_GNU_VTINHERIT:
10966 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
10970 /* This relocation describes which C++ vtable entries are actually
10971 used. Record for later use during GC. */
10972 case R_ARM_GNU_VTENTRY:
10973 BFD_ASSERT (h != NULL);
10975 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
10984 /* Unwinding tables are not referenced directly. This pass marks them as
10985 required if the corresponding code section is marked. */
10988 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
10989 elf_gc_mark_hook_fn gc_mark_hook)
10992 Elf_Internal_Shdr **elf_shdrp;
10995 /* Marking EH data may cause additional code sections to be marked,
10996 requiring multiple passes. */
11001 for (sub = info->input_bfds; sub != NULL; sub = sub->link_next)
11005 if (! is_arm_elf (sub))
11008 elf_shdrp = elf_elfsections (sub);
11009 for (o = sub->sections; o != NULL; o = o->next)
11011 Elf_Internal_Shdr *hdr;
11013 hdr = &elf_section_data (o)->this_hdr;
11014 if (hdr->sh_type == SHT_ARM_EXIDX
11016 && hdr->sh_link < elf_numsections (sub)
11018 && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
11021 if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
11031 /* Treat mapping symbols as special target symbols. */
11034 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
11036 return bfd_is_arm_special_symbol_name (sym->name,
11037 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
11040 /* This is a copy of elf_find_function() from elf.c except that
11041 ARM mapping symbols are ignored when looking for function names
11042 and STT_ARM_TFUNC is considered to be a function type. */
11045 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
11046 asection * section,
11047 asymbol ** symbols,
11049 const char ** filename_ptr,
11050 const char ** functionname_ptr)
11052 const char * filename = NULL;
11053 asymbol * func = NULL;
11054 bfd_vma low_func = 0;
11057 for (p = symbols; *p != NULL; p++)
11059 elf_symbol_type *q;
11061 q = (elf_symbol_type *) *p;
11063 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
11068 filename = bfd_asymbol_name (&q->symbol);
11071 case STT_ARM_TFUNC:
11073 /* Skip mapping symbols. */
11074 if ((q->symbol.flags & BSF_LOCAL)
11075 && bfd_is_arm_special_symbol_name (q->symbol.name,
11076 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
11078 /* Fall through. */
11079 if (bfd_get_section (&q->symbol) == section
11080 && q->symbol.value >= low_func
11081 && q->symbol.value <= offset)
11083 func = (asymbol *) q;
11084 low_func = q->symbol.value;
11094 *filename_ptr = filename;
11095 if (functionname_ptr)
11096 *functionname_ptr = bfd_asymbol_name (func);
11102 /* Find the nearest line to a particular section and offset, for error
11103 reporting. This code is a duplicate of the code in elf.c, except
11104 that it uses arm_elf_find_function. */
11107 elf32_arm_find_nearest_line (bfd * abfd,
11108 asection * section,
11109 asymbol ** symbols,
11111 const char ** filename_ptr,
11112 const char ** functionname_ptr,
11113 unsigned int * line_ptr)
11115 bfd_boolean found = FALSE;
11117 /* We skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain uses it. */
11119 if (_bfd_dwarf2_find_nearest_line (abfd, section, symbols, offset,
11120 filename_ptr, functionname_ptr,
11122 & elf_tdata (abfd)->dwarf2_find_line_info))
11124 if (!*functionname_ptr)
11125 arm_elf_find_function (abfd, section, symbols, offset,
11126 *filename_ptr ? NULL : filename_ptr,
11132 if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
11133 & found, filename_ptr,
11134 functionname_ptr, line_ptr,
11135 & elf_tdata (abfd)->line_info))
11138 if (found && (*functionname_ptr || *line_ptr))
11141 if (symbols == NULL)
11144 if (! arm_elf_find_function (abfd, section, symbols, offset,
11145 filename_ptr, functionname_ptr))
11153 elf32_arm_find_inliner_info (bfd * abfd,
11154 const char ** filename_ptr,
11155 const char ** functionname_ptr,
11156 unsigned int * line_ptr)
11159 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
11160 functionname_ptr, line_ptr,
11161 & elf_tdata (abfd)->dwarf2_find_line_info);
11165 /* Adjust a symbol defined by a dynamic object and referenced by a
11166 regular object. The current definition is in some section of the
11167 dynamic object, but we're not including those sections. We have to
11168 change the definition to something the rest of the link can understand. */
11172 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
11173 struct elf_link_hash_entry * h)
11177 struct elf32_arm_link_hash_entry * eh;
11178 struct elf32_arm_link_hash_table *globals;
11180 globals = elf32_arm_hash_table (info);
11181 if (globals == NULL)
11184 dynobj = elf_hash_table (info)->dynobj;
11186 /* Make sure we know what is going on here. */
11187 BFD_ASSERT (dynobj != NULL
11189 || h->u.weakdef != NULL
11192 && !h->def_regular)));
11194 eh = (struct elf32_arm_link_hash_entry *) h;
11196 /* If this is a function, put it in the procedure linkage table. We
11197 will fill in the contents of the procedure linkage table later,
11198 when we know the address of the .got section. */
11199 if (h->type == STT_FUNC || h->type == STT_ARM_TFUNC
11202 if (h->plt.refcount <= 0
11203 || SYMBOL_CALLS_LOCAL (info, h)
11204 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
11205 && h->root.type == bfd_link_hash_undefweak))
11207 /* This case can occur if we saw a PLT32 reloc in an input
11208 file, but the symbol was never referred to by a dynamic
11209 object, or if all references were garbage collected. In
11210 such a case, we don't actually need to build a procedure
11211 linkage table, and we can just do a PC24 reloc instead. */
11212 h->plt.offset = (bfd_vma) -1;
11213 eh->plt_thumb_refcount = 0;
11214 eh->plt_maybe_thumb_refcount = 0;
11222 /* It's possible that we incorrectly decided a .plt reloc was
11223 needed for an R_ARM_PC24 or similar reloc to a non-function sym
11224 in check_relocs. We can't decide accurately between function
11225 and non-function syms in check_relocs; objects loaded later in
11226 the link may change h->type. So fix it now. */
11227 h->plt.offset = (bfd_vma) -1;
11228 eh->plt_thumb_refcount = 0;
11229 eh->plt_maybe_thumb_refcount = 0;
11232 /* If this is a weak symbol, and there is a real definition, the
11233 processor independent code will have arranged for us to see the
11234 real definition first, and we can just use the same value. */
11235 if (h->u.weakdef != NULL)
11237 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
11238 || h->u.weakdef->root.type == bfd_link_hash_defweak);
11239 h->root.u.def.section = h->u.weakdef->root.u.def.section;
11240 h->root.u.def.value = h->u.weakdef->root.u.def.value;
11244 /* If there are no non-GOT references, we do not need a copy
11246 if (!h->non_got_ref)
11249 /* This is a reference to a symbol defined by a dynamic object which
11250 is not a function. */
11252 /* If we are creating a shared library, we must presume that the
11253 only references to the symbol are via the global offset table.
11254 For such cases we need not do anything here; the relocations will
11255 be handled correctly by relocate_section. Relocatable executables
11256 can reference data in shared objects directly, so we don't need to
11257 do anything here. */
11258 if (info->shared || globals->root.is_relocatable_executable)
11263 (*_bfd_error_handler) (_("dynamic variable `%s' is zero size"),
11264 h->root.root.string);
11268 /* We must allocate the symbol in our .dynbss section, which will
11269 become part of the .bss section of the executable. There will be
11270 an entry for this symbol in the .dynsym section. The dynamic
11271 object will contain position independent code, so all references
11272 from the dynamic object to this symbol will go through the global
11273 offset table. The dynamic linker will use the .dynsym entry to
11274 determine the address it must put in the global offset table, so
11275 both the dynamic object and the regular object will refer to the
11276 same memory location for the variable. */
11277 s = bfd_get_section_by_name (dynobj, ".dynbss");
11278 BFD_ASSERT (s != NULL);
11280 /* We must generate a R_ARM_COPY reloc to tell the dynamic linker to
11281 copy the initial value out of the dynamic object and into the
11282 runtime process image. We need to remember the offset into the
11283 .rel(a).bss section we are going to use. */
11284 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0)
11288 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (globals, ".bss"));
11289 BFD_ASSERT (srel != NULL);
11290 srel->size += RELOC_SIZE (globals);
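/* Note: the actual space for the copied variable is reserved by
   _bfd_elf_adjust_dynamic_copy below, which aligns .dynbss, defines H at
   the new location and then grows the section by the symbol's size.  */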
11294 return _bfd_elf_adjust_dynamic_copy (h, s);
11297 /* Allocate space in .plt, .got and associated reloc sections for
11301 allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf)
11303 struct bfd_link_info *info;
11304 struct elf32_arm_link_hash_table *htab;
11305 struct elf32_arm_link_hash_entry *eh;
11306 struct elf32_arm_relocs_copied *p;
11307 bfd_signed_vma thumb_refs;
11309 eh = (struct elf32_arm_link_hash_entry *) h;
11311 if (h->root.type == bfd_link_hash_indirect)
11314 if (h->root.type == bfd_link_hash_warning)
11315 /* When warning symbols are created, they **replace** the "real"
11316 entry in the hash table, thus we never get to see the real
11317 symbol in a hash traversal. So look at it now. */
11318 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11320 info = (struct bfd_link_info *) inf;
11321 htab = elf32_arm_hash_table (info);
11325 if (htab->root.dynamic_sections_created
11326 && h->plt.refcount > 0)
11328 /* Make sure this symbol is output as a dynamic symbol.
11329 Undefined weak syms won't yet be marked as dynamic. */
11330 if (h->dynindx == -1
11331 && !h->forced_local)
11333 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11338 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
11340 asection *s = htab->splt;
11342 /* If this is the first .plt entry, make room for the special
11345 s->size += htab->plt_header_size;
11347 h->plt.offset = s->size;
11349 /* If we will insert a Thumb trampoline before this PLT, leave room
11351 thumb_refs = eh->plt_thumb_refcount;
11352 if (!htab->use_blx)
11353 thumb_refs += eh->plt_maybe_thumb_refcount;
11355 if (thumb_refs > 0)
11357 h->plt.offset += PLT_THUMB_STUB_SIZE;
11358 s->size += PLT_THUMB_STUB_SIZE;
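/* The trampoline is a small Thumb-to-ARM transition stub placed
   immediately before the ARM PLT entry; h->plt.offset now points past
   it, at the ARM code itself.  */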
11361 /* If this symbol is not defined in a regular file, and we are
11362 not generating a shared library, then set the symbol to this
11363 location in the .plt. This is required to make function
11364 pointers compare as equal between the normal executable and
11365 the shared library. */
11367 && !h->def_regular)
11369 h->root.u.def.section = s;
11370 h->root.u.def.value = h->plt.offset;
11372 /* Make sure the function is not marked as Thumb, in case
11373 it is the target of an ABS32 relocation, which will
11374 point to the PLT entry. */
11375 if (ELF_ST_TYPE (h->type) == STT_ARM_TFUNC)
11376 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11379 /* Make room for this entry. */
11380 s->size += htab->plt_entry_size;
11382 if (!htab->symbian_p)
11384 /* We also need to make an entry in the .got.plt section, which
11385 will be placed in the .got section by the linker script. */
11386 eh->plt_got_offset = htab->sgotplt->size;
11387 htab->sgotplt->size += 4;
11390 /* We also need to make an entry in the .rel(a).plt section. */
11391 htab->srelplt->size += RELOC_SIZE (htab);
11393 /* VxWorks executables have a second set of relocations for
11394 each PLT entry. They go in a separate relocation section,
11395 which is processed by the kernel loader. */
11396 if (htab->vxworks_p && !info->shared)
11398 /* There is a relocation for the initial PLT entry:
11399 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
11400 if (h->plt.offset == htab->plt_header_size)
11401 htab->srelplt2->size += RELOC_SIZE (htab);
11403 /* There are two extra relocations for each subsequent
11404 PLT entry: an R_ARM_32 relocation for the GOT entry,
11405 and an R_ARM_32 relocation for the PLT entry. */
11406 htab->srelplt2->size += RELOC_SIZE (htab) * 2;
11411 h->plt.offset = (bfd_vma) -1;
11417 h->plt.offset = (bfd_vma) -1;
11421 if (h->got.refcount > 0)
11425 int tls_type = elf32_arm_hash_entry (h)->tls_type;
11428 /* Make sure this symbol is output as a dynamic symbol.
11429 Undefined weak syms won't yet be marked as dynamic. */
11430 if (h->dynindx == -1
11431 && !h->forced_local)
11433 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11437 if (!htab->symbian_p)
11440 h->got.offset = s->size;
11442 if (tls_type == GOT_UNKNOWN)
11445 if (tls_type == GOT_NORMAL)
11446 /* Non-TLS symbols need one GOT slot. */
11450 if (tls_type & GOT_TLS_GD)
11451 /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots. */
11453 if (tls_type & GOT_TLS_IE)
11454 /* R_ARM_TLS_IE32 needs one GOT slot. */
11458 dyn = htab->root.dynamic_sections_created;
11461 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
11463 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11466 if (tls_type != GOT_NORMAL
11467 && (info->shared || indx != 0)
11468 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11469 || h->root.type != bfd_link_hash_undefweak))
11471 if (tls_type & GOT_TLS_IE)
11472 htab->srelgot->size += RELOC_SIZE (htab);
11474 if (tls_type & GOT_TLS_GD)
11475 htab->srelgot->size += RELOC_SIZE (htab);
11477 if ((tls_type & GOT_TLS_GD) && indx != 0)
11478 htab->srelgot->size += RELOC_SIZE (htab);
11480 else if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11481 || h->root.type != bfd_link_hash_undefweak)
11483 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
11484 htab->srelgot->size += RELOC_SIZE (htab);
11488 h->got.offset = (bfd_vma) -1;
11490 /* Allocate stubs for exported Thumb functions on v4t. */
11491 if (!htab->use_blx && h->dynindx != -1
11493 && ELF_ST_TYPE (h->type) == STT_ARM_TFUNC
11494 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
11496 struct elf_link_hash_entry * th;
11497 struct bfd_link_hash_entry * bh;
11498 struct elf_link_hash_entry * myh;
11502 /* Create a new symbol to register the real location of the function. */
11503 s = h->root.u.def.section;
11504 sprintf (name, "__real_%s", h->root.root.string);
11505 _bfd_generic_link_add_one_symbol (info, s->owner,
11506 name, BSF_GLOBAL, s,
11507 h->root.u.def.value,
11508 NULL, TRUE, FALSE, &bh);
11510 myh = (struct elf_link_hash_entry *) bh;
11511 myh->type = ELF_ST_INFO (STB_LOCAL, STT_ARM_TFUNC);
11512 myh->forced_local = 1;
11513 eh->export_glue = myh;
11514 th = record_arm_to_thumb_glue (info, h);
11515 /* Point the symbol at the stub. */
11516 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
11517 h->root.u.def.section = th->root.u.def.section;
11518 h->root.u.def.value = th->root.u.def.value & ~1;
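/* Clearing bit 0 removes the Thumb address marker: the exported symbol
   now refers to the ARM veneer, while the original Thumb definition
   remains reachable via the __real_<name> symbol created above.  */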
11521 if (eh->relocs_copied == NULL)
11524 /* In the shared -Bsymbolic case, discard space allocated for
11525 dynamic pc-relative relocs against symbols which turn out to be
11526 defined in regular objects. For the normal shared case, discard
11527 space for pc-relative relocs that have become local due to symbol
11528 visibility changes. */
11530 if (info->shared || htab->root.is_relocatable_executable)
11532 /* The only relocs that use pc_count are R_ARM_REL32 and
11533 R_ARM_REL32_NOI, which will appear on something like
11534 ".long foo - .". We want calls to protected symbols to resolve
11535 directly to the function rather than going via the plt. If people
11536 want function pointer comparisons to work as expected then they
11537 should avoid writing assembly like ".long foo - .". */
11538 if (SYMBOL_CALLS_LOCAL (info, h))
11540 struct elf32_arm_relocs_copied **pp;
11542 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11544 p->count -= p->pc_count;
11553 if (htab->vxworks_p)
11555 struct elf32_arm_relocs_copied **pp;
11557 for (pp = &eh->relocs_copied; (p = *pp) != NULL; )
11559 if (strcmp (p->section->output_section->name, ".tls_vars") == 0)
11566 /* Also discard relocs on undefined weak syms with non-default
11568 if (eh->relocs_copied != NULL
11569 && h->root.type == bfd_link_hash_undefweak)
11571 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
11572 eh->relocs_copied = NULL;
11574 /* Make sure undefined weak symbols are output as a dynamic
11576 else if (h->dynindx == -1
11577 && !h->forced_local)
11579 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11584 else if (htab->root.is_relocatable_executable && h->dynindx == -1
11585 && h->root.type == bfd_link_hash_new)
11587 /* Output absolute symbols so that we can create relocations
11588 against them. For normal symbols we output a relocation
11589 against the section that contains them. */
11590 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11597 /* For the non-shared case, discard space for relocs against
11598 symbols which turn out to need copy relocs or are not
11601 if (!h->non_got_ref
11602 && ((h->def_dynamic
11603 && !h->def_regular)
11604 || (htab->root.dynamic_sections_created
11605 && (h->root.type == bfd_link_hash_undefweak
11606 || h->root.type == bfd_link_hash_undefined))))
11608 /* Make sure this symbol is output as a dynamic symbol.
11609 Undefined weak syms won't yet be marked as dynamic. */
11610 if (h->dynindx == -1
11611 && !h->forced_local)
11613 if (! bfd_elf_link_record_dynamic_symbol (info, h))
11617 /* If that succeeded, we know we'll be keeping all the
11619 if (h->dynindx != -1)
11623 eh->relocs_copied = NULL;
11628 /* Finally, allocate space. */
11629 for (p = eh->relocs_copied; p != NULL; p = p->next)
11631 asection *sreloc = elf_section_data (p->section)->sreloc;
11632 sreloc->size += p->count * RELOC_SIZE (htab);
11638 /* Find any dynamic relocs that apply to read-only sections. */
11641 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
11643 struct elf32_arm_link_hash_entry * eh;
11644 struct elf32_arm_relocs_copied * p;
11646 if (h->root.type == bfd_link_hash_warning)
11647 h = (struct elf_link_hash_entry *) h->root.u.i.link;
11649 eh = (struct elf32_arm_link_hash_entry *) h;
11650 for (p = eh->relocs_copied; p != NULL; p = p->next)
11652 asection *s = p->section;
11654 if (s != NULL && (s->flags & SEC_READONLY) != 0)
11656 struct bfd_link_info *info = (struct bfd_link_info *) inf;
11658 info->flags |= DF_TEXTREL;
11660 /* Not an error, just cut short the traversal. */
11668 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
11671 struct elf32_arm_link_hash_table *globals;
11673 globals = elf32_arm_hash_table (info);
11674 if (globals == NULL)
11677 globals->byteswap_code = byteswap_code;
11680 /* Set the sizes of the dynamic sections. */
11683 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
11684 struct bfd_link_info * info)
11689 bfd_boolean relocs;
11691 struct elf32_arm_link_hash_table *htab;
11693 htab = elf32_arm_hash_table (info);
11697 dynobj = elf_hash_table (info)->dynobj;
11698 BFD_ASSERT (dynobj != NULL);
11699 check_use_blx (htab);
11701 if (elf_hash_table (info)->dynamic_sections_created)
11703 /* Set the contents of the .interp section to the interpreter. */
11704 if (info->executable)
11706 s = bfd_get_section_by_name (dynobj, ".interp");
11707 BFD_ASSERT (s != NULL);
11708 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
11709 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
11713 /* Set up .got offsets for local syms, and space for local dynamic
11715 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11717 bfd_signed_vma *local_got;
11718 bfd_signed_vma *end_local_got;
11719 char *local_tls_type;
11720 bfd_size_type locsymcount;
11721 Elf_Internal_Shdr *symtab_hdr;
11723 bfd_boolean is_vxworks = htab->vxworks_p;
11725 if (! is_arm_elf (ibfd))
11728 for (s = ibfd->sections; s != NULL; s = s->next)
11730 struct elf32_arm_relocs_copied *p;
11732 for (p = (struct elf32_arm_relocs_copied *)
11733 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
11735 if (!bfd_is_abs_section (p->section)
11736 && bfd_is_abs_section (p->section->output_section))
11738 /* Input section has been discarded, either because
11739 it is a copy of a linkonce section or due to
11740 linker script /DISCARD/, so we'll be discarding
11743 else if (is_vxworks
11744 && strcmp (p->section->output_section->name,
11747 /* Relocations in vxworks .tls_vars sections are
11748 handled specially by the loader. */
11750 else if (p->count != 0)
11752 srel = elf_section_data (p->section)->sreloc;
11753 srel->size += p->count * RELOC_SIZE (htab);
11754 if ((p->section->output_section->flags & SEC_READONLY) != 0)
11755 info->flags |= DF_TEXTREL;
11760 local_got = elf_local_got_refcounts (ibfd);
11764 symtab_hdr = & elf_symtab_hdr (ibfd);
11765 locsymcount = symtab_hdr->sh_info;
11766 end_local_got = local_got + locsymcount;
11767 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
11769 srel = htab->srelgot;
11770 for (; local_got < end_local_got; ++local_got, ++local_tls_type)
11772 if (*local_got > 0)
11774 *local_got = s->size;
11775 if (*local_tls_type & GOT_TLS_GD)
11776 /* TLS_GD relocs need an 8-byte structure in the GOT. */
11778 if (*local_tls_type & GOT_TLS_IE)
11780 if (*local_tls_type == GOT_NORMAL)
11783 if (info->shared || *local_tls_type == GOT_TLS_GD)
11784 srel->size += RELOC_SIZE (htab);
11787 *local_got = (bfd_vma) -1;
11791 if (htab->tls_ldm_got.refcount > 0)
11793 /* Allocate two GOT entries and one dynamic relocation (if necessary)
11794 for R_ARM_TLS_LDM32 relocations. */
11795 htab->tls_ldm_got.offset = htab->sgot->size;
11796 htab->sgot->size += 8;
11798 htab->srelgot->size += RELOC_SIZE (htab);
11801 htab->tls_ldm_got.offset = -1;
11803 /* Allocate global sym .plt and .got entries, and space for global
11804 sym dynamic relocs. */
11805 elf_link_hash_traverse (& htab->root, allocate_dynrelocs, info);
11807 /* Here we rummage through the found bfds to collect glue information. */
11808 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
11810 if (! is_arm_elf (ibfd))
11813 /* Initialise mapping tables for code/data. */
11814 bfd_elf32_arm_init_maps (ibfd);
11816 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
11817 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info))
11818 /* xgettext:c-format */
11819 _bfd_error_handler (_("Errors encountered processing file %s"),
11823 /* Allocate space for the glue sections now that we've sized them. */
11824 bfd_elf32_arm_allocate_interworking_sections (info);
11826 /* The check_relocs and adjust_dynamic_symbol entry points have
11827 determined the sizes of the various dynamic sections. Allocate
11828 memory for them. */
11831 for (s = dynobj->sections; s != NULL; s = s->next)
11835 if ((s->flags & SEC_LINKER_CREATED) == 0)
11838 /* It's OK to base decisions on the section name, because none
11839 of the dynobj section names depend upon the input files. */
11840 name = bfd_get_section_name (dynobj, s);
11842 if (strcmp (name, ".plt") == 0)
11844 /* Remember whether there is a PLT. */
11845 plt = s->size != 0;
11847 else if (CONST_STRNEQ (name, ".rel"))
11851 /* Remember whether there are any reloc sections other
11852 than .rel(a).plt and .rela.plt.unloaded. */
11853 if (s != htab->srelplt && s != htab->srelplt2)
11856 /* We use the reloc_count field as a counter if we need
11857 to copy relocs into the output file. */
11858 s->reloc_count = 0;
11861 else if (! CONST_STRNEQ (name, ".got")
11862 && strcmp (name, ".dynbss") != 0)
11864 /* It's not one of our sections, so don't allocate space. */
11870 /* If we don't need this section, strip it from the
11871 output file. This is mostly to handle .rel(a).bss and
11872 .rel(a).plt. We must create both sections in
11873 create_dynamic_sections, because they must be created
11874 before the linker maps input sections to output
11875 sections. The linker does that before
11876 adjust_dynamic_symbol is called, and it is that
11877 function which decides whether anything needs to go
11878 into these sections. */
11879 s->flags |= SEC_EXCLUDE;
11883 if ((s->flags & SEC_HAS_CONTENTS) == 0)
11886 /* Allocate memory for the section contents. */
11887 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
11888 if (s->contents == NULL)
11892 if (elf_hash_table (info)->dynamic_sections_created)
11894 /* Add some entries to the .dynamic section. We fill in the
11895 values later, in elf32_arm_finish_dynamic_sections, but we
11896 must add the entries now so that we get the correct size for
11897 the .dynamic section. The DT_DEBUG entry is filled in by the
11898 dynamic linker and used by the debugger. */
11899 #define add_dynamic_entry(TAG, VAL) \
11900 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
11902 if (info->executable)
11904 if (!add_dynamic_entry (DT_DEBUG, 0))
11910 if ( !add_dynamic_entry (DT_PLTGOT, 0)
11911 || !add_dynamic_entry (DT_PLTRELSZ, 0)
11912 || !add_dynamic_entry (DT_PLTREL,
11913 htab->use_rel ? DT_REL : DT_RELA)
11914 || !add_dynamic_entry (DT_JMPREL, 0))
11922 if (!add_dynamic_entry (DT_REL, 0)
11923 || !add_dynamic_entry (DT_RELSZ, 0)
11924 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
11929 if (!add_dynamic_entry (DT_RELA, 0)
11930 || !add_dynamic_entry (DT_RELASZ, 0)
11931 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
11936 /* If any dynamic relocs apply to a read-only section,
11937 then we need a DT_TEXTREL entry. */
11938 if ((info->flags & DF_TEXTREL) == 0)
11939 elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
11942 if ((info->flags & DF_TEXTREL) != 0)
11944 if (!add_dynamic_entry (DT_TEXTREL, 0))
11947 if (htab->vxworks_p
11948 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
11951 #undef add_dynamic_entry
11956 /* Finish up dynamic symbol handling. We set the contents of various
11957 dynamic sections here. */
11960 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
11961 struct bfd_link_info * info,
11962 struct elf_link_hash_entry * h,
11963 Elf_Internal_Sym * sym)
11966 struct elf32_arm_link_hash_table *htab;
11967 struct elf32_arm_link_hash_entry *eh;
11969 dynobj = elf_hash_table (info)->dynobj;
11970 htab = elf32_arm_hash_table (info);
11974 eh = (struct elf32_arm_link_hash_entry *) h;
11976 if (h->plt.offset != (bfd_vma) -1)
11982 Elf_Internal_Rela rel;
11984 /* This symbol has an entry in the procedure linkage table. Set
11987 BFD_ASSERT (h->dynindx != -1);
11989 splt = bfd_get_section_by_name (dynobj, ".plt");
11990 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".plt"));
11991 BFD_ASSERT (splt != NULL && srel != NULL);
11993 /* Fill in the entry in the procedure linkage table. */
11994 if (htab->symbian_p)
11996 put_arm_insn (htab, output_bfd,
11997 elf32_arm_symbian_plt_entry[0],
11998 splt->contents + h->plt.offset);
11999 bfd_put_32 (output_bfd,
12000 elf32_arm_symbian_plt_entry[1],
12001 splt->contents + h->plt.offset + 4);
12003 /* Fill in the entry in the .rel.plt section. */
12004 rel.r_offset = (splt->output_section->vma
12005 + splt->output_offset
12006 + h->plt.offset + 4);
12007 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12009 /* Get the index in the procedure linkage table which
12010 corresponds to this symbol. This is the index of this symbol
12011 in all the symbols for which we are making plt entries. The
12012 first entry in the procedure linkage table is reserved. */
12013 plt_index = ((h->plt.offset - htab->plt_header_size)
12014 / htab->plt_entry_size);
12018 bfd_vma got_offset, got_address, plt_address;
12019 bfd_vma got_displacement;
12023 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12024 BFD_ASSERT (sgot != NULL);
12026 /* Get the offset into the .got.plt table of the entry that
12027 corresponds to this function. */
12028 got_offset = eh->plt_got_offset;
12030 /* Get the index in the procedure linkage table which
12031 corresponds to this symbol. This is the index of this symbol
12032 in all the symbols for which we are making plt entries. The
12033 first three entries in .got.plt are reserved; after that
12034 symbols appear in the same order as in .plt. */
12035 plt_index = (got_offset - 12) / 4;
12037 /* Calculate the address of the GOT entry. */
12038 got_address = (sgot->output_section->vma
12039 + sgot->output_offset
12042 /* ...and the address of the PLT entry. */
12043 plt_address = (splt->output_section->vma
12044 + splt->output_offset
12047 ptr = htab->splt->contents + h->plt.offset;
12048 if (htab->vxworks_p && info->shared)
12053 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12055 val = elf32_arm_vxworks_shared_plt_entry[i];
12057 val |= got_address - sgot->output_section->vma;
12059 val |= plt_index * RELOC_SIZE (htab);
12060 if (i == 2 || i == 5)
12061 bfd_put_32 (output_bfd, val, ptr);
12063 put_arm_insn (htab, output_bfd, val, ptr);
12066 else if (htab->vxworks_p)
12071 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
12073 val = elf32_arm_vxworks_exec_plt_entry[i];
12075 val |= got_address;
12077 val |= 0xffffff & -((h->plt.offset + i * 4 + 8) >> 2);
12079 val |= plt_index * RELOC_SIZE (htab);
12080 if (i == 2 || i == 5)
12081 bfd_put_32 (output_bfd, val, ptr);
12083 put_arm_insn (htab, output_bfd, val, ptr);
12086 loc = (htab->srelplt2->contents
12087 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
12089 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
12090 referencing the GOT for this PLT entry. */
12091 rel.r_offset = plt_address + 8;
12092 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12093 rel.r_addend = got_offset;
12094 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12095 loc += RELOC_SIZE (htab);
12097 /* Create the R_ARM_ABS32 relocation referencing the
12098 beginning of the PLT for this GOT entry. */
12099 rel.r_offset = got_address;
12100 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12102 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12106 bfd_signed_vma thumb_refs;
12107 /* Calculate the displacement between the PLT slot and the
12108 entry in the GOT. The eight-byte offset accounts for the
12109 value produced by adding to pc in the first instruction
12110 of the PLT stub. */
12111 got_displacement = got_address - (plt_address + 8);
12113 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
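/* Only 28 bits of displacement can be encoded in the PLT entry below
   (three immediate fields of 8, 8 and 12 bits covering bits 27-0), so
   the assertion above checks that the top nibble is clear.  */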
12115 thumb_refs = eh->plt_thumb_refcount;
12116 if (!htab->use_blx)
12117 thumb_refs += eh->plt_maybe_thumb_refcount;
12119 if (thumb_refs > 0)
12121 put_thumb_insn (htab, output_bfd,
12122 elf32_arm_plt_thumb_stub[0], ptr - 4);
12123 put_thumb_insn (htab, output_bfd,
12124 elf32_arm_plt_thumb_stub[1], ptr - 2);
12127 put_arm_insn (htab, output_bfd,
12128 elf32_arm_plt_entry[0]
12129 | ((got_displacement & 0x0ff00000) >> 20),
12131 put_arm_insn (htab, output_bfd,
12132 elf32_arm_plt_entry[1]
12133 | ((got_displacement & 0x000ff000) >> 12),
12135 put_arm_insn (htab, output_bfd,
12136 elf32_arm_plt_entry[2]
12137 | (got_displacement & 0x00000fff),
12139 #ifdef FOUR_WORD_PLT
12140 bfd_put_32 (output_bfd, elf32_arm_plt_entry[3], ptr + 12);
12144 /* Fill in the entry in the global offset table. */
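/* Until the dynamic linker resolves the symbol, the GOT slot points at
   the start of the PLT, so the first call through this entry falls into
   the lazy-binding code in PLT0.  */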
12145 bfd_put_32 (output_bfd,
12146 (splt->output_section->vma
12147 + splt->output_offset),
12148 sgot->contents + got_offset);
12150 /* Fill in the entry in the .rel(a).plt section. */
12152 rel.r_offset = got_address;
12153 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_JUMP_SLOT);
12156 loc = srel->contents + plt_index * RELOC_SIZE (htab);
12157 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12159 if (!h->def_regular)
12161 /* Mark the symbol as undefined, rather than as defined in
12162 the .plt section. Leave the value alone. */
12163 sym->st_shndx = SHN_UNDEF;
12164 /* If the symbol is weak, we do need to clear the value.
12165 Otherwise, the PLT entry would provide a definition for
12166 the symbol even if the symbol wasn't defined anywhere,
12167 and so the symbol would never be NULL. */
12168 if (!h->ref_regular_nonweak)
12173 if (h->got.offset != (bfd_vma) -1
12174 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_GD) == 0
12175 && (elf32_arm_hash_entry (h)->tls_type & GOT_TLS_IE) == 0)
12179 Elf_Internal_Rela rel;
12183 /* This symbol has an entry in the global offset table. Set it
12185 sgot = bfd_get_section_by_name (dynobj, ".got");
12186 srel = bfd_get_section_by_name (dynobj, RELOC_SECTION (htab, ".got"));
12187 BFD_ASSERT (sgot != NULL && srel != NULL);
12189 offset = (h->got.offset & ~(bfd_vma) 1);
12191 rel.r_offset = (sgot->output_section->vma
12192 + sgot->output_offset
12195 /* If this is a static link, or it is a -Bsymbolic link and the
12196 symbol is defined locally or was forced to be local because
12197 of a version file, we just want to emit a RELATIVE reloc.
12198 The entry in the global offset table will already have been
12199 initialized in the relocate_section function. */
12201 && SYMBOL_REFERENCES_LOCAL (info, h))
12203 BFD_ASSERT ((h->got.offset & 1) != 0);
12204 rel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12205 if (!htab->use_rel)
12207 rel.r_addend = bfd_get_32 (output_bfd, sgot->contents + offset);
12208 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12213 BFD_ASSERT ((h->got.offset & 1) == 0);
12214 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + offset);
12215 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
12218 loc = srel->contents + srel->reloc_count++ * RELOC_SIZE (htab);
12219 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12225 Elf_Internal_Rela rel;
12228 /* This symbol needs a copy reloc. Set it up. */
12229 BFD_ASSERT (h->dynindx != -1
12230 && (h->root.type == bfd_link_hash_defined
12231 || h->root.type == bfd_link_hash_defweak));
12233 s = bfd_get_section_by_name (h->root.u.def.section->owner,
12234 RELOC_SECTION (htab, ".bss"));
12235 BFD_ASSERT (s != NULL);
12238 rel.r_offset = (h->root.u.def.value
12239 + h->root.u.def.section->output_section->vma
12240 + h->root.u.def.section->output_offset);
12241 rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
12242 loc = s->contents + s->reloc_count++ * RELOC_SIZE (htab);
12243 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
12246 /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute. On VxWorks,
12247 the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
12248 to the ".got" section. */
12249 if (strcmp (h->root.root.string, "_DYNAMIC") == 0
12250 || (!htab->vxworks_p && h == htab->root.hgot))
12251 sym->st_shndx = SHN_ABS;
12256 /* Finish up the dynamic sections. */
12259 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
12264 struct elf32_arm_link_hash_table *htab;
12266 htab = elf32_arm_hash_table (info);
12270 dynobj = elf_hash_table (info)->dynobj;
12272 sgot = bfd_get_section_by_name (dynobj, ".got.plt");
12273 BFD_ASSERT (htab->symbian_p || sgot != NULL);
12274 sdyn = bfd_get_section_by_name (dynobj, ".dynamic");
12276 if (elf_hash_table (info)->dynamic_sections_created)
12279 Elf32_External_Dyn *dyncon, *dynconend;
12281 splt = bfd_get_section_by_name (dynobj, ".plt");
12282 BFD_ASSERT (splt != NULL && sdyn != NULL);
12284 dyncon = (Elf32_External_Dyn *) sdyn->contents;
12285 dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
12287 for (; dyncon < dynconend; dyncon++)
12289 Elf_Internal_Dyn dyn;
12293 bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
12300 if (htab->vxworks_p
12301 && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
12302 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12307 goto get_vma_if_bpabi;
12310 goto get_vma_if_bpabi;
12313 goto get_vma_if_bpabi;
12315 name = ".gnu.version";
12316 goto get_vma_if_bpabi;
12318 name = ".gnu.version_d";
12319 goto get_vma_if_bpabi;
12321 name = ".gnu.version_r";
12322 goto get_vma_if_bpabi;
12328 name = RELOC_SECTION (htab, ".plt");
12330 s = bfd_get_section_by_name (output_bfd, name);
12331 BFD_ASSERT (s != NULL);
12332 if (!htab->symbian_p)
12333 dyn.d_un.d_ptr = s->vma;
12335 /* In the BPABI, tags in the PT_DYNAMIC section point
12336 at the file offset, not the memory address, for the
12337 convenience of the post linker. */
12338 dyn.d_un.d_ptr = s->filepos;
12339 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12343 if (htab->symbian_p)
12348 s = bfd_get_section_by_name (output_bfd,
12349 RELOC_SECTION (htab, ".plt"));
12350 BFD_ASSERT (s != NULL);
12351 dyn.d_un.d_val = s->size;
12352 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12357 if (!htab->symbian_p)
12359 /* My reading of the SVR4 ABI indicates that the
12360 procedure linkage table relocs (DT_JMPREL) should be
12361 included in the overall relocs (DT_REL). This is
12362 what Solaris does. However, UnixWare can not handle
12363 that case. Therefore, we override the DT_RELSZ entry
12364 here to make it not include the JMPREL relocs. Since
12365 the linker script arranges for .rel(a).plt to follow all
12366 other relocation sections, we don't have to worry
12367 about changing the DT_REL entry. */
12368 s = bfd_get_section_by_name (output_bfd,
12369 RELOC_SECTION (htab, ".plt"));
12371 dyn.d_un.d_val -= s->size;
12372 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12375 /* Fall through. */
12379 /* In the BPABI, the DT_REL tag must point at the file
12380 offset, not the VMA, of the first relocation
12381 section. So, we use code similar to that in
12382 elflink.c, but do not check for SHF_ALLOC on the
12383 relocation section, since relocation sections are
12384 never allocated under the BPABI. The comments above
12385 about UnixWare notwithstanding, we include all of the
12386 relocations here. */
12387 if (htab->symbian_p)
12390 type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
12391 ? SHT_REL : SHT_RELA);
12392 dyn.d_un.d_val = 0;
12393 for (i = 1; i < elf_numsections (output_bfd); i++)
12395 Elf_Internal_Shdr *hdr
12396 = elf_elfsections (output_bfd)[i];
12397 if (hdr->sh_type == type)
12399 if (dyn.d_tag == DT_RELSZ
12400 || dyn.d_tag == DT_RELASZ)
12401 dyn.d_un.d_val += hdr->sh_size;
12402 else if ((ufile_ptr) hdr->sh_offset
12403 <= dyn.d_un.d_val - 1)
12404 dyn.d_un.d_val = hdr->sh_offset;
12407 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12411 /* Set the bottom bit of DT_INIT/FINI if the
12412 corresponding function is Thumb. */
12414 name = info->init_function;
12417 name = info->fini_function;
12419 /* If it wasn't set by elf_bfd_final_link
12420 then there is nothing to adjust. */
12421 if (dyn.d_un.d_val != 0)
12423 struct elf_link_hash_entry * eh;
12425 eh = elf_link_hash_lookup (elf_hash_table (info), name,
12426 FALSE, FALSE, TRUE);
12428 && ELF_ST_TYPE (eh->type) == STT_ARM_TFUNC)
12430 dyn.d_un.d_val |= 1;
12431 bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
12438 /* Fill in the first entry in the procedure linkage table. */
12439 if (splt->size > 0 && htab->plt_header_size)
12441 const bfd_vma *plt0_entry;
12442 bfd_vma got_address, plt_address, got_displacement;
12444 /* Calculate the addresses of the GOT and PLT. */
12445 got_address = sgot->output_section->vma + sgot->output_offset;
12446 plt_address = splt->output_section->vma + splt->output_offset;
12448 if (htab->vxworks_p)
12450 /* The VxWorks GOT is relocated by the dynamic linker.
12451 Therefore, we must emit relocations rather than simply
12452 computing the values now. */
12453 Elf_Internal_Rela rel;
12455 plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
12456 put_arm_insn (htab, output_bfd, plt0_entry[0],
12457 splt->contents + 0);
12458 put_arm_insn (htab, output_bfd, plt0_entry[1],
12459 splt->contents + 4);
12460 put_arm_insn (htab, output_bfd, plt0_entry[2],
12461 splt->contents + 8);
12462 bfd_put_32 (output_bfd, got_address, splt->contents + 12);
12464 /* Generate a relocation for _GLOBAL_OFFSET_TABLE_. */
12465 rel.r_offset = plt_address + 12;
12466 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12468 SWAP_RELOC_OUT (htab) (output_bfd, &rel,
12469 htab->srelplt2->contents);
12473 got_displacement = got_address - (plt_address + 16);
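/* The 16-byte bias is the pc value seen by the pc-relative add in PLT0
   (the instruction sits at offset 8 and ARM reads pc as the instruction
   address plus 8), so adding the displacement stored just after PLT0
   yields the GOT address.  */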
12475 plt0_entry = elf32_arm_plt0_entry;
12476 put_arm_insn (htab, output_bfd, plt0_entry[0],
12477 splt->contents + 0);
12478 put_arm_insn (htab, output_bfd, plt0_entry[1],
12479 splt->contents + 4);
12480 put_arm_insn (htab, output_bfd, plt0_entry[2],
12481 splt->contents + 8);
12482 put_arm_insn (htab, output_bfd, plt0_entry[3],
12483 splt->contents + 12);
12485 #ifdef FOUR_WORD_PLT
12486 /* The displacement value goes in the otherwise-unused
12487 last word of the second entry. */
12488 bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
12490 bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
12495 /* UnixWare sets the entsize of .plt to 4, although that doesn't
12496 really seem like the right value. */
12497 if (splt->output_section->owner == output_bfd)
12498 elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
12500 if (htab->vxworks_p && !info->shared && htab->splt->size > 0)
12502 /* Correct the .rel(a).plt.unloaded relocations. They will have
12503 incorrect symbol indexes. */
12507 num_plts = ((htab->splt->size - htab->plt_header_size)
12508 / htab->plt_entry_size);
12509 p = htab->srelplt2->contents + RELOC_SIZE (htab);
12511 for (; num_plts; num_plts--)
12513 Elf_Internal_Rela rel;
12515 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12516 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
12517 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12518 p += RELOC_SIZE (htab);
12520 SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
12521 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
12522 SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
12523 p += RELOC_SIZE (htab);
12528 /* Fill in the first three entries in the global offset table. */
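/* Word 0 holds the link-time address of the .dynamic section; words 1
   and 2 are reserved for the dynamic linker, which installs its link map
   and lazy-resolution entry point there at run time.  */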
12531 if (sgot->size > 0)
12534 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
12536 bfd_put_32 (output_bfd,
12537 sdyn->output_section->vma + sdyn->output_offset,
12539 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
12540 bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
12543 elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
12550 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
12552 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
12553 struct elf32_arm_link_hash_table *globals;
12555 i_ehdrp = elf_elfheader (abfd);
12557 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
12558 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
12560 i_ehdrp->e_ident[EI_OSABI] = 0;
12561 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
12565 globals = elf32_arm_hash_table (link_info);
12566 if (globals != NULL && globals->byteswap_code)
12567 i_ehdrp->e_flags |= EF_ARM_BE8;
12571 static enum elf_reloc_type_class
12572 elf32_arm_reloc_type_class (const Elf_Internal_Rela *rela)
12574 switch ((int) ELF32_R_TYPE (rela->r_info))
12576 case R_ARM_RELATIVE:
12577 return reloc_class_relative;
12578 case R_ARM_JUMP_SLOT:
12579 return reloc_class_plt;
12581 return reloc_class_copy;
12583 return reloc_class_normal;
12587 /* Set the right machine number for an Arm ELF file. */
12590 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr *hdr)
12592 if (hdr->sh_type == SHT_NOTE)
12593 *flags |= SEC_LINK_ONCE | SEC_LINK_DUPLICATES_SAME_CONTENTS;
12599 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
12601 bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
12604 /* Return TRUE if this is an unwinding table entry. */
12607 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
12609 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
12610 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
12614 /* Set the type and flags for an ARM section. We do this by
12615 the section name, which is a hack, but ought to work. */
12618 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
12622 name = bfd_get_section_name (abfd, sec);
12624 if (is_arm_elf_unwind_section_name (abfd, name))
12626 hdr->sh_type = SHT_ARM_EXIDX;
12627 hdr->sh_flags |= SHF_LINK_ORDER;
12632 /* Handle an ARM specific section when reading an object file. This is
12633 called when bfd_section_from_shdr finds a section with an unknown
12637 elf32_arm_section_from_shdr (bfd *abfd,
12638 Elf_Internal_Shdr * hdr,
12642 /* There ought to be a place to keep ELF backend specific flags, but
12643 at the moment there isn't one. We just keep track of the
12644 sections by their name, instead. Fortunately, the ABI gives
12645 names for all the ARM specific sections, so we will probably get
12647 switch (hdr->sh_type)
12649 case SHT_ARM_EXIDX:
12650 case SHT_ARM_PREEMPTMAP:
12651 case SHT_ARM_ATTRIBUTES:
12658 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
12664 /* A structure used to record a list of sections, independently
12665 of the next and prev fields in the asection structure. */
12666 typedef struct section_list
12669 struct section_list * next;
12670 struct section_list * prev;
12674 /* Unfortunately we need to keep a list of sections for which
12675 an _arm_elf_section_data structure has been allocated. This
12676 is because it is possible for functions like elf32_arm_write_section
12677 to be called on a section which has had an elf_section_data structure
12678 allocated for it (and so the used_by_bfd field is valid) but
12679 for which the ARM extended version of this structure - the
12680 _arm_elf_section_data structure - has not been allocated. */
12681 static section_list * sections_with_arm_elf_section_data = NULL;
12684 record_section_with_arm_elf_section_data (asection * sec)
12686 struct section_list * entry;
12688 entry = (struct section_list *) bfd_malloc (sizeof (* entry));
12692 entry->next = sections_with_arm_elf_section_data;
12693 entry->prev = NULL;
12694 if (entry->next != NULL)
12695 entry->next->prev = entry;
12696 sections_with_arm_elf_section_data = entry;
12699 static struct section_list *
12700 find_arm_elf_section_entry (asection * sec)
12702 struct section_list * entry;
12703 static struct section_list * last_entry = NULL;
12705 /* This is a short cut for the typical case where the sections are added
12706 to the sections_with_arm_elf_section_data list in forward order and
12707 then looked up here in backwards order. This makes a real difference
12708 to the ld-srec/sec64k.exp linker test. */
12709 entry = sections_with_arm_elf_section_data;
12710 if (last_entry != NULL)
12712 if (last_entry->sec == sec)
12713 entry = last_entry;
12714 else if (last_entry->next != NULL
12715 && last_entry->next->sec == sec)
12716 entry = last_entry->next;
12719 for (; entry; entry = entry->next)
12720 if (entry->sec == sec)
12724 /* Record the entry prior to this one - it is the entry we are most
12725 likely to want to locate next time. Also this way if we have been
12726 called from unrecord_section_with_arm_elf_section_data() we will not
12727 be caching a pointer that is about to be freed. */
12728 last_entry = entry->prev;
12733 static _arm_elf_section_data *
12734 get_arm_elf_section_data (asection * sec)
12736 struct section_list * entry;
12738 entry = find_arm_elf_section_entry (sec);
12741 return elf32_arm_section_data (entry->sec);
12747 unrecord_section_with_arm_elf_section_data (asection * sec)
12749 struct section_list * entry;
12751 entry = find_arm_elf_section_entry (sec);
12755 if (entry->prev != NULL)
12756 entry->prev->next = entry->next;
12757 if (entry->next != NULL)
12758 entry->next->prev = entry->prev;
12759 if (entry == sections_with_arm_elf_section_data)
12760 sections_with_arm_elf_section_data = entry->next;
12769 struct bfd_link_info *info;
12772 int (*func) (void *, const char *, Elf_Internal_Sym *,
12773 asection *, struct elf_link_hash_entry *);
12774 } output_arch_syminfo;
12776 enum map_symbol_type
12784 /* Output a single mapping symbol. */
12787 elf32_arm_output_map_sym (output_arch_syminfo *osi,
12788 enum map_symbol_type type,
12791 static const char *names[3] = {"$a", "$t", "$d"};
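/* These follow the ARM ELF mapping symbol convention: $a marks ARM code,
   $t Thumb code and $d data.  The array is indexed by enum
   map_symbol_type.  */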
12792 Elf_Internal_Sym sym;
12794 sym.st_value = osi->sec->output_section->vma
12795 + osi->sec->output_offset
12799 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
12800 sym.st_shndx = osi->sec_shndx;
12801 return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
12805 /* Output mapping symbols for PLT entries associated with H. */
12808 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
12810 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
12811 struct elf32_arm_link_hash_table *htab;
12812 struct elf32_arm_link_hash_entry *eh;
12815 if (h->root.type == bfd_link_hash_indirect)
12818 if (h->root.type == bfd_link_hash_warning)
12819 /* When warning symbols are created, they **replace** the "real"
12820 entry in the hash table, thus we never get to see the real
12821 symbol in a hash traversal. So look at it now. */
12822 h = (struct elf_link_hash_entry *) h->root.u.i.link;
12824 if (h->plt.offset == (bfd_vma) -1)
12827 htab = elf32_arm_hash_table (osi->info);
12831 eh = (struct elf32_arm_link_hash_entry *) h;
12832 addr = h->plt.offset;
12833 if (htab->symbian_p)
12835 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12837 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
12840 else if (htab->vxworks_p)
12842 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12844 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
12846 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
12848 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
12853 bfd_signed_vma thumb_refs;
12855 thumb_refs = eh->plt_thumb_refcount;
12856 if (!htab->use_blx)
12857 thumb_refs += eh->plt_maybe_thumb_refcount;
12859 if (thumb_refs > 0)
12861 if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
12864 #ifdef FOUR_WORD_PLT
12865 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12867 if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
12870 /* A three-word PLT with no Thumb thunk contains only Arm code,
12871 so we only need to output a mapping symbol for the first PLT entry and
12872 entries with thumb thunks. */
12873 if (thumb_refs > 0 || addr == 20)
12875 if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
12884 /* Output a single local symbol for a generated stub. */
12887 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
12888 bfd_vma offset, bfd_vma size)
12890 Elf_Internal_Sym sym;
12892 sym.st_value = osi->sec->output_section->vma
12893 + osi->sec->output_offset
12895 sym.st_size = size;
12897 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
12898 sym.st_shndx = osi->sec_shndx;
12899 return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
12903 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
12906 struct elf32_arm_stub_hash_entry *stub_entry;
12907 struct bfd_link_info *info;
12908 asection *stub_sec;
12911 output_arch_syminfo *osi;
12912 const insn_sequence *template_sequence;
12913 enum stub_insn_type prev_type;
12916 enum map_symbol_type sym_type;
12918 /* Massage our args to the form they really have. */
12919 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
12920 osi = (output_arch_syminfo *) in_arg;
12924 stub_sec = stub_entry->stub_sec;
12926 /* Ensure this stub is attached to the current section being
12928 if (stub_sec != osi->sec)
12931 addr = (bfd_vma) stub_entry->stub_offset;
12932 stub_name = stub_entry->output_name;
12934 template_sequence = stub_entry->stub_template;
12935 switch (template_sequence[0].type)
12938 if (!elf32_arm_output_stub_sym (osi, stub_name, addr, stub_entry->stub_size))
12943 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
12944 stub_entry->stub_size))
12952 prev_type = DATA_TYPE;
12954 for (i = 0; i < stub_entry->stub_template_size; i++)
12956 switch (template_sequence[i].type)
12959 sym_type = ARM_MAP_ARM;
12964 sym_type = ARM_MAP_THUMB;
12968 sym_type = ARM_MAP_DATA;
12976 if (template_sequence[i].type != prev_type)
12978 prev_type = template_sequence[i].type;
12979 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
12983 switch (template_sequence[i].type)
13007 /* Output mapping symbols for linker generated sections,
13008 and for those data-only sections that do not have a
13012 elf32_arm_output_arch_local_syms (bfd *output_bfd,
13013 struct bfd_link_info *info,
13015 int (*func) (void *, const char *,
13016 Elf_Internal_Sym *,
13018 struct elf_link_hash_entry *))
13020 output_arch_syminfo osi;
13021 struct elf32_arm_link_hash_table *htab;
13023 bfd_size_type size;
13026 htab = elf32_arm_hash_table (info);
13030 check_use_blx (htab);
13036 /* Add a $d mapping symbol to data-only sections that
13037 don't have any mapping symbol. This may result in (harmless) redundant
13038 mapping symbols. */
13039 for (input_bfd = info->input_bfds;
13041 input_bfd = input_bfd->link_next)
13043 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
13044 for (osi.sec = input_bfd->sections;
13046 osi.sec = osi.sec->next)
13048 if (osi.sec->output_section != NULL
13049 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
13050 == SEC_HAS_CONTENTS
13051 && get_arm_elf_section_data (osi.sec) != NULL
13052 && get_arm_elf_section_data (osi.sec)->mapcount == 0)
13054 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13055 (output_bfd, osi.sec->output_section);
13056 if (osi.sec_shndx != (int)SHN_BAD)
13057 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
13062 /* ARM->Thumb glue. */
13063 if (htab->arm_glue_size > 0)
13065 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13066 ARM2THUMB_GLUE_SECTION_NAME);
13068 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13069 (output_bfd, osi.sec->output_section);
13070 if (info->shared || htab->root.is_relocatable_executable
13071 || htab->pic_veneer)
13072 size = ARM2THUMB_PIC_GLUE_SIZE;
13073 else if (htab->use_blx)
13074 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
13076 size = ARM2THUMB_STATIC_GLUE_SIZE;
13078 for (offset = 0; offset < htab->arm_glue_size; offset += size)
13080 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
13081 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
13085 /* Thumb->ARM glue. */
13086 if (htab->thumb_glue_size > 0)
13088 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13089 THUMB2ARM_GLUE_SECTION_NAME);
13091 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13092 (output_bfd, osi.sec->output_section);
13093 size = THUMB2ARM_GLUE_SIZE;
13095 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
13097 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
13098 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
13102 /* ARMv4 BX veneers. */
13103 if (htab->bx_glue_size > 0)
13105 osi.sec = bfd_get_section_by_name (htab->bfd_of_glue_owner,
13106 ARM_BX_GLUE_SECTION_NAME);
13108 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13109 (output_bfd, osi.sec->output_section);
13111 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
13114 /* Long calls stubs. */
13115 if (htab->stub_bfd && htab->stub_bfd->sections)
13117 asection* stub_sec;
13119 for (stub_sec = htab->stub_bfd->sections;
13121 stub_sec = stub_sec->next)
13123 /* Ignore non-stub sections. */
13124 if (!strstr (stub_sec->name, STUB_SUFFIX))
13127 osi.sec = stub_sec;
13129 osi.sec_shndx = _bfd_elf_section_from_bfd_section
13130 (output_bfd, osi.sec->output_section);
13132 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
13136 /* Finally, output mapping symbols for the PLT. */
13137 if (!htab->splt || htab->splt->size == 0)
13140 osi.sec_shndx = _bfd_elf_section_from_bfd_section (output_bfd,
13141 htab->splt->output_section);
13142 osi.sec = htab->splt;
13143 /* Output mapping symbols for the plt header. SymbianOS does not have a
13145 if (htab->vxworks_p)
13147 /* VxWorks shared libraries have no PLT header. */
13150 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13152 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
13156 else if (!htab->symbian_p)
13158 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
13160 #ifndef FOUR_WORD_PLT
13161 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
13166 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, (void *) &osi);
13170 /* Allocate target specific section data. */
13173 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
13175 if (!sec->used_by_bfd)
13177 _arm_elf_section_data *sdata;
13178 bfd_size_type amt = sizeof (*sdata);
13180 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
13183 sec->used_by_bfd = sdata;
13186 record_section_with_arm_elf_section_data (sec);
13188 return _bfd_elf_new_section_hook (abfd, sec);
13192 /* Used to order a list of mapping symbols by address. */
13195 elf32_arm_compare_mapping (const void * a, const void * b)
13197 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
13198 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
13200 if (amap->vma > bmap->vma)
13202 else if (amap->vma < bmap->vma)
13204 else if (amap->type > bmap->type)
13205 /* Ensure results do not depend on the host qsort for objects with
13206 multiple mapping symbols at the same address by sorting on type
13209 else if (amap->type < bmap->type)
13215 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
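/* A PREL31 value is a 31-bit signed place-relative offset held in the low
   bits of the word, with bit 31 carrying separate information, which is
   why only the low 31 bits are rewritten when an entry moves.  */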
13217 static unsigned long
13218 offset_prel31 (unsigned long addr, bfd_vma offset)
13220 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
13223 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
13227 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
13229 unsigned long first_word = bfd_get_32 (output_bfd, from);
13230 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
13232 /* High bit of first word is supposed to be zero. */
13233 if ((first_word & 0x80000000ul) == 0)
13234 first_word = offset_prel31 (first_word, offset);
13236 /* If the high bit of the second word is clear, and the bit pattern is not 0x1
13237 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
13238 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
13239 second_word = offset_prel31 (second_word, offset);
13241 bfd_put_32 (output_bfd, first_word, to);
13242 bfd_put_32 (output_bfd, second_word, to + 4);
13245 /* Data for make_branch_to_a8_stub(). */
13247 struct a8_branch_to_stub_data {
13248 asection *writing_section;
13249 bfd_byte *contents;
13253 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
13254 places for a particular section. */
13257 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
13260 struct elf32_arm_stub_hash_entry *stub_entry;
13261 struct a8_branch_to_stub_data *data;
13262 bfd_byte *contents;
13263 unsigned long branch_insn;
13264 bfd_vma veneered_insn_loc, veneer_entry_loc;
13265 bfd_signed_vma branch_offset;
13267 unsigned int target;
13269 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
13270 data = (struct a8_branch_to_stub_data *) in_arg;
13272 if (stub_entry->target_section != data->writing_section
13273 || stub_entry->stub_type < arm_stub_a8_veneer_b_cond)
13276 contents = data->contents;
13278 veneered_insn_loc = stub_entry->target_section->output_section->vma
13279 + stub_entry->target_section->output_offset
13280 + stub_entry->target_value;
13282 veneer_entry_loc = stub_entry->stub_sec->output_section->vma
13283 + stub_entry->stub_sec->output_offset
13284 + stub_entry->stub_offset;
13286 if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
13287 veneered_insn_loc &= ~3u;
13289 branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
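/* Thumb-2 branch instructions read pc as the address of the branch plus
   4, hence the extra -4 in the displacement above.  */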
13291 abfd = stub_entry->target_section->owner;
13292 target = stub_entry->target_value;
13294 /* We attempt to avoid this condition by setting stubs_always_after_branch
13295 in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
13296 This check is just to be on the safe side... */
13297 if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
13299 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
13300 "allocated in unsafe location"), abfd);
13304 switch (stub_entry->stub_type)
13306 case arm_stub_a8_veneer_b:
13307 case arm_stub_a8_veneer_b_cond:
13308 branch_insn = 0xf0009000;
13311 case arm_stub_a8_veneer_blx:
13312 branch_insn = 0xf000e800;
13315 case arm_stub_a8_veneer_bl:
13317 unsigned int i1, j1, i2, j2, s;
13319 branch_insn = 0xf000d000;
13322 if (branch_offset < -16777216 || branch_offset > 16777214)
13324 /* There's not much we can do apart from complain if this
13326 (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
13327 "of range (input file too large)"), abfd);
13331 /* i1 = not(j1 eor s), so:
13333 j1 = (not i1) eor s. */
13335 branch_insn |= (branch_offset >> 1) & 0x7ff;
13336 branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
13337 i2 = (branch_offset >> 22) & 1;
13338 i1 = (branch_offset >> 23) & 1;
13339 s = (branch_offset >> 24) & 1;
13342 branch_insn |= j2 << 11;
13343 branch_insn |= j1 << 13;
13344 branch_insn |= s << 26;
13353 bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[target]);
13354 bfd_put_16 (abfd, branch_insn & 0xffff, &contents[target + 2]);
13359 /* Do code byteswapping. Return FALSE afterwards so that the section is
13360 written out as normal. */
13363 elf32_arm_write_section (bfd *output_bfd,
13364 struct bfd_link_info *link_info,
13366 bfd_byte *contents)
13368 unsigned int mapcount, errcount;
13369 _arm_elf_section_data *arm_data;
13370 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
13371 elf32_arm_section_map *map;
13372 elf32_vfp11_erratum_list *errnode;
13375 bfd_vma offset = sec->output_section->vma + sec->output_offset;
13379 if (globals == NULL)
13382 /* If this section has not been allocated an _arm_elf_section_data
13383 structure then we cannot record anything. */
13384 arm_data = get_arm_elf_section_data (sec);
13385 if (arm_data == NULL)
13388 mapcount = arm_data->mapcount;
13389 map = arm_data->map;
13390 errcount = arm_data->erratumcount;
13394 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
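/* The patches below are written a byte at a time; XORing the byte index
   with ENDIANFLIP makes each 32-bit instruction land in the correct byte
   order whether the output is little- or big-endian.  */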
13396 for (errnode = arm_data->erratumlist; errnode != 0;
13397 errnode = errnode->next)
13399 bfd_vma target = errnode->vma - offset;
13401 switch (errnode->type)
13403 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
13405 bfd_vma branch_to_veneer;
13406 /* Original condition code of instruction, plus bit mask for
13407 ARM B instruction. */
13408 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
13411 /* The instruction is before the label. */
13414 /* Above offset included in -4 below. */
13415 branch_to_veneer = errnode->u.b.veneer->vma
13416 - errnode->vma - 4;
13418 if ((signed) branch_to_veneer < -(1 << 25)
13419 || (signed) branch_to_veneer >= (1 << 25))
13420 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13421 "range"), output_bfd);
13423 insn |= (branch_to_veneer >> 2) & 0xffffff;
13424 contents[endianflip ^ target] = insn & 0xff;
13425 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13426 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13427 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13431 case VFP11_ERRATUM_ARM_VENEER:
13433 bfd_vma branch_from_veneer;
unsigned int insn;
13436 /* Take size of veneer into account. */
13437 branch_from_veneer = errnode->u.v.branch->vma
13438 - errnode->vma - 12;
13440 if ((signed) branch_from_veneer < -(1 << 25)
13441 || (signed) branch_from_veneer >= (1 << 25))
13442 (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
13443 "range"), output_bfd);
13445 /* Original instruction. */
13446 insn = errnode->u.v.branch->u.b.vfp_insn;
13447 contents[endianflip ^ target] = insn & 0xff;
13448 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
13449 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
13450 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
13452 /* Branch back to insn after original insn. */
13453 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
13454 contents[endianflip ^ (target + 4)] = insn & 0xff;
13455 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
13456 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
13457 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
13467 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
13469 arm_unwind_table_edit *edit_node
13470 = arm_data->u.exidx.unwind_edit_list;
13471 /* Now, sec->size is the size of the section we will write. The original
13472 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
13473 markers) was sec->rawsize. (This isn't the case if we perform no
13474 edits; then rawsize will be zero and we should use size.) */
13475 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
13476 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
13477 unsigned int in_index, out_index;
13478 bfd_vma add_to_offsets = 0;
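/* For reference: the loop below walks the original EXIDX entries and the
   edit list in step.  Entries that precede the next edit are copied with
   ADD_TO_OFFSETS folded into their prel31 fields; a DELETE_EXIDX_ENTRY
   node skips one input entry and raises ADD_TO_OFFSETS by 8 (following
   entries now sit 8 bytes earlier in the output); an
   INSERT_EXIDX_CANTUNWIND_AT_END node writes a synthetic two-word
   terminator and lowers ADD_TO_OFFSETS by 8.  */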
13480 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
13484 unsigned int edit_index = edit_node->index;
13486 if (in_index < edit_index && in_index * 8 < input_size)
13488 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13489 contents + in_index * 8, add_to_offsets);
13493 else if (in_index == edit_index
13494 || (in_index * 8 >= input_size
13495 && edit_index == UINT_MAX))
13497 switch (edit_node->type)
13499 case DELETE_EXIDX_ENTRY:
13501 add_to_offsets += 8;
13504 case INSERT_EXIDX_CANTUNWIND_AT_END:
13506 asection *text_sec = edit_node->linked_section;
13507 bfd_vma text_offset = text_sec->output_section->vma
13508 + text_sec->output_offset
+ text_sec->size;
13510 bfd_vma exidx_offset = offset + out_index * 8;
13511 unsigned long prel31_offset;
13513 /* Note: this is meant to be equivalent to an
13514 R_ARM_PREL31 relocation. These synthetic
13515 EXIDX_CANTUNWIND markers are not relocated by the
13516 usual BFD method. */
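/* For reference: a prel31 value is a self-relative offset held in the
   low 31 bits of the word; bit 31 is used as a flag by the exception
   table format, so only the low 31 bits of the difference are kept.  */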
13517 prel31_offset = (text_offset - exidx_offset)
& 0x7ffffffful;
13520 /* First address we can't unwind. */
13521 bfd_put_32 (output_bfd, prel31_offset,
13522 &edited_contents[out_index * 8]);
13524 /* Code for EXIDX_CANTUNWIND. */
13525 bfd_put_32 (output_bfd, 0x1,
13526 &edited_contents[out_index * 8 + 4]);
13529 add_to_offsets -= 8;
13534 edit_node = edit_node->next;
13539 /* No more edits, copy remaining entries verbatim. */
13540 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
13541 contents + in_index * 8, add_to_offsets);
13547 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
13548 bfd_set_section_contents (output_bfd, sec->output_section,
edited_contents,
13550 (file_ptr) sec->output_offset, sec->size);
13555 /* Fix code to point to Cortex-A8 erratum stubs. */
13556 if (globals->fix_cortex_a8)
13558 struct a8_branch_to_stub_data data;
13560 data.writing_section = sec;
13561 data.contents = contents;
13563 bfd_hash_traverse (&globals->stub_hash_table, make_branch_to_a8_stub,
13570 if (globals->byteswap_code)
13572 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
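/* For reference: MAP holds the section's mapping symbols ($a, $t and $d),
   sorted by address above.  Each entry therefore describes a run of ARM
   code, Thumb code or data that extends to the next entry's vma (or to
   the end of the section), and the switch below swaps 32-bit words in
   ARM runs, 16-bit halfwords in Thumb runs, and leaves data untouched.  */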
13575 for (i = 0; i < mapcount; i++)
13577 if (i == mapcount - 1)
13580 end = map[i + 1].vma;
13582 switch (map[i].type)
13585 /* Byte swap code words. */
13586 while (ptr + 3 < end)
13588 tmp = contents[ptr];
13589 contents[ptr] = contents[ptr + 3];
13590 contents[ptr + 3] = tmp;
13591 tmp = contents[ptr + 1];
13592 contents[ptr + 1] = contents[ptr + 2];
13593 contents[ptr + 2] = tmp;
13599 /* Byte swap code halfwords. */
13600 while (ptr + 1 < end)
13602 tmp = contents[ptr];
13603 contents[ptr] = contents[ptr + 1];
13604 contents[ptr + 1] = tmp;
13610 /* Leave data alone. */
13618 arm_data->mapcount = 0;
13619 arm_data->mapsize = 0;
13620 arm_data->map = NULL;
13621 unrecord_section_with_arm_elf_section_data (sec);
13627 unrecord_section_via_map_over_sections (bfd * abfd ATTRIBUTE_UNUSED,
asection * sec,
13629 void * ignore ATTRIBUTE_UNUSED)
13631 unrecord_section_with_arm_elf_section_data (sec);
13635 elf32_arm_close_and_cleanup (bfd * abfd)
13637 if (abfd->sections)
13638 bfd_map_over_sections (abfd,
13639 unrecord_section_via_map_over_sections,
13642 return _bfd_elf_close_and_cleanup (abfd);
13646 elf32_arm_bfd_free_cached_info (bfd * abfd)
13648 if (abfd->sections)
13649 bfd_map_over_sections (abfd,
13650 unrecord_section_via_map_over_sections,
13653 return _bfd_free_cached_info (abfd);
13656 /* Display STT_ARM_TFUNC symbols as functions. */
13659 elf32_arm_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
asymbol * asym)
13662 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
13664 if (ELF_ST_TYPE (elfsym->internal_elf_sym.st_info) == STT_ARM_TFUNC)
13665 elfsym->symbol.flags |= BSF_FUNCTION;
13669 /* Mangle thumb function symbols as we read them in. */
13672 elf32_arm_swap_symbol_in (bfd * abfd,
const void *psrc,
const void *pshn,
13675 Elf_Internal_Sym *dst)
13677 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
13680 /* New EABI objects mark thumb function symbols by setting the low bit of
13681 the address. Turn these into STT_ARM_TFUNC. */
13682 if ((ELF_ST_TYPE (dst->st_info) == STT_FUNC)
13683 && (dst->st_value & 1))
13685 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_ARM_TFUNC);
13686 dst->st_value &= ~(bfd_vma) 1;
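/* Worked example: a Thumb function emitted with type STT_FUNC and
   st_value 0x8001 is read back here as STT_ARM_TFUNC with st_value
   0x8000; elf32_arm_swap_symbol_out below applies the inverse mapping
   when the symbol table is written out.  */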
13692 /* Mangle thumb function symbols as we write them out. */
13695 elf32_arm_swap_symbol_out (bfd *abfd,
13696 const Elf_Internal_Sym *src,
13700 Elf_Internal_Sym newsym;
13702 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
13703 of the address set, as per the new EABI. We do this unconditionally
13704 because objcopy does not set the elf header flags until after
13705 it writes out the symbol table. */
13706 if (ELF_ST_TYPE (src->st_info) == STT_ARM_TFUNC)
13709 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
13710 if (newsym.st_shndx != SHN_UNDEF)
13712 /* Do this only for defined symbols. At link time, the static
13713 linker will simulate the work of the dynamic linker in resolving
13714 symbols and will carry over the thumbness of found symbols to
13715 the output symbol table. It's not clear how it happens, but
13716 the thumbness of undefined symbols can well be different at
13717 runtime, and writing '1' for them will be confusing for users
13718 and possibly for the dynamic linker itself. */
13720 newsym.st_value |= 1;
13725 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
13728 /* Add the PT_ARM_EXIDX program header. */
13731 elf32_arm_modify_segment_map (bfd *abfd,
13732 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13734 struct elf_segment_map *m;
13737 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13738 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13740 /* If there is already a PT_ARM_EXIDX header, then we do not
13741 want to add another one. This situation arises when running
13742 "strip"; the input binary already has the header. */
13743 m = elf_tdata (abfd)->segment_map;
13744 while (m && m->p_type != PT_ARM_EXIDX)
13748 m = (struct elf_segment_map *)
13749 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
13752 m->p_type = PT_ARM_EXIDX;
13754 m->sections[0] = sec;
13756 m->next = elf_tdata (abfd)->segment_map;
13757 elf_tdata (abfd)->segment_map = m;
13764 /* We may add a PT_ARM_EXIDX program header. */
13767 elf32_arm_additional_program_headers (bfd *abfd,
13768 struct bfd_link_info *info ATTRIBUTE_UNUSED)
13772 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
13773 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
13779 /* We have two function types: STT_FUNC and STT_ARM_TFUNC. */
13782 elf32_arm_is_function_type (unsigned int type)
13784 return (type == STT_FUNC) || (type == STT_ARM_TFUNC);
13787 /* We use this to override swap_symbol_in and swap_symbol_out. */
13788 const struct elf_size_info elf32_arm_size_info =
13790 sizeof (Elf32_External_Ehdr),
13791 sizeof (Elf32_External_Phdr),
13792 sizeof (Elf32_External_Shdr),
13793 sizeof (Elf32_External_Rel),
13794 sizeof (Elf32_External_Rela),
13795 sizeof (Elf32_External_Sym),
13796 sizeof (Elf32_External_Dyn),
13797 sizeof (Elf_External_Note),
13801 ELFCLASS32, EV_CURRENT,
13802 bfd_elf32_write_out_phdrs,
13803 bfd_elf32_write_shdrs_and_ehdr,
13804 bfd_elf32_checksum_contents,
13805 bfd_elf32_write_relocs,
13806 elf32_arm_swap_symbol_in,
13807 elf32_arm_swap_symbol_out,
13808 bfd_elf32_slurp_reloc_table,
13809 bfd_elf32_slurp_symbol_table,
13810 bfd_elf32_swap_dyn_in,
13811 bfd_elf32_swap_dyn_out,
13812 bfd_elf32_swap_reloc_in,
13813 bfd_elf32_swap_reloc_out,
13814 bfd_elf32_swap_reloca_in,
13815 bfd_elf32_swap_reloca_out
13818 #define ELF_ARCH bfd_arch_arm
13819 #define ELF_MACHINE_CODE EM_ARM
13820 #ifdef __QNXTARGET__
13821 #define ELF_MAXPAGESIZE 0x1000
13823 #define ELF_MAXPAGESIZE 0x8000
13825 #define ELF_MINPAGESIZE 0x1000
13826 #define ELF_COMMONPAGESIZE 0x1000
13828 #define bfd_elf32_mkobject elf32_arm_mkobject
13830 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
13831 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
13832 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
13833 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
13834 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
13835 #define bfd_elf32_bfd_link_hash_table_free elf32_arm_hash_table_free
13836 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
13837 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
13838 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
13839 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
13840 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
13841 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
13842 #define bfd_elf32_close_and_cleanup elf32_arm_close_and_cleanup
13843 #define bfd_elf32_bfd_free_cached_info elf32_arm_bfd_free_cached_info
13844 #define bfd_elf32_bfd_final_link elf32_arm_final_link
13846 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
13847 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
13848 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
13849 #define elf_backend_gc_sweep_hook elf32_arm_gc_sweep_hook
13850 #define elf_backend_check_relocs elf32_arm_check_relocs
13851 #define elf_backend_relocate_section elf32_arm_relocate_section
13852 #define elf_backend_write_section elf32_arm_write_section
13853 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
13854 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
13855 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
13856 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
13857 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
13858 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
13859 #define elf_backend_post_process_headers elf32_arm_post_process_headers
13860 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
13861 #define elf_backend_object_p elf32_arm_object_p
13862 #define elf_backend_section_flags elf32_arm_section_flags
13863 #define elf_backend_fake_sections elf32_arm_fake_sections
13864 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
13865 #define elf_backend_final_write_processing elf32_arm_final_write_processing
13866 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
13867 #define elf_backend_symbol_processing elf32_arm_symbol_processing
13868 #define elf_backend_size_info elf32_arm_size_info
13869 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
13870 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
13871 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
13872 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
13873 #define elf_backend_is_function_type elf32_arm_is_function_type
13875 #define elf_backend_can_refcount 1
13876 #define elf_backend_can_gc_sections 1
13877 #define elf_backend_plt_readonly 1
13878 #define elf_backend_want_got_plt 1
13879 #define elf_backend_want_plt_sym 0
13880 #define elf_backend_may_use_rel_p 1
13881 #define elf_backend_may_use_rela_p 0
13882 #define elf_backend_default_use_rela_p 0
13884 #define elf_backend_got_header_size 12
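/* For reference: the 12 bytes above cover the usual three reserved GOT
   entries; GOT[0] is set to the address of _DYNAMIC and the next two
   words are left for the dynamic linker to fill in at run time.  */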
13886 #undef elf_backend_obj_attrs_vendor
13887 #define elf_backend_obj_attrs_vendor "aeabi"
13888 #undef elf_backend_obj_attrs_section
13889 #define elf_backend_obj_attrs_section ".ARM.attributes"
13890 #undef elf_backend_obj_attrs_arg_type
13891 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
13892 #undef elf_backend_obj_attrs_section_type
13893 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
13894 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
13896 #include "elf32-target.h"
13898 /* VxWorks Targets. */
13900 #undef TARGET_LITTLE_SYM
13901 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_vxworks_vec
13902 #undef TARGET_LITTLE_NAME
13903 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
13904 #undef TARGET_BIG_SYM
13905 #define TARGET_BIG_SYM bfd_elf32_bigarm_vxworks_vec
13906 #undef TARGET_BIG_NAME
13907 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
13909 /* Like elf32_arm_link_hash_table_create -- but overrides
13910 appropriately for VxWorks. */
13912 static struct bfd_link_hash_table *
13913 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
13915 struct bfd_link_hash_table *ret;
13917 ret = elf32_arm_link_hash_table_create (abfd);
13920 struct elf32_arm_link_hash_table *htab
13921 = (struct elf32_arm_link_hash_table *) ret;
13923 htab->vxworks_p = 1;
13929 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
13931 elf32_arm_final_write_processing (abfd, linker);
13932 elf_vxworks_final_write_processing (abfd, linker);
13936 #define elf32_bed elf32_arm_vxworks_bed
13938 #undef bfd_elf32_bfd_link_hash_table_create
13939 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
13940 #undef elf_backend_add_symbol_hook
13941 #define elf_backend_add_symbol_hook elf_vxworks_add_symbol_hook
13942 #undef elf_backend_final_write_processing
13943 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
13944 #undef elf_backend_emit_relocs
13945 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
13947 #undef elf_backend_may_use_rel_p
13948 #define elf_backend_may_use_rel_p 0
13949 #undef elf_backend_may_use_rela_p
13950 #define elf_backend_may_use_rela_p 1
13951 #undef elf_backend_default_use_rela_p
13952 #define elf_backend_default_use_rela_p 1
13953 #undef elf_backend_want_plt_sym
13954 #define elf_backend_want_plt_sym 1
13955 #undef ELF_MAXPAGESIZE
13956 #define ELF_MAXPAGESIZE 0x1000
13958 #include "elf32-target.h"
13961 /* Merge backend specific data from an object file to the output
13962 object file when linking. */
13965 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
13967 flagword out_flags;
flagword in_flags;
13969 bfd_boolean flags_compatible = TRUE;
13972 /* Check if we have the same endianness. */
13973 if (! _bfd_generic_verify_endian_match (ibfd, obfd))
13976 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13979 if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
13982 /* The input BFD must have had its flags initialised. */
13983 /* The following seems bogus to me -- The flags are initialized in
13984 the assembler but I don't think an elf_flags_init field is
13985 written into the object. */
13986 /* BFD_ASSERT (elf_flags_init (ibfd)); */
13988 in_flags = elf_elfheader (ibfd)->e_flags;
13989 out_flags = elf_elfheader (obfd)->e_flags;
13991 /* In theory there is no reason why we couldn't handle this. However
13992 in practice it isn't even close to working and there is no real
13993 reason to want it. */
13994 if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
13995 && !(ibfd->flags & DYNAMIC)
13996 && (in_flags & EF_ARM_BE8))
13998 _bfd_error_handler (_("error: %B is already in final BE8 format"),
14003 if (!elf_flags_init (obfd))
14005 /* If the input is the default architecture and had the default
14006 flags then do not bother setting the flags for the output
14007 architecture, instead allow future merges to do this. If no
14008 future merges ever set these flags then they will retain their
14009 uninitialised values, which, surprise surprise, correspond
14010 to the default values. */
14011 if (bfd_get_arch_info (ibfd)->the_default
14012 && elf_elfheader (ibfd)->e_flags == 0)
14015 elf_flags_init (obfd) = TRUE;
14016 elf_elfheader (obfd)->e_flags = in_flags;
14018 if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
14019 && bfd_get_arch_info (obfd)->the_default)
14020 return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
14025 /* Determine what should happen if the input ARM architecture
14026 does not match the output ARM architecture. */
14027 if (! bfd_arm_merge_machines (ibfd, obfd))
14030 /* Identical flags must be compatible. */
14031 if (in_flags == out_flags)
14034 /* Check to see if the input BFD actually contains any sections. If
14035 not, its flags may not have been initialised either, but it
14036 cannot actually cause any incompatibility. Do not short-circuit
14037 dynamic objects; their section list may be emptied by
14038 elf_link_add_object_symbols.
14040 Also check to see if there are no code sections in the input.
14041 In this case there is no need to check for code specific flags.
14042 XXX - do we need to worry about floating-point format compatibility
14043 in data sections ? */
14044 if (!(ibfd->flags & DYNAMIC))
14046 bfd_boolean null_input_bfd = TRUE;
14047 bfd_boolean only_data_sections = TRUE;
14049 for (sec = ibfd->sections; sec != NULL; sec = sec->next)
14051 /* Ignore synthetic glue sections. */
14052 if (strcmp (sec->name, ".glue_7")
14053 && strcmp (sec->name, ".glue_7t"))
14055 if ((bfd_get_section_flags (ibfd, sec)
14056 & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14057 == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
14058 only_data_sections = FALSE;
14060 null_input_bfd = FALSE;
14065 if (null_input_bfd || only_data_sections)
14069 /* Complain about various flag mismatches. */
14070 if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
14071 EF_ARM_EABI_VERSION (out_flags)))
14074 (_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
14076 (in_flags & EF_ARM_EABIMASK) >> 24,
14077 (out_flags & EF_ARM_EABIMASK) >> 24);
14081 /* Not sure what needs to be checked for EABI versions >= 1. */
14082 /* VxWorks libraries do not use these flags. */
14083 if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
14084 && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
14085 && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
14087 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
14090 (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
14092 in_flags & EF_ARM_APCS_26 ? 26 : 32,
14093 out_flags & EF_ARM_APCS_26 ? 26 : 32);
14094 flags_compatible = FALSE;
14097 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
14099 if (in_flags & EF_ARM_APCS_FLOAT)
14101 (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
14105 (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
14108 flags_compatible = FALSE;
14111 if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
14113 if (in_flags & EF_ARM_VFP_FLOAT)
14115 (_("error: %B uses VFP instructions, whereas %B does not"),
14119 (_("error: %B uses FPA instructions, whereas %B does not"),
14122 flags_compatible = FALSE;
14125 if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
14127 if (in_flags & EF_ARM_MAVERICK_FLOAT)
14129 (_("error: %B uses Maverick instructions, whereas %B does not"),
14133 (_("error: %B does not use Maverick instructions, whereas %B does"),
14136 flags_compatible = FALSE;
14139 #ifdef EF_ARM_SOFT_FLOAT
14140 if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
14142 /* We can allow interworking between code that is VFP format
14143 layout, and uses either soft float or integer regs for
14144 passing floating point arguments and results. We already
14145 know that the APCS_FLOAT flags match; similarly for VFP flags. */
14147 if ((in_flags & EF_ARM_APCS_FLOAT) != 0
14148 || (in_flags & EF_ARM_VFP_FLOAT) == 0)
14150 if (in_flags & EF_ARM_SOFT_FLOAT)
14152 (_("error: %B uses software FP, whereas %B uses hardware FP"),
14156 (_("error: %B uses hardware FP, whereas %B uses software FP"),
14159 flags_compatible = FALSE;
14164 /* Interworking mismatch is only a warning. */
14165 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
14167 if (in_flags & EF_ARM_INTERWORK)
14170 (_("Warning: %B supports interworking, whereas %B does not"),
14176 (_("Warning: %B does not support interworking, whereas %B does"),
14182 return flags_compatible;
14186 /* Symbian OS Targets. */
14188 #undef TARGET_LITTLE_SYM
14189 #define TARGET_LITTLE_SYM bfd_elf32_littlearm_symbian_vec
14190 #undef TARGET_LITTLE_NAME
14191 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
14192 #undef TARGET_BIG_SYM
14193 #define TARGET_BIG_SYM bfd_elf32_bigarm_symbian_vec
14194 #undef TARGET_BIG_NAME
14195 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
14197 /* Like elf32_arm_link_hash_table_create -- but overrides
14198 appropriately for Symbian OS. */
14200 static struct bfd_link_hash_table *
14201 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
14203 struct bfd_link_hash_table *ret;
14205 ret = elf32_arm_link_hash_table_create (abfd);
14208 struct elf32_arm_link_hash_table *htab
14209 = (struct elf32_arm_link_hash_table *)ret;
14210 /* There is no PLT header for Symbian OS. */
14211 htab->plt_header_size = 0;
14212 /* The PLT entries are each one instruction and one word. */
14213 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
14214 htab->symbian_p = 1;
14215 /* Symbian uses armv5t or above, so use_blx is always true. */
htab->use_blx = 1;
14217 htab->root.is_relocatable_executable = 1;
14222 static const struct bfd_elf_special_section
14223 elf32_arm_symbian_special_sections[] =
14225 /* In a BPABI executable, the dynamic linking sections do not go in
14226 the loadable read-only segment. The post-linker may wish to
14227 refer to these sections, but they are not part of the final program image. */
14229 { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
14230 { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
14231 { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
14232 { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
14233 { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
14234 /* These sections do not need to be writable as the SymbianOS
14235 postlinker will arrange things so that no dynamic relocation is applied to them. */
14237 { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
14238 { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
14239 { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
14240 { NULL, 0, 0, 0, 0 }
14244 elf32_arm_symbian_begin_write_processing (bfd *abfd,
14245 struct bfd_link_info *link_info)
14247 /* BPABI objects are never loaded directly by an OS kernel; they are
14248 processed by a postlinker first, into an OS-specific format. If
14249 the D_PAGED bit is set on the file, BFD will align segments on
14250 page boundaries, so that an OS can directly map the file. With
14251 BPABI objects, that just results in wasted space. In addition,
14252 because we clear the D_PAGED bit, map_sections_to_segments will
14253 recognize that the program headers should not be mapped into any
14254 loadable segment. */
14255 abfd->flags &= ~D_PAGED;
14256 elf32_arm_begin_write_processing (abfd, link_info);
14260 elf32_arm_symbian_modify_segment_map (bfd *abfd,
14261 struct bfd_link_info *info)
14263 struct elf_segment_map *m;
14266 /* BPABI shared libraries and executables should have a PT_DYNAMIC
14267 segment. However, because the .dynamic section is not marked
14268 with SEC_LOAD, the generic ELF code will not create such a segment. */
14270 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
14273 for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
14274 if (m->p_type == PT_DYNAMIC)
14279 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
14280 m->next = elf_tdata (abfd)->segment_map;
14281 elf_tdata (abfd)->segment_map = m;
14285 /* Also call the generic arm routine. */
14286 return elf32_arm_modify_segment_map (abfd, info);
14289 /* Return address for Ith PLT stub in section PLT, for relocation REL
14290 or (bfd_vma) -1 if it should not be included. */
14293 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
14294 const arelent *rel ATTRIBUTE_UNUSED)
14296 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
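/* For reference: with the two-word PLT entries set up in
   elf32_arm_symbian_link_hash_table_create above, and no PLT header to
   skip, this evaluates to plt->vma + 8 * i.  */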
14301 #define elf32_bed elf32_arm_symbian_bed
14303 /* The dynamic sections are not allocated on SymbianOS; the postlinker
14304 will process them and then discard them. */
14305 #undef ELF_DYNAMIC_SEC_FLAGS
14306 #define ELF_DYNAMIC_SEC_FLAGS \
14307 (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
14309 #undef elf_backend_add_symbol_hook
14310 #undef elf_backend_emit_relocs
14312 #undef bfd_elf32_bfd_link_hash_table_create
14313 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
14314 #undef elf_backend_special_sections
14315 #define elf_backend_special_sections elf32_arm_symbian_special_sections
14316 #undef elf_backend_begin_write_processing
14317 #define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
14318 #undef elf_backend_final_write_processing
14319 #define elf_backend_final_write_processing elf32_arm_final_write_processing
14321 #undef elf_backend_modify_segment_map
14322 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
14324 /* There is no .got section for BPABI objects, and hence no header. */
14325 #undef elf_backend_got_header_size
14326 #define elf_backend_got_header_size 0
14328 /* Similarly, there is no .got.plt section. */
14329 #undef elf_backend_want_got_plt
14330 #define elf_backend_want_got_plt 0
14332 #undef elf_backend_plt_sym_val
14333 #define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val
14335 #undef elf_backend_may_use_rel_p
14336 #define elf_backend_may_use_rel_p 1
14337 #undef elf_backend_may_use_rela_p
14338 #define elf_backend_may_use_rela_p 0
14339 #undef elf_backend_default_use_rela_p
14340 #define elf_backend_default_use_rela_p 0
14341 #undef elf_backend_want_plt_sym
14342 #define elf_backend_want_plt_sym 0
14343 #undef ELF_MAXPAGESIZE
14344 #define ELF_MAXPAGESIZE 0x8000
14346 #include "elf32-target.h"